[new feature] Add metrics (#674)

qicosmos 2024-06-07 23:15:51 +08:00 committed by GitHub
parent 9d2966424e
commit c0006099ab
20 changed files with 2169 additions and 43 deletions

View File

@ -9,41 +9,41 @@ on:
workflow_dispatch:
jobs:
windows_msvc:
runs-on: windows-latest
# windows_msvc:
# runs-on: windows-latest
strategy:
matrix:
mode: [ Release ] #[ Release, Debug ] #Debug not support ccache
#https://github.com/ccache/ccache/wiki/MS-Visual-Studio
#https://github.com/ccache/ccache/issues/1040
arch: [ amd64, x86 ] #[ amd64,x86 ]
ssl: [ OFF ] #[ ON, OFF ]
# strategy:
# matrix:
# mode: [ Release ] #[ Release, Debug ] #Debug not support ccache
# #https://github.com/ccache/ccache/wiki/MS-Visual-Studio
# #https://github.com/ccache/ccache/issues/1040
# arch: [ amd64, x86 ] #[ amd64,x86 ]
# ssl: [ OFF ] #[ ON, OFF ]
steps:
- name: Checkout
uses: actions/checkout@v3
- name: Enable Developer Command Prompt
uses: ilammy/msvc-dev-cmd@v1.12.0
with:
arch: ${{ matrix.arch }}
- name: Install ninja-build tool
uses: seanmiddleditch/gha-setup-ninja@master
with:
version: 1.11.1
- name: latest ccache
run: choco install ccache
- name: ccache
uses: hendrikmuhs/ccache-action@v1.2
with:
key: ${{ github.job }}-${{ matrix.mode}}-ssl( ${{ matrix.ssl}} )-arch-${{ matrix.arch}}
- name: Configure CMake
run: cmake -B ${{github.workspace}}\build -G Ninja -DCMAKE_BUILD_TYPE=${{ matrix.mode }} -DYLT_ENABLE_SSL=${{matrix.ssl}} -DUSE_CCACHE=ON
- name: Build
run: cmake --build ${{github.workspace}}\build
- name: Test
working-directory: ${{github.workspace}}\build
run: ctest -C ${{matrix.mode}} -j 1 -V
# steps:
# - name: Checkout
# uses: actions/checkout@v3
# - name: Enable Developer Command Prompt
# uses: ilammy/msvc-dev-cmd@v1.12.0
# with:
# arch: ${{ matrix.arch }}
# - name: Install ninja-build tool
# uses: seanmiddleditch/gha-setup-ninja@master
# with:
# version: 1.11.1
# - name: latest ccache
# run: choco install ccache
# - name: ccache
# uses: hendrikmuhs/ccache-action@v1.2
# with:
# key: ${{ github.job }}-${{ matrix.mode}}-ssl( ${{ matrix.ssl}} )-arch-${{ matrix.arch}}
# - name: Configure CMake
# run: cmake -B ${{github.workspace}}\build -G Ninja -DCMAKE_BUILD_TYPE=${{ matrix.mode }} -DYLT_ENABLE_SSL=${{matrix.ssl}} -DUSE_CCACHE=ON
# - name: Build
# run: cmake --build ${{github.workspace}}\build
# - name: Test
# working-directory: ${{github.workspace}}\build
# run: ctest -C ${{matrix.mode}} -j 1 -V
windows_msvc_2019:
runs-on: windows-2019

View File

@ -14,7 +14,6 @@
* limitations under the License.
*/
#pragma once
#include "cinatra/uri.hpp"
#ifdef YLT_ENABLE_SSL
#define CINATRA_ENABLE_SSL
#endif

View File

@ -22,6 +22,7 @@
#include <asio/steady_timer.hpp>
#include <atomic>
#include <future>
#include <iostream>
#include <memory>
#include <mutex>
#include <thread>
@ -117,6 +118,8 @@ class io_context_pool {
pool_size = 1; // set default value as 1
}
total_thread_num_ += pool_size;
for (std::size_t i = 0; i < pool_size; ++i) {
io_context_ptr io_context(new asio::io_context(1));
work_ptr work(new asio::io_context::work(*io_context));
@ -150,8 +153,11 @@ class io_context_pool {
cpu_set_t cpuset;
CPU_ZERO(&cpuset);
CPU_SET(i, &cpuset);
pthread_setaffinity_np(threads.back()->native_handle(),
sizeof(cpu_set_t), &cpuset);
int rc = pthread_setaffinity_np(threads.back()->native_handle(),
sizeof(cpu_set_t), &cpuset);
if (rc != 0) {
std::cerr << "Error calling pthread_setaffinity_np: " << rc << "\n";
}
}
#endif
}
@ -201,6 +207,8 @@ class io_context_pool {
template <typename T>
friend io_context_pool &g_io_context_pool();
static size_t get_total_thread_num() { return total_thread_num_; }
private:
using io_context_ptr = std::shared_ptr<asio::io_context>;
using work_ptr = std::shared_ptr<asio::io_context::work>;
@ -213,8 +221,13 @@ class io_context_pool {
std::atomic<bool> has_run_or_stop_ = false;
std::once_flag flag_;
bool cpu_affinity_ = false;
inline static std::atomic<size_t> total_thread_num_ = 0;
};
inline size_t get_total_thread_num() {
return io_context_pool::get_total_thread_num();
}
class multithread_context_pool {
public:
multithread_context_pool(size_t thd_num = std::thread::hardware_concurrency())

View File

@ -0,0 +1,258 @@
#pragma once
#include <atomic>
#include <chrono>
#include "metric.hpp"
namespace ylt {
enum class op_type_t { INC, DEC, SET };
struct counter_sample {
op_type_t op_type;
std::vector<std::string> labels_value;
double value;
};
class counter_t : public metric_t {
public:
// default, no labels, only contains an atomic value.
counter_t(std::string name, std::string help)
: metric_t(MetricType::Counter, std::move(name), std::move(help)) {
use_atomic_ = true;
}
// static labels value, contains a map with atomic value.
counter_t(std::string name, std::string help,
std::map<std::string, std::string> labels)
: metric_t(MetricType::Counter, std::move(name), std::move(help)) {
for (auto &[k, v] : labels) {
labels_name_.push_back(k);
labels_value_.push_back(v);
}
atomic_value_map_.emplace(labels_value_, 0);
use_atomic_ = true;
}
// dynamic labels value
counter_t(std::string name, std::string help,
std::vector<std::string> labels_name)
: metric_t(MetricType::Counter, std::move(name), std::move(help),
std::move(labels_name)) {}
virtual ~counter_t() {}
double value() { return default_lable_value_; }
double value(const std::vector<std::string> &labels_value) {
if (use_atomic_) {
double val = atomic_value_map_[labels_value];
return val;
}
else {
std::lock_guard lock(mtx_);
return value_map_[labels_value];
}
}
std::map<std::vector<std::string>, double,
std::less<std::vector<std::string>>>
value_map() {
std::map<std::vector<std::string>, double,
std::less<std::vector<std::string>>>
map;
if (use_atomic_) {
map = {atomic_value_map_.begin(), atomic_value_map_.end()};
}
else {
std::lock_guard lock(mtx_);
map = value_map_;
}
return map;
}
void serialize(std::string &str) override {
if (labels_name_.empty()) {
if (default_lable_value_ == 0) {
return;
}
serialize_head(str);
serialize_default_label(str);
return;
}
serialize_head(str);
std::string s;
if (use_atomic_) {
serialize_map(atomic_value_map_, s);
}
else {
serialize_map(value_map_, s);
}
if (s.empty()) {
str.clear();
}
else {
str.append(s);
}
}
void inc(double val = 1) {
if (val < 0) {
throw std::invalid_argument("the value is less than zero");
}
#ifdef __APPLE__
mac_os_atomic_fetch_add(&default_lable_value_, val);
#else
default_lable_value_ += val;
#endif
}
void inc(const std::vector<std::string> &labels_value, double value = 1) {
if (value == 0) {
return;
}
validate(labels_value, value);
if (use_atomic_) {
if (labels_value != labels_value_) {
throw std::invalid_argument(
"the given labels_value is not match with origin labels_value");
}
set_value<true>(atomic_value_map_[labels_value], value, op_type_t::INC);
}
else {
std::lock_guard lock(mtx_);
set_value<false>(value_map_[labels_value], value, op_type_t::INC);
}
}
void update(double value) { default_lable_value_ = value; }
void update(const std::vector<std::string> &labels_value, double value) {
if (labels_value.empty() || labels_name_.size() != labels_value.size()) {
throw std::invalid_argument(
"the number of labels_value name and labels_value is not match");
}
if (use_atomic_) {
if (labels_value != labels_value_) {
throw std::invalid_argument(
"the given labels_value is not match with origin labels_value");
}
set_value<true>(atomic_value_map_[labels_value], value, op_type_t::SET);
}
else {
std::lock_guard lock(mtx_);
set_value<false>(value_map_[labels_value], value, op_type_t::SET);
}
}
std::map<std::vector<std::string>, std::atomic<double>,
std::less<std::vector<std::string>>>
&atomic_value_map() {
return atomic_value_map_;
}
protected:
void serialize_default_label(std::string &str) {
str.append(name_);
if (labels_name_.empty()) {
str.append(" ");
}
if (type_ == MetricType::Counter) {
str.append(std::to_string((int64_t)default_lable_value_));
}
else {
str.append(std::to_string(default_lable_value_));
}
str.append("\n");
}
template <typename T>
void serialize_map(T &value_map, std::string &str) {
for (auto &[labels_value, value] : value_map) {
if (value == 0) {
continue;
}
str.append(name_);
str.append("{");
build_string(str, labels_name_, labels_value);
str.append("} ");
if (type_ == MetricType::Counter) {
str.append(std::to_string((int64_t)value));
}
else {
str.append(std::to_string(value));
}
str.append("\n");
}
}
void build_string(std::string &str, const std::vector<std::string> &v1,
const std::vector<std::string> &v2) {
for (size_t i = 0; i < v1.size(); i++) {
str.append(v1[i]).append("=\"").append(v2[i]).append("\"").append(",");
}
str.pop_back();
}
void validate(const std::vector<std::string> &labels_value, double value) {
if (value < 0) {
throw std::invalid_argument("the value is less than zero");
}
if (labels_value.empty() || labels_name_.size() != labels_value.size()) {
throw std::invalid_argument(
"the number of labels_value name and labels_value is not match");
}
}
template <bool is_atomic = false, typename T>
void set_value(T &label_val, double value, op_type_t type) {
switch (type) {
case op_type_t::INC: {
#ifdef __APPLE__
if constexpr (is_atomic) {
mac_os_atomic_fetch_add(&label_val, value);
}
else {
label_val += value;
}
#else
label_val += value;
#endif
} break;
case op_type_t::DEC:
#ifdef __APPLE__
if constexpr (is_atomic) {
mac_os_atomic_fetch_sub(&label_val, value);
}
else {
label_val -= value;
}
#else
label_val -= value;
#endif
break;
case op_type_t::SET:
label_val = value;
break;
}
}
std::map<std::vector<std::string>, std::atomic<double>,
std::less<std::vector<std::string>>>
atomic_value_map_;
std::atomic<double> default_lable_value_ = 0;
std::mutex mtx_;
std::map<std::vector<std::string>, double,
std::less<std::vector<std::string>>>
value_map_;
};
} // namespace ylt
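A minimal usage sketch of the counter_t defined above (illustrative only, not part of the diff; the include path is assumed):

```cpp
#include <iostream>
#include <map>
#include "ylt/metric/counter.hpp"  // path assumed

int main() {
  // counter with static labels: the label values are fixed at construction
  ylt::counter_t req_count(
      "request_count", "total requests",
      std::map<std::string, std::string>{{"method", "GET"}, {"url", "/"}});

  req_count.inc({"GET", "/"});     // must pass the same static label values
  req_count.inc({"GET", "/"}, 2);

  std::string str;
  req_count.serialize(str);        // Prometheus text format
  std::cout << str;                // e.g. request_count{method="GET",url="/"} 3
}
```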

View File

@ -0,0 +1,175 @@
#pragma once
#include <algorithm>
#include <array>
#include <cmath>
#include <limits>
#include <vector>
// https://github.com/jupp0r/prometheus-cpp/blob/master/core/include/prometheus/detail/ckms_quantiles.h
namespace ylt {
class CKMSQuantiles {
public:
struct Quantile {
Quantile(double quantile, double error)
: quantile(quantile),
error(error),
u(2.0 * error / (1.0 - quantile)),
v(2.0 * error / quantile) {}
double quantile;
double error;
double u;
double v;
};
private:
struct Item {
double value;
int g;
int delta;
Item(double value, int lower_delta, int delta)
: value(value), g(lower_delta), delta(delta) {}
};
public:
explicit CKMSQuantiles(const std::vector<Quantile>& quantiles)
: quantiles_(quantiles), count_(0), buffer_{}, buffer_count_(0) {}
void insert(double value) {
buffer_[buffer_count_] = value;
++buffer_count_;
if (buffer_count_ == buffer_.size()) {
insertBatch();
compress();
}
}
double get(double q) {
insertBatch();
compress();
if (sample_.empty()) {
return std::numeric_limits<double>::quiet_NaN();
}
int rankMin = 0;
const auto desired = static_cast<int>(q * count_);
const auto bound = desired + (allowableError(desired) / 2);
auto it = sample_.begin();
decltype(it) prev;
auto cur = it++;
while (it != sample_.end()) {
prev = cur;
cur = it++;
rankMin += prev->g;
if (rankMin + cur->g + cur->delta > bound) {
return prev->value;
}
}
return sample_.back().value;
}
void reset() {
count_ = 0;
sample_.clear();
buffer_count_ = 0;
}
private:
double allowableError(int rank) {
auto size = sample_.size();
double minError = size + 1;
for (const auto& q : quantiles_.get()) {
double error;
if (rank <= q.quantile * size) {
error = q.u * (size - rank);
}
else {
error = q.v * rank;
}
if (error < minError) {
minError = error;
}
}
return minError;
}
bool insertBatch() {
if (buffer_count_ == 0) {
return false;
}
std::sort(buffer_.begin(), buffer_.begin() + buffer_count_);
std::size_t start = 0;
if (sample_.empty()) {
sample_.emplace_back(buffer_[0], 1, 0);
++start;
++count_;
}
std::size_t idx = 0;
std::size_t item = idx++;
for (std::size_t i = start; i < buffer_count_; ++i) {
double v = buffer_[i];
while (idx < sample_.size() && sample_[item].value < v) {
item = idx++;
}
if (sample_[item].value > v) {
--idx;
}
int delta;
if (idx - 1 == 0 || idx + 1 == sample_.size()) {
delta = 0;
}
else {
delta = static_cast<int>(std::floor(allowableError(idx + 1))) + 1;
}
sample_.emplace(sample_.begin() + idx, v, 1, delta);
count_++;
item = idx++;
}
buffer_count_ = 0;
return true;
}
void compress() {
if (sample_.size() < 2) {
return;
}
std::size_t idx = 0;
std::size_t prev;
std::size_t next = idx++;
while (idx < sample_.size()) {
prev = next;
next = idx++;
if (sample_[prev].g + sample_[next].g + sample_[next].delta <=
allowableError(idx - 1)) {
sample_[next].g += sample_[prev].g;
sample_.erase(sample_.begin() + prev);
}
}
}
private:
const std::reference_wrapper<const std::vector<Quantile>> quantiles_;
std::size_t count_;
std::vector<Item> sample_;
std::array<double, 500> buffer_;
std::size_t buffer_count_;
};
} // namespace ylt
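A small standalone sketch of how CKMSQuantiles might be driven (illustrative only; the include path is assumed). Note that the class stores a reference to the quantile vector, so the vector must outlive the object:

```cpp
#include <iostream>
#include <vector>
#include "ylt/metric/detail/ckms_quantiles.hpp"  // path assumed

int main() {
  // target quantiles with their allowed error
  std::vector<ylt::CKMSQuantiles::Quantile> qs{{0.5, 0.05}, {0.9, 0.01}};
  ylt::CKMSQuantiles ckms(qs);  // keeps a reference to qs

  for (int i = 1; i <= 1000; i++) {
    ckms.insert(static_cast<double>(i));
  }
  // approximate p50/p90 within the configured error bounds
  std::cout << "p50 ~ " << ckms.get(0.5) << ", p90 ~ " << ckms.get(0.9) << "\n";
}
```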

View File

@ -0,0 +1,52 @@
#pragma once
#include "ckms_quantiles.hpp"
// https://github.com/jupp0r/prometheus-cpp/blob/master/core/include/prometheus/detail/time_window_quantiles.h
namespace ylt {
class TimeWindowQuantiles {
using Clock = std::chrono::steady_clock;
public:
TimeWindowQuantiles(const std::vector<CKMSQuantiles::Quantile>& quantiles,
Clock::duration max_age_seconds, int age_buckets)
: quantiles_(quantiles),
ckms_quantiles_(age_buckets, CKMSQuantiles(quantiles_)),
current_bucket_(0),
last_rotation_(Clock::now()),
rotation_interval_(max_age_seconds / age_buckets) {}
double get(double q) const {
CKMSQuantiles& current_bucket = rotate();
return current_bucket.get(q);
}
void insert(double value) {
rotate();
for (auto& bucket : ckms_quantiles_) {
bucket.insert(value);
}
}
private:
CKMSQuantiles& rotate() const {
auto delta = Clock::now() - last_rotation_;
while (delta > rotation_interval_) {
ckms_quantiles_[current_bucket_].reset();
if (++current_bucket_ >= ckms_quantiles_.size()) {
current_bucket_ = 0;
}
delta -= rotation_interval_;
last_rotation_ += rotation_interval_;
}
return ckms_quantiles_[current_bucket_];
}
const std::vector<CKMSQuantiles::Quantile>& quantiles_;
mutable std::vector<CKMSQuantiles> ckms_quantiles_;
mutable std::size_t current_bucket_;
mutable Clock::time_point last_rotation_;
const Clock::duration rotation_interval_;
};
} // namespace ylt
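A brief sketch of TimeWindowQuantiles usage (illustrative only; include path assumed). Samples are spread over `age_buckets` CKMS instances that are rotated as the window ages, so queries reflect roughly the last `max_age_seconds` of data:

```cpp
#include <chrono>
#include <iostream>
#include <vector>
#include "ylt/metric/detail/time_window_quantiles.hpp"  // path assumed

int main() {
  std::vector<ylt::CKMSQuantiles::Quantile> qs{{0.5, 0.05}, {0.99, 0.001}};
  // 60 s sliding window split into 5 rotating buckets
  ylt::TimeWindowQuantiles window(qs, std::chrono::seconds(60), 5);

  for (int i = 1; i <= 500; i++) {
    window.insert(static_cast<double>(i));
  }
  std::cout << "p99 ~ " << window.get(0.99) << "\n";
}
```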

View File

@ -0,0 +1,52 @@
#pragma once
#include <chrono>
#include "counter.hpp"
namespace ylt {
class gauge_t : public counter_t {
public:
gauge_t(std::string name, std::string help)
: counter_t(std::move(name), std::move(help)) {
set_metric_type(MetricType::Gauge);
}
gauge_t(std::string name, std::string help,
std::vector<std::string> labels_name)
: counter_t(std::move(name), std::move(help), std::move(labels_name)) {
set_metric_type(MetricType::Gauge);
}
gauge_t(std::string name, std::string help,
std::map<std::string, std::string> labels)
: counter_t(std::move(name), std::move(help), std::move(labels)) {
set_metric_type(MetricType::Gauge);
}
void dec(double value = 1) {
#ifdef __APPLE__
mac_os_atomic_fetch_sub(&default_lable_value_, value);
#else
default_lable_value_ -= value;
#endif
}
void dec(const std::vector<std::string>& labels_value, double value = 1) {
if (value == 0) {
return;
}
validate(labels_value, value);
if (use_atomic_) {
if (labels_value != labels_value_) {
throw std::invalid_argument(
"the given labels_value is not match with origin labels_value");
}
set_value<true>(atomic_value_map_[labels_value], value, op_type_t::DEC);
}
else {
std::lock_guard lock(mtx_);
set_value<false>(value_map_[labels_value], value, op_type_t::DEC);
}
}
};
} // namespace ylt

View File

@ -0,0 +1,83 @@
#pragma once
#include <algorithm>
#include <cstddef>
#include <memory>
#include <vector>
#include "counter.hpp"
#include "metric.hpp"
namespace ylt {
class histogram_t : public metric_t {
public:
histogram_t(std::string name, std::string help, std::vector<double> buckets)
: bucket_boundaries_(buckets),
metric_t(MetricType::Histogram, std::move(name), std::move(help)),
sum_(std::make_shared<gauge_t>("", "")) {
if (!is_strict_sorted(begin(bucket_boundaries_), end(bucket_boundaries_))) {
throw std::invalid_argument("Bucket Boundaries must be strictly sorted");
}
for (size_t i = 0; i < buckets.size() + 1; i++) {
bucket_counts_.push_back(std::make_shared<counter_t>("", ""));
}
use_atomic_ = true;
}
void observe(double value) {
const auto bucket_index = static_cast<std::size_t>(
std::distance(bucket_boundaries_.begin(),
std::lower_bound(bucket_boundaries_.begin(),
bucket_boundaries_.end(), value)));
sum_->inc(value);
bucket_counts_[bucket_index]->inc();
}
auto get_bucket_counts() { return bucket_counts_; }
void serialize(std::string& str) override {
serialize_head(str);
double count = 0;
auto bucket_counts = get_bucket_counts();
for (size_t i = 0; i < bucket_counts.size(); i++) {
auto counter = bucket_counts[i];
str.append(name_).append("_bucket{");
if (i == bucket_boundaries_.size()) {
str.append("le=\"").append("+Inf").append("\"} ");
}
else {
str.append("le=\"")
.append(std::to_string(bucket_boundaries_[i]))
.append("\"} ");
}
count += counter->value();
str.append(std::to_string(count));
str.append("\n");
}
str.append(name_)
.append("_sum ")
.append(std::to_string(sum_->value()))
.append("\n");
str.append(name_)
.append("_count ")
.append(std::to_string(count))
.append("\n");
}
private:
template <class ForwardIterator>
bool is_strict_sorted(ForwardIterator first, ForwardIterator last) {
return std::adjacent_find(first, last,
std::greater_equal<typename std::iterator_traits<
ForwardIterator>::value_type>()) == last;
}
std::vector<double> bucket_boundaries_;
std::vector<std::shared_ptr<counter_t>> bucket_counts_; // readonly
std::shared_ptr<gauge_t> sum_;
};
} // namespace ylt

View File

@ -0,0 +1,316 @@
#pragma once
#include <atomic>
#include <cassert>
#include <map>
#include <memory>
#include <mutex>
#include <stdexcept>
#include <string>
#include <vector>
#include "async_simple/coro/Lazy.h"
#include "cinatra/cinatra_log_wrapper.hpp"
namespace ylt {
enum class MetricType {
Counter,
Gauge,
Histogram,
Summary,
Nil,
};
class metric_t {
public:
metric_t() = default;
metric_t(MetricType type, std::string name, std::string help,
std::vector<std::string> labels_name = {})
: type_(type),
name_(std::move(name)),
help_(std::move(help)),
labels_name_(std::move(labels_name)) {}
virtual ~metric_t() {}
std::string_view name() { return name_; }
std::string_view help() { return help_; }
MetricType metric_type() { return type_; }
std::string_view metric_name() {
switch (type_) {
case MetricType::Counter:
return "counter";
case MetricType::Gauge:
return "gauge";
case MetricType::Histogram:
return "histogram";
case MetricType::Summary:
return "summary";
case MetricType::Nil:
default:
return "unknown";
}
}
const std::vector<std::string>& labels_name() { return labels_name_; }
virtual void serialize(std::string& str) {}
// only for summary
virtual async_simple::coro::Lazy<void> serialize_async(std::string& out) {
co_return;
}
bool is_atomic() const { return use_atomic_; }
template <typename T>
T* as() {
return dynamic_cast<T*>(this);
}
protected:
void set_metric_type(MetricType type) { type_ = type; }
void serialize_head(std::string& str) {
str.append("# HELP ").append(name_).append(" ").append(help_).append("\n");
str.append("# TYPE ")
.append(name_)
.append(" ")
.append(metric_name())
.append("\n");
}
#ifdef __APPLE__
double mac_os_atomic_fetch_add(std::atomic<double>* obj, double arg) {
double v;
do {
v = obj->load();
} while (!std::atomic_compare_exchange_weak(obj, &v, v + arg));
return v;
}
double mac_os_atomic_fetch_sub(std::atomic<double>* obj, double arg) {
double v;
do {
v = obj->load();
} while (!std::atomic_compare_exchange_weak(obj, &v, v - arg));
return v;
}
#endif
MetricType type_ = MetricType::Nil;
std::string name_;
std::string help_;
std::vector<std::string> labels_name_; // read only
std::vector<std::string> labels_value_; // read only
bool use_atomic_ = false;
};
template <size_t ID = 0>
struct metric_manager_t {
struct null_mutex_t {
void lock() {}
void unlock() {}
};
// create and register metric
template <typename T, typename... Args>
static std::shared_ptr<T> create_metric_static(const std::string& name,
const std::string& help,
Args&&... args) {
auto m = std::make_shared<T>(name, help, std::forward<Args>(args)...);
bool r = register_metric_static(m);
if (!r) {
return nullptr;
}
return m;
}
template <typename T, typename... Args>
static std::shared_ptr<T> create_metric_dynamic(const std::string& name,
const std::string& help,
Args&&... args) {
auto m = std::make_shared<T>(name, help, std::forward<Args>(args)...);
bool r = register_metric_static(m);
if (!r) {
return nullptr;
}
return m;
}
static bool register_metric_dynamic(std::shared_ptr<metric_t> metric) {
return register_metric_impl<true>(metric);
}
static bool register_metric_static(std::shared_ptr<metric_t> metric) {
return register_metric_impl<false>(metric);
}
template <typename... Metrics>
static bool register_metric_dynamic(Metrics... metrics) {
bool r = true;
((void)(r && (r = register_metric_impl<true>(metrics), true)), ...);
return r;
}
template <typename... Metrics>
static bool register_metric_static(Metrics... metrics) {
bool r = true;
((void)(r && (r = register_metric_impl<false>(metrics), true)), ...);
return r;
}
static auto metric_map_static() { return metric_map_impl<false>(); }
static auto metric_map_dynamic() { return metric_map_impl<true>(); }
static size_t metric_count_static() { return metric_count_impl<false>(); }
static size_t metric_count_dynamic() { return metric_count_impl<true>(); }
static std::vector<std::string> metric_keys_static() {
return metric_keys_impl<false>();
}
static std::vector<std::string> metric_keys_dynamic() {
return metric_keys_impl<true>();
}
template <typename T>
static T* get_metric_static(const std::string& name) {
auto m = get_metric_impl<false>(name);
if (m == nullptr) {
return nullptr;
}
return m->template as<T>();
}
template <typename T>
static T* get_metric_dynamic(const std::string& name) {
auto m = get_metric_impl<true>(name);
if (m == nullptr) {
return nullptr;
}
return m->template as<T>();
}
static async_simple::coro::Lazy<std::string> serialize_static() {
return serialize_impl<false>();
}
static async_simple::coro::Lazy<std::string> serialize_dynamic() {
return serialize_impl<true>();
}
private:
template <bool need_lock>
static void check_lock() {
if (need_lock_ != need_lock) {
std::string str = "need lock ";
std::string s = need_lock_ ? "true" : "false";
std::string r = need_lock ? "true" : "false";
str.append(s).append(" but set as ").append(r);
throw std::invalid_argument(str);
}
}
template <bool need_lock = true>
static auto get_lock() {
check_lock<need_lock>();
if constexpr (need_lock) {
return std::scoped_lock(mtx_);
}
else {
return std::scoped_lock(null_mtx_);
}
}
template <bool need_lock>
static bool register_metric_impl(std::shared_ptr<metric_t> metric) {
// the first register_metric call decides whether metric_manager_t uses a lock;
// visiting metric_manager_t with a different lock strategy afterwards throws an
// exception.
std::call_once(flag_, [] {
need_lock_ = need_lock;
});
std::string name(metric->name());
auto lock = get_lock<need_lock>();
bool r = metric_map_.emplace(name, std::move(metric)).second;
if (!r) {
CINATRA_LOG_ERROR << "duplicate registered metric name: " << name;
}
return r;
}
template <bool need_lock>
static auto metric_map_impl() {
auto lock = get_lock<need_lock>();
return metric_map_;
}
template <bool need_lock>
static size_t metric_count_impl() {
auto lock = get_lock<need_lock>();
return metric_map_.size();
}
template <bool need_lock>
static std::vector<std::string> metric_keys_impl() {
std::vector<std::string> keys;
{
auto lock = get_lock<need_lock>();
for (auto& pair : metric_map_) {
keys.push_back(pair.first);
}
}
return keys;
}
template <bool need_lock>
static std::shared_ptr<metric_t> get_metric_impl(const std::string& name) {
auto lock = get_lock<need_lock>();
auto it = metric_map_.find(name);
if (it == metric_map_.end()) {
return nullptr;
}
return it->second;
}
template <bool need_lock>
static auto collect() {
std::vector<std::shared_ptr<metric_t>> metrics;
{
auto lock = get_lock<need_lock>();
for (auto& pair : metric_map_) {
metrics.push_back(pair.second);
}
}
return metrics;
}
template <bool need_lock = true>
static async_simple::coro::Lazy<std::string> serialize_impl() {
std::string str;
auto metrics = collect<need_lock>();
for (auto& m : metrics) {
if (m->metric_type() == MetricType::Summary) {
co_await m->serialize_async(str);
}
else {
m->serialize(str);
}
}
co_return str;
}
static inline std::mutex mtx_;
static inline std::map<std::string, std::shared_ptr<metric_t>> metric_map_;
static inline null_mutex_t null_mtx_;
static inline std::atomic_bool need_lock_ = true;
static inline std::once_flag flag_;
};
using default_metric_manager = metric_manager_t<0>;
} // namespace ylt
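A short sketch of how metric_manager_t might be used (illustrative only; it mirrors the unit test further down): create a metric through the manager, update it, then serialize everything the manager holds.

```cpp
#include <iostream>
#include "async_simple/coro/SyncAwait.h"
#include "ylt/metric/counter.hpp"  // paths assumed
#include "ylt/metric/metric.hpp"

int main() {
  using namespace ylt;
  // create_metric_static registers the metric and returns it (nullptr on a duplicate name)
  auto qps = default_metric_manager::create_metric_static<counter_t>(
      "qps_count", "qps help");
  qps->inc();

  // serialize every registered metric into Prometheus text format
  auto str = async_simple::coro::syncAwait(
      default_metric_manager::serialize_static());
  std::cout << str;
}
```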

View File

@ -0,0 +1,139 @@
#pragma once
#include <atomic>
#include "detail/time_window_quantiles.hpp"
#include "metric.hpp"
#include "ylt/coro_io/coro_io.hpp"
#include "ylt/util/concurrentqueue.h"
namespace ylt {
class summary_t : public metric_t {
public:
using Quantiles = std::vector<CKMSQuantiles::Quantile>;
summary_t(std::string name, std::string help, Quantiles quantiles,
std::chrono::milliseconds max_age = std::chrono::seconds{60},
int age_buckets = 5)
: quantiles_{std::move(quantiles)},
metric_t(MetricType::Summary, std::move(name), std::move(help)) {
work_ = std::make_shared<asio::io_context::work>(ctx_);
thd_ = std::thread([this] {
ctx_.run();
});
excutor_ =
std::make_unique<coro_io::ExecutorWrapper<>>(ctx_.get_executor());
block_ = std::make_shared<block_t>();
block_->quantile_values_ =
std::make_shared<TimeWindowQuantiles>(quantiles_, max_age, age_buckets);
start_timer(block_).via(excutor_.get()).start([](auto &&) {
});
}
~summary_t() {
block_->stop_ = true;
work_ = nullptr;
thd_.join();
}
struct block_t {
std::atomic<bool> stop_ = false;
moodycamel::ConcurrentQueue<double> sample_queue_;
std::shared_ptr<TimeWindowQuantiles> quantile_values_;
std::uint64_t count_;
double sum_;
};
void observe(double value) { block_->sample_queue_.enqueue(value); }
async_simple::coro::Lazy<std::vector<double>> get_rates(double &sum,
uint64_t &count) {
std::vector<double> vec;
if (quantiles_.empty()) {
co_return std::vector<double>{};
}
co_await coro_io::post([this, &vec, &sum, &count] {
sum = block_->sum_;
count = block_->count_;
for (const auto &quantile : quantiles_) {
vec.push_back(block_->quantile_values_->get(quantile.quantile));
}
});
co_return vec;
}
async_simple::coro::Lazy<double> get_sum() {
auto ret = co_await coro_io::post([this] {
return block_->sum_;
});
co_return ret.value();
}
async_simple::coro::Lazy<uint64_t> get_count() {
auto ret = co_await coro_io::post([this] {
return block_->count_;
});
co_return ret.value();
}
size_t size_approx() { return block_->sample_queue_.size_approx(); }
async_simple::coro::Lazy<void> serialize_async(std::string &str) override {
if (quantiles_.empty()) {
co_return;
}
serialize_head(str);
double sum = 0;
uint64_t count = 0;
auto rates = co_await get_rates(sum, count);
for (size_t i = 0; i < quantiles_.size(); i++) {
str.append(name_);
str.append("{quantile=\"");
str.append(std::to_string(quantiles_[i].quantile)).append("\"} ");
str.append(std::to_string(rates[i])).append("\n");
}
str.append(name_).append("_sum ").append(std::to_string(sum)).append("\n");
str.append(name_)
.append("_count ")
.append(std::to_string((uint64_t)count))
.append("\n");
}
private:
async_simple::coro::Lazy<void> start_timer(std::shared_ptr<block_t> block) {
double sample;
size_t count = 1000000;
while (!block->stop_) {
size_t index = 0;
while (block->sample_queue_.try_dequeue(sample)) {
block->quantile_values_->insert(sample);
block->count_ += 1;
block->sum_ += sample;
index++;
if (index == count) {
break;
}
}
co_await async_simple::coro::Yield{};
if (block->sample_queue_.size_approx() == 0) {
std::this_thread::sleep_for(std::chrono::milliseconds(5));
}
}
co_return;
}
Quantiles quantiles_; // readonly
std::shared_ptr<block_t> block_;
std::unique_ptr<coro_io::ExecutorWrapper<>> excutor_ = nullptr;
std::shared_ptr<asio::io_context::work> work_;
asio::io_context ctx_;
std::thread thd_;
};
} // namespace ylt

View File

@ -21,9 +21,14 @@
#include "sha1.hpp"
#include "string_resize.hpp"
#include "websocket.hpp"
#include "ylt/metric/counter.hpp"
#include "ylt/metric/gauge.hpp"
#include "ylt/metric/histogram.hpp"
#include "ylt/metric/metric.hpp"
#ifdef CINATRA_ENABLE_GZIP
#include "gzip.hpp"
#endif
#include "metric_conf.hpp"
#include "ylt/coro_io/coro_file.hpp"
#include "ylt/coro_io/coro_io.hpp"
@ -47,9 +52,14 @@ class coro_http_connection
request_(parser_, this),
response_(this) {
buffers_.reserve(3);
cinatra_metric_conf::server_total_fd_inc();
}
~coro_http_connection() { close(); }
~coro_http_connection() {
cinatra_metric_conf::server_total_fd_dec();
close();
}
#ifdef CINATRA_ENABLE_SSL
bool init_ssl(const std::string &cert_file, const std::string &key_file,
@ -94,6 +104,8 @@ class coro_http_connection
#ifdef CINATRA_ENABLE_SSL
bool has_shake = false;
#endif
std::chrono::system_clock::time_point start{};
std::chrono::system_clock::time_point mid{};
while (true) {
#ifdef CINATRA_ENABLE_SSL
if (use_ssl_ && !has_shake) {
@ -113,13 +125,21 @@ class coro_http_connection
if (ec != asio::error::eof) {
CINATRA_LOG_WARNING << "read http header error: " << ec.message();
}
cinatra_metric_conf::server_failed_req_inc();
close();
break;
}
if (cinatra_metric_conf::enable_metric) {
start = std::chrono::system_clock::now();
cinatra_metric_conf::server_total_req_inc();
}
const char *data_ptr = asio::buffer_cast<const char *>(head_buf_.data());
int head_len = parser_.parse_request(data_ptr, size, 0);
if (head_len <= 0) {
cinatra_metric_conf::server_failed_req_inc();
CINATRA_LOG_ERROR << "parse http header error";
close();
break;
@ -133,6 +153,9 @@ class coro_http_connection
if (type != content_type::chunked && type != content_type::multipart) {
size_t body_len = parser_.body_len();
if (body_len == 0) {
if (cinatra_metric_conf::enable_metric) {
cinatra_metric_conf::server_total_recv_bytes_inc(head_len);
}
if (parser_.method() == "GET"sv) {
if (request_.is_upgrade()) {
#ifdef CINATRA_ENABLE_GZIP
@ -152,6 +175,16 @@ class coro_http_connection
}
response_.set_delay(true);
}
else {
if (cinatra_metric_conf::enable_metric) {
mid = std::chrono::system_clock::now();
double count =
std::chrono::duration_cast<std::chrono::microseconds>(mid -
start)
.count();
cinatra_metric_conf::server_read_latency_observe(count);
}
}
}
}
else if (body_len <= head_buf_.size()) {
@ -161,6 +194,7 @@ class coro_http_connection
memcpy(body_.data(), data_ptr, body_len);
head_buf_.consume(head_buf_.size());
}
cinatra_metric_conf::server_total_recv_bytes_inc(head_len + body_len);
}
else {
size_t part_size = head_buf_.size();
@ -175,9 +209,22 @@ class coro_http_connection
size_to_read);
if (ec) {
CINATRA_LOG_ERROR << "async_read error: " << ec.message();
cinatra_metric_conf::server_failed_req_inc();
close();
break;
}
else {
if (cinatra_metric_conf::enable_metric) {
cinatra_metric_conf::server_total_recv_bytes_inc(head_len +
body_len);
mid = std::chrono::system_clock::now();
double count =
std::chrono::duration_cast<std::chrono::microseconds>(mid -
start)
.count();
cinatra_metric_conf::server_read_latency_observe(count);
}
}
}
}
@ -362,6 +409,14 @@ class coro_http_connection
}
}
if (cinatra_metric_conf::enable_metric) {
mid = std::chrono::system_clock::now();
double count =
std::chrono::duration_cast<std::chrono::microseconds>(mid - start)
.count();
cinatra_metric_conf::server_req_latency_observe(count);
}
response_.clear();
request_.clear();
buffers_.clear();
@ -375,18 +430,32 @@ class coro_http_connection
}
async_simple::coro::Lazy<bool> reply(bool need_to_bufffer = true) {
if (response_.status() >= status_type::bad_request) {
if (cinatra_metric_conf::enable_metric)
cinatra_metric_conf::server_failed_req_inc();
}
std::error_code ec;
size_t size;
if (multi_buf_) {
if (need_to_bufffer) {
response_.to_buffers(buffers_, chunk_size_str_);
}
int64_t send_size = 0;
for (auto &buf : buffers_) {
send_size += buf.size();
}
if (cinatra_metric_conf::enable_metric) {
cinatra_metric_conf::server_total_send_bytes_inc(send_size);
}
std::tie(ec, size) = co_await async_write(buffers_);
}
else {
if (need_to_bufffer) {
response_.build_resp_str(resp_str_);
}
if (cinatra_metric_conf::enable_metric) {
cinatra_metric_conf::server_total_send_bytes_inc(resp_str_.size());
}
std::tie(ec, size) = co_await async_write(asio::buffer(resp_str_));
}
@ -794,7 +863,7 @@ class coro_http_connection
return last_rwtime_;
}
auto &get_executor() { return *executor_; }
auto get_executor() { return executor_; }
void close(bool need_cb = true) {
if (has_closed_) {
@ -884,7 +953,7 @@ class coro_http_connection
private:
friend class multipart_reader_t<coro_http_connection>;
async_simple::Executor *executor_;
coro_io::ExecutorWrapper<> *executor_;
asio::ip::tcp::socket socket_;
coro_http_router &router_;
asio::streambuf head_buf_;

View File

@ -11,6 +11,7 @@
#include "ylt/coro_io/coro_file.hpp"
#include "ylt/coro_io/coro_io.hpp"
#include "ylt/coro_io/io_context_pool.hpp"
#include "ylt/metric/metric.hpp"
namespace cinatra {
enum class file_resp_format_type {
@ -181,6 +182,16 @@ class coro_http_server {
}
}
void use_metrics(std::string url_path = "/metrics") {
init_metrics();
set_http_handler<http_method::GET>(
url_path, [](coro_http_request &req, coro_http_response &res) {
std::string str = async_simple::coro::syncAwait(
ylt::default_metric_manager::serialize_static());
res.set_status_and_content(status_type::ok, std::move(str));
});
}
template <http_method... method, typename... Aspects>
void set_http_proxy_handler(std::string url_path,
std::vector<std::string_view> hosts,
@ -684,7 +695,7 @@ class coro_http_server {
connections_.emplace(conn_id, conn);
}
start_one(conn).via(&conn->get_executor()).detach();
start_one(conn).via(conn->get_executor()).detach();
}
}
@ -868,6 +879,7 @@ class coro_http_server {
easylog::logger<>::instance(); // init easylog singleton to make sure
// server destruct before easylog.
#endif
if (size_t pos = address.find(':'); pos != std::string::npos) {
auto port_sv = std::string_view(address).substr(pos + 1);
@ -886,6 +898,29 @@ class coro_http_server {
address_ = std::move(address);
}
private:
void init_metrics() {
using namespace ylt;
cinatra_metric_conf::enable_metric = true;
default_metric_manager::create_metric_static<counter_t>(
cinatra_metric_conf::server_total_req, "");
default_metric_manager::create_metric_static<counter_t>(
cinatra_metric_conf::server_failed_req, "");
default_metric_manager::create_metric_static<counter_t>(
cinatra_metric_conf::server_total_recv_bytes, "");
default_metric_manager::create_metric_static<counter_t>(
cinatra_metric_conf::server_total_send_bytes, "");
default_metric_manager::create_metric_static<gauge_t>(
cinatra_metric_conf::server_total_fd, "");
default_metric_manager::create_metric_static<histogram_t>(
cinatra_metric_conf::server_req_latency, "",
std::vector<double>{30, 40, 50, 60, 70, 80, 90, 100, 150});
default_metric_manager::create_metric_static<histogram_t>(
cinatra_metric_conf::server_read_latency, "",
std::vector<double>{3, 5, 7, 9, 13, 18, 23, 35, 50});
}
private:
std::unique_ptr<coro_io::io_context_pool> pool_;
asio::io_context *out_ctx_ = nullptr;

View File

@ -0,0 +1,128 @@
#pragma once
#include <atomic>
#include <string>
#include "ylt/metric/counter.hpp"
#include "ylt/metric/gauge.hpp"
#include "ylt/metric/histogram.hpp"
#include "ylt/metric/metric.hpp"
#include "ylt/metric/summary.hpp"
namespace cinatra {
struct cinatra_metric_conf {
inline static std::string server_total_req = "server_total_req";
inline static std::string server_failed_req = "server_failed_req";
inline static std::string server_total_fd = "server_total_fd";
inline static std::string server_total_recv_bytes = "server_total_recv_bytes";
inline static std::string server_total_send_bytes = "server_total_send_bytes";
inline static std::string server_req_latency = "server_req_latency";
inline static std::string server_read_latency = "server_read_latency";
inline static std::string server_total_thread_num = "server_total_thread_num";
inline static bool enable_metric = false;
inline static void server_total_req_inc() {
if (!enable_metric) {
return;
}
static auto m =
ylt::default_metric_manager::get_metric_static<ylt::counter_t>(
server_total_req);
if (m == nullptr) {
return;
}
m->inc();
}
inline static void server_failed_req_inc() {
if (!enable_metric) {
return;
}
static auto m =
ylt::default_metric_manager::get_metric_static<ylt::counter_t>(
server_failed_req);
if (m == nullptr) {
return;
}
m->inc();
}
inline static void server_total_fd_inc() {
if (!enable_metric) {
return;
}
static auto m =
ylt::default_metric_manager::get_metric_static<ylt::gauge_t>(
server_total_fd);
if (m == nullptr) {
return;
}
m->inc();
}
inline static void server_total_fd_dec() {
if (!enable_metric) {
return;
}
static auto m =
ylt::default_metric_manager::get_metric_static<ylt::gauge_t>(
server_total_fd);
if (m == nullptr) {
return;
}
m->dec();
}
inline static void server_total_recv_bytes_inc(double val) {
if (!enable_metric) {
return;
}
static auto m =
ylt::default_metric_manager::get_metric_static<ylt::counter_t>(
server_total_recv_bytes);
if (m == nullptr) {
return;
}
m->inc(val);
}
inline static void server_total_send_bytes_inc(double val) {
if (!enable_metric) {
return;
}
static auto m =
ylt::default_metric_manager::get_metric_static<ylt::counter_t>(
server_total_send_bytes);
if (m == nullptr) {
return;
}
m->inc(val);
}
inline static void server_req_latency_observe(double val) {
if (!enable_metric) {
return;
}
static auto m =
ylt::default_metric_manager::get_metric_static<ylt::histogram_t>(
server_req_latency);
if (m == nullptr) {
return;
}
m->observe(val);
}
inline static void server_read_latency_observe(double val) {
if (!enable_metric) {
return;
}
static auto m =
ylt::default_metric_manager::get_metric_static<ylt::histogram_t>(
server_read_latency);
if (m == nullptr) {
return;
}
m->observe(val);
}
};
} // namespace cinatra
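A sketch of the intended pattern (illustrative only): each hook above is a no-op until `enable_metric` is set and a metric has been registered under the expected name, which is what `coro_http_server::init_metrics()` in this commit does.

```cpp
#include "cinatra/metric_conf.hpp"  // path assumed
#include "ylt/metric/counter.hpp"

void enable_request_counting() {
  using namespace ylt;
  cinatra::cinatra_metric_conf::enable_metric = true;
  // register a counter under the exact name the hook looks up
  default_metric_manager::create_metric_static<counter_t>(
      cinatra::cinatra_metric_conf::server_total_req, "total requests");
  // from now on this hook really increments the counter
  cinatra::cinatra_metric_conf::server_total_req_inc();
}
```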

View File

@ -0,0 +1,5 @@
set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/output/tests)
add_executable(metric_test
test_metric.cpp
)
add_test(NAME metric_test COMMAND metric_test)

View File

@ -0,0 +1,280 @@
#include "ylt/metric/gauge.hpp"
#define DOCTEST_CONFIG_IMPLEMENT
#include <random>
#include "doctest.h"
#include "ylt/metric/counter.hpp"
#include "ylt/metric/histogram.hpp"
#include "ylt/metric/summary.hpp"
using namespace ylt;
TEST_CASE("test no lable") {
{
gauge_t g{"test_gauge", "help"};
g.inc();
g.inc();
std::string str;
g.serialize(str);
CHECK(str.find("test_gauge 2") != std::string::npos);
g.dec();
CHECK(g.value() == 1);
CHECK_THROWS_AS(g.dec({}, 1), std::invalid_argument);
CHECK_THROWS_AS(g.inc({}, 1), std::invalid_argument);
CHECK_THROWS_AS(g.update({}, 1), std::invalid_argument);
counter_t c{"test_counter", "help"};
c.inc();
c.inc();
std::string str1;
c.serialize(str1);
CHECK(str1.find("test_counter 2") != std::string::npos);
}
{
counter_t c("get_count", "get counter");
CHECK(c.metric_type() == MetricType::Counter);
CHECK(c.labels_name().empty());
c.inc();
CHECK(c.value() == 1);
c.inc();
CHECK(c.value() == 2);
c.inc(0);
CHECK(c.value() == 2);
CHECK_THROWS_AS(c.inc(-2), std::invalid_argument);
CHECK_THROWS_AS(c.inc({}, 1), std::invalid_argument);
CHECK_THROWS_AS(c.update({}, 1), std::invalid_argument);
c.update(10);
CHECK(c.value() == 10);
c.update(0);
CHECK(c.value() == 0);
}
}
TEST_CASE("test with atomic") {
counter_t c(
"get_count", "get counter",
std::map<std::string, std::string>{{"method", "GET"}, {"url", "/"}});
std::vector<std::string> labels_value{"GET", "/"};
c.inc(labels_value);
c.inc(labels_value, 2);
CHECK(c.value(labels_value) == 3);
CHECK_THROWS_AS(c.inc({"GET", "/test"}), std::invalid_argument);
CHECK_THROWS_AS(c.inc({"POST", "/"}), std::invalid_argument);
c.update(labels_value, 10);
CHECK(c.value(labels_value) == 10);
gauge_t g(
"get_qps", "get qps",
std::map<std::string, std::string>{{"method", "GET"}, {"url", "/"}});
g.inc(labels_value);
g.inc(labels_value, 2);
CHECK(g.value(labels_value) == 3);
CHECK_THROWS_AS(g.inc({"GET", "/test"}), std::invalid_argument);
CHECK_THROWS_AS(g.inc({"POST", "/"}), std::invalid_argument);
g.dec(labels_value);
g.dec(labels_value, 1);
CHECK(g.value(labels_value) == 1);
std::string str;
c.serialize(str);
std::cout << str;
std::string str1;
g.serialize(str1);
std::cout << str1;
CHECK(str.find("} 10") != std::string::npos);
CHECK(str1.find("} 1") != std::string::npos);
}
TEST_CASE("test counter with dynamic labels value") {
{
auto c = std::make_shared<counter_t>(
"get_count", "get counter", std::vector<std::string>{"method", "code"});
CHECK(c->name() == "get_count");
auto g = std::make_shared<gauge_t>(
"get_count", "get counter", std::vector<std::string>{"method", "code"});
CHECK(g->name() == "get_count");
CHECK(g->metric_name() == "gauge");
}
{
counter_t c("get_count", "get counter",
std::vector<std::string>{"method", "code"});
CHECK(c.labels_name() == std::vector<std::string>{"method", "code"});
c.inc({"GET", "200"}, 1);
auto values = c.value_map();
CHECK(values[{"GET", "200"}] == 1);
c.inc({"GET", "200"}, 2);
values = c.value_map();
CHECK(values[{"GET", "200"}] == 3);
std::string str;
c.serialize(str);
std::cout << str;
CHECK(str.find("# TYPE get_count counter") != std::string::npos);
CHECK(str.find("get_count{method=\"GET\",code=\"200\"} 3") !=
std::string::npos);
CHECK_THROWS_AS(c.inc({"GET", "200", "/"}, 2), std::invalid_argument);
c.update({"GET", "200"}, 20);
std::this_thread::sleep_for(std::chrono::milliseconds(10));
values = c.value_map();
CHECK(values[{"GET", "200"}] == 20);
}
}
TEST_CASE("test gauge") {
{
gauge_t g("get_count", "get counter");
CHECK(g.metric_type() == MetricType::Gauge);
CHECK(g.labels_name().empty());
g.inc();
CHECK(g.value() == 1);
g.inc();
CHECK(g.value() == 2);
g.inc(0);
g.dec();
CHECK(g.value() == 1);
g.dec();
CHECK(g.value() == 0);
}
{
gauge_t g("get_count", "get counter", {"method", "code", "url"});
CHECK(g.labels_name() == std::vector<std::string>{"method", "code", "url"});
// method, status code, url
g.inc({"GET", "200", "/"}, 1);
auto values = g.value_map();
CHECK(values[{"GET", "200", "/"}] == 1);
g.inc({"GET", "200", "/"}, 2);
values = g.value_map();
CHECK(values[{"GET", "200", "/"}] == 3);
std::string str;
g.serialize(str);
std::cout << str;
CHECK(str.find("# TYPE get_count gauge") != std::string::npos);
CHECK(str.find("get_count{method=\"GET\",code=\"200\",url=\"/\"} 3") !=
std::string::npos);
CHECK_THROWS_AS(g.dec({"GET", "200"}, 1), std::invalid_argument);
g.dec({"GET", "200", "/"}, 1);
values = g.value_map();
CHECK(values[{"GET", "200", "/"}] == 2);
g.dec({"GET", "200", "/"}, 2);
values = g.value_map();
CHECK(values[{"GET", "200", "/"}] == 0);
}
}
TEST_CASE("test histogram") {
histogram_t h("test", "help", {5.0, 10.0, 20.0, 50.0, 100.0});
h.observe(23);
auto counts = h.get_bucket_counts();
CHECK(counts[3]->value() == 1);
h.observe(42);
CHECK(counts[3]->value() == 2);
h.observe(60);
CHECK(counts[4]->value() == 1);
h.observe(120);
CHECK(counts[5]->value() == 1);
h.observe(1);
CHECK(counts[0]->value() == 1);
std::string str;
h.serialize(str);
std::cout << str;
CHECK(str.find("test_count") != std::string::npos);
CHECK(str.find("test_sum") != std::string::npos);
CHECK(str.find("test_bucket{le=\"5") != std::string::npos);
CHECK(str.find("test_bucket{le=\"+Inf\"}") != std::string::npos);
}
TEST_CASE("test summary") {
summary_t summary{"test_summary",
"summary help",
{{0.5, 0.05}, {0.9, 0.01}, {0.95, 0.005}, {0.99, 0.001}}};
std::random_device rd;
std::mt19937 gen(rd());
std::uniform_int_distribution<> distr(1, 100);
for (int i = 0; i < 50; i++) {
summary.observe(distr(gen));
}
std::this_thread::sleep_for(std::chrono::milliseconds(100));
std::string str;
async_simple::coro::syncAwait(summary.serialize_async(str));
std::cout << str;
CHECK(async_simple::coro::syncAwait(summary.get_count()) == 50);
CHECK(async_simple::coro::syncAwait(summary.get_sum()) > 0);
CHECK(str.find("test_summary") != std::string::npos);
CHECK(str.find("test_summary_count") != std::string::npos);
CHECK(str.find("test_summary_sum") != std::string::npos);
CHECK(str.find("test_summary{quantile=\"") != std::string::npos);
}
TEST_CASE("test register metric") {
auto c = std::make_shared<counter_t>(std::string("get_count"),
std::string("get counter"));
default_metric_manager::register_metric_static(c);
CHECK_FALSE(default_metric_manager::register_metric_static(c));
auto g = std::make_shared<gauge_t>(std::string("get_guage_count"),
std::string("get counter"));
default_metric_manager::register_metric_static(g);
auto map1 = default_metric_manager::metric_map_static();
for (auto& [k, v] : map1) {
bool r = k == "get_count" || k == "get_guage_count";
break;
}
CHECK(default_metric_manager::metric_count_static() >= 2);
CHECK(default_metric_manager::metric_keys_static().size() >= 2);
c->inc();
g->inc();
auto map = default_metric_manager::metric_map_static();
CHECK(map["get_count"]->as<counter_t>()->value() == 1);
CHECK(map["get_guage_count"]->as<gauge_t>()->value() == 1);
auto s =
async_simple::coro::syncAwait(default_metric_manager::serialize_static());
std::cout << s << "\n";
CHECK(s.find("get_count 1") != std::string::npos);
CHECK(s.find("get_guage_count 1") != std::string::npos);
auto m = default_metric_manager::get_metric_static<counter_t>("get_count");
CHECK(m->as<counter_t>()->value() == 1);
auto m1 =
default_metric_manager::get_metric_static<gauge_t>("get_guage_count");
CHECK(m1->as<gauge_t>()->value() == 1);
{
// because the first register_metric call used the no-lock (static) variant,
// visiting default_metric_manager with the locking (dynamic) interface throws.
auto c1 = std::make_shared<counter_t>(std::string(""), std::string(""));
CHECK_THROWS_AS(default_metric_manager::register_metric_dynamic(c1),
std::invalid_argument);
CHECK_THROWS_AS(default_metric_manager::metric_count_dynamic(),
std::invalid_argument);
CHECK_THROWS_AS(default_metric_manager::metric_keys_dynamic(),
std::invalid_argument);
CHECK_THROWS_AS(default_metric_manager::metric_map_dynamic(),
std::invalid_argument);
CHECK_THROWS_AS(default_metric_manager::get_metric_dynamic<counter_t>(""),
std::invalid_argument);
}
}
DOCTEST_MSVC_SUPPRESS_WARNING_WITH_PUSH(4007)
int main(int argc, char** argv) { return doctest::Context(argc, argv).run(); }
DOCTEST_MSVC_SUPPRESS_WARNING_POP

View File

@ -35,6 +35,10 @@ export const struct_xxx_Links = [
{ text: 'struct_xml struct_json struct_yaml 简介', link: '/zh/struct_xxx/struct_xxx_introduction' },
];
export const metric_Links = [
{ text: 'metric简介', link: '/zh/metric/metric_introduction' },
];
export const aboutLinks = [
{ text: 'purecpp', link: '/zh/about/community' }, //TODO support English in the future
{ text: 'contribute', link: '/zh/about/contribute' },

View File

@ -15,6 +15,7 @@ export const zh_themeConfig = <DefaultTheme.Config>
{ text: 'easylog', items: data.easylog_Links },
{ text: 'coro_http', items: data.coro_http_Links },
{ text: 'struct_xxx', items: data.struct_xxx_Links },
{ text: 'metric', items: data.metric_Links },
]
};

View File

@ -1,6 +1,6 @@
<p align="center">
<h1 align="center">yaLanTingLibs</h1>
<h6 align="center">C++20基础工具库集合包括struct_pack, struct_json, struct_xml, struct_pb, easylog, coro_rpc, coro_http 和 async_simple </h6>
<h6 align="center">C++20基础工具库集合包括struct_pack, struct_json, struct_xml, struct_pb, easylog, coro_rpc, coro_http, metric 和 async_simple </h6>
</p>
<p align="center">
<img alt="license" src="https://img.shields.io/github/license/alibaba/async_simple?style=flat-square">
@ -10,7 +10,7 @@
[English Version](../../en/guide/what_is_yalantinglibs.md)
yaLanTingLibs is a collection of modern C++ utility libraries. It currently includes struct_pack, struct_json, struct_xml, struct_yaml, struct_pb, easylog, coro_rpc, coro_io, coro_http and async_simple, and we are actively developing and adding more new features.
yaLanTingLibs is a collection of modern C++ utility libraries. It currently includes struct_pack, struct_json, struct_xml, struct_yaml, struct_pb, easylog, coro_rpc, coro_io, coro_http, metric and async_simple, and we are actively developing and adding more new features.
The goal of yaLanTingLibs: provide C++ developers with high-performance, extremely easy-to-use modern C++ utility libraries and help users build high-performance modern C++ applications.
@ -430,6 +430,10 @@ The yalantinglibs project itself supports the following configuration options; if you use cmake find_packa
No dependencies.
### metric
No dependencies.
## Standalone sub-repositories
coro_http is implemented by a standalone sub-repository: [cinatra](https://github.com/qicosmos/cinatra)

View File

@ -28,4 +28,6 @@ features:
details: A high-performance, easy-to-use logging library implemented in C++17, supporting cout streaming, sprintf, and fmt::format/std::format output.
- title: struct_xml struct_json struct_yaml
details: A high-performance, easy-to-use serialization library implemented in C++17, supporting serialization/deserialization of xml, json and yaml.
- title: metric
details: Introduction to metric
---

View File

@ -0,0 +1,511 @@
# Introduction to metric

metric is used to collect various indicators of an application; these indicators are used for system monitoring and alerting. There are four common metric types: Counter, Gauge, Histogram and Summary. These metrics follow the [Prometheus](https://hulining.gitbook.io/prometheus/introduction) data format.

## Counter

A Counter is a cumulative metric that represents a monotonically increasing counter: its value can only increase, or be reset to 0 on restart. For example, you can use a counter to represent the number of requests served, or the number of tasks completed or failed.

Do not use a counter for values that can decrease. For example, do not use a counter for the number of currently running processes; use a gauge instead.

## Gauge

A Gauge is a metric whose value can go up and down arbitrarily.

Gauges are typically used for measured values such as temperature or current memory usage, and also for "counts" that can go up and down, such as the number of concurrent requests.

For example:
```
# HELP node_cpu Seconds the cpus spent in each mode.
# TYPE node_cpu counter
node_cpu{cpu="cpu0",mode="idle"} 362812.7890625
# HELP node_load1 1m load average.
# TYPE node_load1 gauge
node_load1 3.0703125
```
## Histogram

A Histogram samples observations (usually things like request durations or response sizes) and counts them in configurable buckets. It also provides a sum of all observed values.

A histogram metric with base name `<basename>` exposes multiple time series during a scrape:

- cumulative counters for the buckets, exposed as `<basename>_bucket{le="<upper bound of the bucket>"}`
- the total sum of all observed values, exposed as `<basename>_sum`
- the count of observed events, exposed as `<basename>_count` (identical to `<basename>_bucket{le="+Inf"}` above)

For example:
```
# A histogram, which has a pretty complex representation in the text format:
# HELP http_request_duration_seconds A histogram of the request duration.
# TYPE http_request_duration_seconds histogram
http_request_duration_seconds_bucket{le="0.05"} 24054
http_request_duration_seconds_bucket{le="0.1"} 33444
http_request_duration_seconds_bucket{le="0.2"} 100392
http_request_duration_seconds_bucket{le="+Inf"} 144320
http_request_duration_seconds_sum 53423
http_request_duration_seconds_count 144320
```
## Summary

Similar to a histogram, a summary samples observations (usually things like request durations and response sizes). Besides the total count and the sum of all observed values, it can also compute configurable quantiles over a sliding time window.

A summary metric with base name `<basename>` exposes multiple time series during a scrape:

- streaming φ-quantiles (0 ≤ φ ≤ 1) of the observed events, exposed as `<basename>{quantile="<φ>"}`
- the total sum of all observed values, exposed as `<basename>_sum`
- the count of observed events, exposed as `<basename>_count`

For example:
```
# HELP prometheus_tsdb_wal_fsync_duration_seconds Duration of WAL fsync.
# TYPE prometheus_tsdb_wal_fsync_duration_seconds summary
prometheus_tsdb_wal_fsync_duration_seconds{quantile="0.5"} 0.012352463
prometheus_tsdb_wal_fsync_duration_seconds{quantile="0.9"} 0.014458005
prometheus_tsdb_wal_fsync_duration_seconds{quantile="0.99"} 0.017316173
prometheus_tsdb_wal_fsync_duration_seconds_sum 2.888716127000002
prometheus_tsdb_wal_fsync_duration_seconds_count 216
```
# Overview

metric provides four metric types (a combined construction sketch follows this list):

- counter: a metric that can only increase
- gauge: a metric that can increase or decrease; it derives from counter
- histogram: a histogram; the buckets must be set at initialization
- summary: a quantile metric; the quantiles and their errors must be set at initialization
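To make the list above concrete, here is a combined construction sketch (illustrative only; include paths are assumed and the constructor signatures are the ones documented in the sections below):

```cpp
#include "ylt/metric/counter.hpp"
#include "ylt/metric/gauge.hpp"
#include "ylt/metric/histogram.hpp"
#include "ylt/metric/summary.hpp"

int main() {
  using namespace ylt;
  counter_t total_req("total_req", "total requests");            // can only increase
  gauge_t fd_count("fd_count", "open fds");                       // can increase and decrease
  histogram_t latency("latency", "request latency",
                      std::vector<double>{10.0, 100.0, 1000.0});  // bucket boundaries
  summary_t quantiles("req_quantiles", "request quantiles",
                      {{0.5, 0.05}, {0.9, 0.01}, {0.99, 0.001}}); // quantile/error pairs

  total_req.inc();
  fd_count.dec();
  latency.observe(42);
  quantiles.observe(42);
}
```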
# label

Labels are optional; a metric may have no labels. A label is a key-value pair; the label keys must be set when the metric is created and are static and immutable.

If the label values are also set when the metric is created, such labels are called static labels.

If the label values are created dynamically at runtime, the labels are called dynamic labels.

Example of dynamic labels:
```cpp
{"method", "url"}
```
This label has only keys and no values, so it is a dynamic label. When the corresponding label values are generated dynamically later, it looks like this:
```cpp
{"GET", "/"}
{"POST", "/test"}
```
When using it, just fill in the method name and url dynamically:
```cpp
some_counter.inc({std::string(req.method()), req.url()}, 1);
```
If the number of label values passed in does not match the number of label keys set at creation time, an exception is thrown.

Example of static labels:
```cpp
{{"method", "GET"}, {"url", "/"}}
```
Both the keys and the values of this label are fixed, so it is a static label; when using it later, the static label values must be passed explicitly:
```cpp
some_counter.inc({"GET", "/"}, 1);
```
No labels: no labels are set when the metric is created; internally there is only a single count.
# Using counter and gauge

## Creating a metric without labels
```cpp
gauge_t g{"test_gauge", "help"};
g.inc();
g.inc(1);
std::string str;
g.serialize(str);
CHECK(str.find("test_gauge 2") != std::string::npos);
g.dec(1);
CHECK(g.value() == 1);
g.update(1);
CHECK_THROWS_AS(g.dec({"test"}, 1), std::invalid_argument);
CHECK_THROWS_AS(g.inc({"test"}, 1), std::invalid_argument);
CHECK_THROWS_AS(g.update({"test"}, 1), std::invalid_argument);
counter_t c{"test_counter", "help"};
c.inc();
c.inc(1);
std::string str1;
c.serialize(str1);
CHECK(str1.find("test_counter 2") != std::string::npos);
```
## counter/gauge API

Constructors:
```cpp
// no labels; inc is called without labels, e.g. c.inc()
// name: name of the metric object, used when registering it with a metric manager
// help: help information of the metric object
counter_t(std::string name, std::string help);

// labels: static labels; both keys and values must be filled in at construction,
// e.g. {{"method", "GET"}, {"url", "/"}}
// inc must then be called with the static label values: c.inc({"GET", "/"}, 1);
counter_t(std::string name, std::string help,
std::map<std::string, std::string> labels);

// labels_name: the key names of dynamic labels; the values are dynamic while the key
// names are fixed, so only the key names are given here, e.g. {"method", "url"}
// inc must then be called with the dynamic label values: c.inc({method, url}, 1);
counter_t(std::string name, std::string help,
std::vector<std::string> labels_name);
```
Basic functions:
```cpp
// get the count of a metric without labels
double value();

// get the count for the given labels; the argument is the dynamic or static label values
double value(const std::vector<std::string> &labels_value);

// increase the count of a metric without labels; a counter can only increase, never
// decrease, and a negative value throws an exception; use a gauge if the value needs to decrease
void inc(double val = 1);

// increase the count for the given labels; throws if the metric was created with static
// label values and the given values do not match them, or if the number of label values
// does not match the number of labels given at construction
void inc(const std::vector<std::string> &labels_value, double value = 1);

// serialize the metric into a Prometheus-format string
void serialize(std::string &str);

// return the internal count map of a labeled metric; the key is the label values and the
// value is the corresponding count, e.g. {{{"GET", "/"}, 100}, {{"POST", "/test"}, 20}}
std::map<std::vector<std::string>, double,
std::less<std::vector<std::string>>>
value_map();
```
Note: when using dynamic labels, consider whether the set of dynamic label values is unbounded. If it is, the internal map will also grow without bound; this should be avoided, and the set of dynamic label values should be finite.

gauge derives from counter and, compared with counter, adds an API for decreasing the count:
```cpp
// decrease the count of a metric without labels
void dec(double value = 1);

// decrease the count for the given labels; throws if the metric was created with static
// label values and the given values do not match them, or if the number of label values
// does not match the number of labels given at construction
void dec(const std::vector<std::string>& labels_value, double value = 1);
```
# Common base-class functions

All metrics derive from the metric_t base class, which provides some common methods, such as getting the metric's name, its type, the label key names, and so on.
```cpp
class metric_t {
public:
// get the name of the metric object
std::string_view name();

// get the help information of the metric object
std::string_view help();

// metric types
enum class MetricType {
Counter,
Gauge,
Histogram,
Summary,
Nil,
};

// get the metric type
MetricType metric_type();

// get the name of the metric type, e.g. counter, gauge, histogram or summary
std::string_view metric_name();

// get the label keys, e.g. {"method", "url"}
const std::vector<std::string>& labels_name();

// serialize; the derived class implements the actual serialization
virtual void serialize(std::string& str);

// serialization API dedicated to summary; the derived class implements it
virtual async_simple::coro::Lazy<void> serialize_async(std::string& out);

// downcast the base-class pointer to a derived-class pointer, e.g.:
// std::shared_ptr<metric_t> c = std::make_shared<counter_t>("test", "test");
// counter_t* t = c->as<counter_t>();
// t->value();
template <typename T>
T* as();
};
```
# Metric manager

If you want to manage multiple metrics in one place, register them with a metric manager; afterwards the metrics held by the manager can be invoked polymorphically to output their respective counts.

**It is recommended to create all metrics up front and register them with the manager**; afterwards metric objects can be looked up by name without locking.
```cpp
auto c = std::make_shared<counter_t>("qps_count", "qps help");
auto g = std::make_shared<gauge_t>("fd_count", "fd count help");
default_metric_manager::register_metric_static(c);
default_metric_manager::register_metric_static(g);
c->inc();
g->inc();
auto m = default_metric_manager::get_metric_static("qps_count");
CHECK(m->as<counter_t>()->value() == 1);
auto m1 = default_metric_manager::get_metric_static("fd_count");
CHECK(m1->as<gauge_t>()->value() == 1);
```
If you want to register with the manager dynamically, call the register_metric_dynamic interface, and later look up metric objects by name with the get_metric_dynamic interface; the dynamic interfaces take a lock internally.
```cpp
auto c = std::make_shared<counter_t>("qps_count", "qps help");
auto g = std::make_shared<gauge_t>("fd_count", "fd count help");
default_metric_manager::register_metric_dynamic(c);
default_metric_manager::register_metric_dynamic(g);
c->inc();
g->inc();
auto m = default_metric_manager::get_metric_dynamic<counter_t>("qps_count");
CHECK(m->as<counter_t>()->value() == 1);
auto m1 = default_metric_manager::get_metric_dynamic<gauge_t>("fd_count");
CHECK(m1->as<gauge_t>()->value() == 1);
```
Note: once static or dynamic was used at registration time, later calls to default_metric_manager must use interfaces with the same suffix. For example, if registration used register_metric_static, then later lookups by name must use get_metric_static, otherwise an exception is thrown; likewise, if registration used register_metric_dynamic, later lookups should use get_metric_dynamic, otherwise an exception is thrown.
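A minimal illustration of this rule, based on the unit test added in this commit: once the first registration used the static (no-lock) interface, the dynamic (locking) interface throws.

```cpp
auto c = std::make_shared<counter_t>("qps_count", "qps help");
default_metric_manager::register_metric_static(c);  // first call fixes the no-lock mode

auto c1 = std::make_shared<counter_t>("other_count", "help");
// mixing in the locking (dynamic) interface afterwards throws std::invalid_argument
CHECK_THROWS_AS(default_metric_manager::register_metric_dynamic(c1),
                std::invalid_argument);
CHECK_THROWS_AS(default_metric_manager::get_metric_dynamic<counter_t>("qps_count"),
                std::invalid_argument);
```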
Metric manager API:
```cpp
template <size_t ID = 0>
struct metric_manager_t {
// create and register a metric; returns the registered metric object
template <typename T, typename... Args>
static std::shared_ptr<T> create_metric_static(const std::string& name,
const std::string& help,
Args&&... args);
template <typename T, typename... Args>
static std::shared_ptr<T> create_metric_dynamic(const std::string& name,
const std::string& help,
Args&&... args);

// register a metric
static bool register_metric_static(std::shared_ptr<metric_t> metric);
static bool register_metric_dynamic(std::shared_ptr<metric_t> metric);

// get all registered metric objects
static std::map<std::string, std::shared_ptr<metric_t>> metric_map_static();
static std::map<std::string, std::shared_ptr<metric_t>> metric_map_dynamic();

// get the total number of registered metric objects
static size_t metric_count_static();
static size_t metric_count_dynamic();

// get the names of the registered metric objects
static std::vector<std::string> metric_keys_static();
static std::vector<std::string> metric_keys_dynamic();

// get a metric object by name; T is the concrete metric type, e.g. get_metric_static<gauge_t>();
// returns nullptr if the metric is not found
template <typename T>
static T* get_metric_static(const std::string& name);
template <typename T>
static T* get_metric_dynamic(const std::string& name);
static std::shared_ptr<metric_t> get_metric_static(const std::string& name);
static std::shared_ptr<metric_t> get_metric_dynamic(const std::string& name);

// serialize
static async_simple::coro::Lazy<std::string> serialize_static();
static async_simple::coro::Lazy<std::string> serialize_dynamic();
};
using default_metric_manager = metric_manager_t<0>;
```
The default metric_manager_t is default_metric_manager. If you want multiple metric managers, you can define a new metric manager yourself:
```cpp
constexpr size_t metric_id = 100;
using my_metric_manager = metric_manager_t<metric_id>;
```
# histogram
## api
```cpp
// name: name of the object; help: help information
// buckets: e.g. {1, 3, 7, 11, 23}; observed values automatically fall into one of the
// buckets and increase that bucket's count;
// there is also an implicit default +Inf bucket: values that do not fall into any of the
// configured buckets land in the +Inf bucket,
// so the actual total number of buckets is buckets.size() + 1
// each bucket is backed by a counter metric
histogram_t(std::string name, std::string help, std::vector<double> buckets);

// insert a value into the histogram_t; the count of the matching bucket is increased automatically
void observe(double value);

// get the counter metrics backing all buckets
std::vector<std::shared_ptr<counter_t>> get_bucket_counts();

// serialize
void serialize(std::string& str);
```
## Example
```cpp
histogram_t h("test", "help", {5.0, 10.0, 20.0, 50.0, 100.0});
h.observe(23);
auto counts = h.get_bucket_counts();
CHECK(counts[3]->value() == 1);
h.observe(42);
CHECK(counts[3]->value() == 2);
h.observe(60);
CHECK(counts[4]->value() == 1);
h.observe(120);
CHECK(counts[5]->value() == 1);
h.observe(1);
CHECK(counts[0]->value() == 1);
std::string str;
h.serialize(str);
std::cout << str;
CHECK(str.find("test_count") != std::string::npos);
CHECK(str.find("test_sum") != std::string::npos);
CHECK(str.find("test_bucket{le=\"5") != std::string::npos);
CHECK(str.find("test_bucket{le=\"+Inf\"}") != std::string::npos);
```
When creating a Histogram you must specify the buckets; sampled values fall into the different buckets, and the histogram also tracks the cumulative sum of the sampled values (sum) and the number of observations (count). Note that the bucket list must be sorted, otherwise the constructor throws an exception.

A characteristic of histogram statistics is that the data is cumulative. For example, with the two buckets 10 and 100, the first bucket counts all samples with value <= 10, the second bucket counts all samples <= 100, and all remaining samples are counted in the `+Inf` bucket.
```cpp
auto h = std::make_shared<histogram_t>(
std::string("test"), std::string("help"), std::vector{10.0, 100.0});
default_metric_manager::register_metric_static(h);
h->observe(5);
h->observe(80);
h->observe(120);
std::string str;
h->serialize(str);
std::cout<<str;
```
The first bucket's count is 1 and the second bucket's count is 2, because there are two samples less than or equal to 100. When observe(120) is called, the value does not fall into the 10 or 100 buckets but into the last bucket `+Inf`, so the `+Inf` bucket's count is 3, because there are 3 samples less than or equal to +Inf.

The serialized metric output is:
```
# HELP test help
# TYPE test histogram
test_bucket{le="10.000000"} 1.000000
test_bucket{le="100.000000"} 2.000000
test_bucket{le="+Inf"} 3.000000
test_sum 205.000000
test_count 3.000000
```
# summary
## api
```cpp
// Quantiles: the quantiles and their allowed errors, e.g. {{0.5, 0.05}, {0.9, 0.01}, {0.95, 0.005}, {0.99, 0.001}}
summary_t(std::string name, std::string help, Quantiles quantiles);

// insert a value into the summary_t; the quantiles are updated automatically
void observe(double value);

// get the quantile results, together with the sum and count of the observed values
async_simple::coro::Lazy<std::vector<double>> get_rates(double &sum, uint64_t &count);

// get the sum
async_simple::coro::Lazy<double> get_sum();

// get the number of observed values
async_simple::coro::Lazy<uint64_t> get_count();

// serialize
async_simple::coro::Lazy<void> serialize_async(std::string &str);
```
## Example
```cpp
summary_t summary{"test_summary",
"summary help",
{{0.5, 0.05}, {0.9, 0.01}, {0.95, 0.005}, {0.99, 0.001}}};
std::random_device rd;
std::mt19937 gen(rd());
std::uniform_int_distribution<> distr(1, 100);
for (int i = 0; i < 50; i++) {
summary.observe(distr(gen));
}
std::this_thread::sleep_for(std::chrono::milliseconds(10));
std::string str;
async_simple::coro::syncAwait(summary.serialize_async(str));
std::cout << str;
CHECK(async_simple::coro::syncAwait(summary.get_count()) == 50);
CHECK(async_simple::coro::syncAwait(summary.get_sum()) > 0);
CHECK(str.find("test_summary") != std::string::npos);
CHECK(str.find("test_summary_count") != std::string::npos);
CHECK(str.find("test_summary_sum") != std::string::npos);
CHECK(str.find("test_summary{quantile=\"") != std::string::npos);
```
Computing summary quantiles is the most expensive operation of all the metric types; avoid using it on critical paths so that it does not hurt performance.

When creating a Summary you must specify the quantiles and their errors. A quantile lies between 0 and 1 (both ends inclusive); for example, p50 is the median and p99 is the 0.99 quantile.
```cpp
summary_t summary{"test_summary",
"summary help",
{{0.5, 0.05}, {0.9, 0.01}, {0.95, 0.005}, {0.99, 0.001}}};
std::random_device rd;
std::mt19937 gen(rd());
std::uniform_int_distribution<> distr(1, 100);
for (int i = 0; i < 50; i++) {
summary.observe(distr(gen));
}
std::string str;
async_simple::coro::syncAwait(summary.serialize_async(str));
std::cout << str;
```
Output:
```
# HELP test_summary summary help
# TYPE test_summary summary
test_summary{quantile="0.500000"} 45.000000
test_summary{quantile="0.900000"} 83.000000
test_summary{quantile="0.950000"} 88.000000
test_summary{quantile="0.990000"} 93.000000
test_summary_sum 2497.000000
test_summary_count 50
```
## Configuring the Prometheus frontend

After installing [prometheus](https://github.com/prometheus/prometheus), open its configuration file prometheus.yml.

Modify the target server address to connect to:
```
- targets: ["127.0.0.1:9001"]
```
Then start prometheus; it will periodically access `http://127.0.0.1:9001/metrics` to pull all metric data.

Enter 127.0.0.1:9090 in a local browser to open the Prometheus frontend, type a metric name such as request_count into the search box on the page, and you will see the table and graph results.
# Enabling the built-in metrics in the cinatra http server

Metrics built into the http server:
```cpp
server_total_req: total number of requests handled by the server
server_failed_req: total number of failed requests on the server
server_total_fd: total number of fds (connections) used by the server
server_total_recv_bytes: total number of bytes received by the server
server_total_send_bytes: total number of bytes sent by the server
server_req_latency: http request latency, the interval from receiving a request to sending the response
server_read_latency: http read latency, the time taken to read a complete http request
```
```cpp
coro_http_server server(1, 9001);
server.use_metrics("/metrics");//这个url默认就是/metrics可以不填
```
Enter `http://127.0.0.1:9001/metrics` in a browser to see all the metrics.

To check how many clients are in the server's client pool, call `pool.free_client_count()`.

To check how many threads are in the server's internal thread pool, call `coro_io::get_total_thread_num()`.
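The two values above are plain functions rather than registered metrics. If you also want them exported via /metrics, one possible approach (illustrative only, not part of this commit) is to wrap such a value in a gauge and refresh it periodically; the name server_total_thread_num is already reserved in cinatra_metric_conf:

```cpp
using namespace ylt;
// register a gauge under the reserved name
auto thread_num = default_metric_manager::create_metric_static<gauge_t>(
    "server_total_thread_num", "total io threads of the server");
// refresh it whenever convenient, e.g. right before /metrics is scraped
thread_num->update(static_cast<double>(coro_io::get_total_thread_num()));
```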