1
  2
  3
  4
  5
  6
  7
  8
  9
 10
 11
 12
 13
 14
 15
 16
 17
 18
 19
 20
 21
 22
 23
 24
 25
 26
 27
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
// Copyright (c) The Libra Core Contributors
// SPDX-License-Identifier: Apache-2.0

#![recursion_limit = "128"]

#[macro_use]
extern crate prometheus;

pub mod counters;
mod json_encoder;
pub mod metric_server;

mod service_metrics;
pub use service_metrics::ServiceMetrics;

mod op_counters;
pub use op_counters::OpMetrics;

#[cfg(test)]
mod unit_tests;

// Re-export counter types from prometheus crate
pub use prometheus::{Histogram, IntCounter, IntCounterVec, IntGauge, IntGaugeVec};

use failure::Result;
use logger::prelude::*;
use prometheus::{
    core::{Collector, Metric},
    Encoder, TextEncoder,
};
use std::{
    collections::HashMap,
    fs::{create_dir_all, File, OpenOptions},
    hash::BuildHasher,
    io::Write,
    path::Path,
    thread, time,
};

/// Opens (creating if necessary) an append-mode metrics file named
/// `file_name` inside `dir_path`.
///
/// The directory is created first if it does not already exist. Panics if
/// either the directory or the file cannot be created/opened — metrics
/// dumping is configured at startup, so failure here is a deployment error.
fn get_metrics_file<P: AsRef<Path>>(dir_path: &P, file_name: &str) -> File {
    create_dir_all(dir_path).expect("Create metrics dir failed");

    let path = dir_path.as_ref().join(file_name);
    info!("Using metrics file {}", path.display());

    let file = OpenOptions::new().append(true).create(true).open(&path);
    file.expect("Open metrics file failed")
}

/// Gathers every metric registered with the default prometheus registry and
/// renders the lot in the Prometheus text exposition format, returning the
/// encoded bytes (or the encoder's error).
fn get_all_metrics_as_serialized_string() -> Result<Vec<u8>> {
    let mut buffer = Vec::new();
    TextEncoder::new().encode(&prometheus::gather(), &mut buffer)?;
    Ok(buffer)
}

/// Collects all registered metrics into a map of
/// `metric_name{label=value,...}` -> stringified value.
///
/// Counters and gauges report their current value; histograms and summaries
/// report their sample count. Panics on a metric type this function does not
/// know how to render, which indicates a programming error in the caller's
/// metric registration.
// TODO: use an existing metric encoder (same as used by
// prometheus/metric-server)
pub fn get_all_metrics() -> HashMap<String, String> {
    let all_metric_families = prometheus::gather();
    let mut all_metrics = HashMap::new();
    for metric_family in all_metric_families {
        let metrics = metric_family.get_metric();
        for metric in metrics {
            let v = if metric.has_counter() {
                metric.get_counter().get_value().to_string()
            } else if metric.has_gauge() {
                metric.get_gauge().get_value().to_string()
            } else if metric.has_histogram() {
                metric.get_histogram().get_sample_count().to_string()
            } else if metric.has_summary() {
                // Summaries previously fell into the panic branch below even
                // though prometheus supports them; report their sample count,
                // mirroring the histogram treatment.
                metric.get_summary().get_sample_count().to_string()
            } else {
                panic!("Unknown counter {}", metric_family.get_name())
            };
            let mut metric_name = metric_family.get_name().to_owned();
            let labels = metric.get_label();
            if !labels.is_empty() {
                // Render labels prometheus-style: name{k1=v1,k2=v2}
                let label_strings: Vec<String> = labels
                    .iter()
                    .map(|l| format!("{}={}", l.get_name(), l.get_value()))
                    .collect();
                let labels_string = format!("{{{}}}", label_strings.join(","));
                metric_name.push_str(&labels_string);
            }

            all_metrics.insert(metric_name, v);
        }
    }

    all_metrics
}

// Launches a background thread which will periodically collect metrics
// every interval and push them to Pushgateway hosted at `address`
pub fn push_all_metrics_to_pushgateway_periodically(
    job: &str,
    address: &str,
    peer_id: &str,
    interval: u64,
) {
    info!("Start pushing metrics to {}", address);
    let job = job.to_owned();
    let addr = address.to_owned();
    let peer_id = peer_id.to_owned();
    thread::spawn(move || loop {
        let res = prometheus::push_metrics(
            &job,
            labels! {"instance".to_owned() => peer_id.clone(), },
            &addr,
            prometheus::gather(),
        );
        if let Err(e) = res {
            error!("Fail to push metrics: {}", e);
        }
        thread::sleep(time::Duration::from_millis(interval));
    });
}

// Launches a background thread which will periodically collect metrics
// every interval and write them to the provided file
pub fn dump_all_metrics_to_file_periodically<P: AsRef<Path>>(
    dir_path: &P,
    file_name: &str,
    interval: u64,
) {
    let mut file = get_metrics_file(dir_path, file_name);
    thread::spawn(move || loop {
        let mut buffer = get_all_metrics_as_serialized_string().expect("Error gathering metrics");
        if !buffer.is_empty() {
            buffer.push(b'\n');
            file.write_all(&buffer).expect("Error writing metrics");
        }
        thread::sleep(time::Duration::from_millis(interval));
    });
}

/// Inserts `counter`'s current value into `col`, keyed by the name of its
/// first label.
///
/// Metrics with no labels are skipped. (Previously this indexed
/// `get_label()[0]` unconditionally and would panic on a label-less metric.)
pub fn export_counter<M, S>(col: &mut HashMap<String, String, S>, counter: &M)
where
    M: Metric,
    S: BuildHasher,
{
    let c = counter.metric();
    if let Some(label) = c.get_label().first() {
        col.insert(
            label.get_name().to_string(),
            c.get_counter().get_value().to_string(),
        );
    }
}

/// Returns the name of the first metric family produced by `metric`.
/// Panics if the collector yields no families, matching prior behavior.
pub fn get_metric_name<M>(metric: &M) -> String
where
    M: Collector,
{
    let families = metric.collect();
    families[0].get_name().to_owned()
}