// proxy_multi/proxy_multi.rs

//! Use the proxy to send metrics to multiple outputs.
//!
//! Create a pipeline that fans out.
//! The key here is to use `AtomicBucket` to read
//! from the proxy and aggregate and flush metrics.
//!
//! Proxy
//!     -> AtomicBucket
//!             -> MultiOutput
//!                     -> Prometheus
//!                     -> Statsd
//!                     -> stdout
use dipstick::*;
use std::time::Duration;

16metrics! {
17    pub PROXY: Proxy = "my_proxy" => {}
18}
20fn main() {
21    // Placeholder to collect output targets
22    // This will prefix all metrics with "my_stats"
23    // before flushing them.
24    let mut targets = MultiInput::new().named("my_stats");
25
26    // Skip the metrics here... we just use this for the output
27    // Follow the same pattern for Statsd, Graphite, etc.
28    let prometheus = Prometheus::push_to("http://localhost:9091/metrics/job/dipstick_example")
29        .expect("Prometheus Socket");
30    targets = targets.add_target(prometheus);
31
32    // Add stdout
33    targets = targets.add_target(Stream::write_to_stdout());
34
35    // Create the stats and drain targets
36    let bucket = AtomicBucket::new();
37    bucket.drain(targets);
38    // Crucial, set the flush interval, otherwise risk hammering targets
39    bucket.flush_every(Duration::from_secs(3));
40
41    // Now wire up the proxy target with the stats and you're all set
42    let proxy = Proxy::default();
43    proxy.target(bucket.clone());
44
45    // Example using the macro! Proxy sugar
46    PROXY.target(bucket.named("global"));
47
48    loop {
49        // Using the default proxy
50        proxy.counter("beans").count(100);
51        proxy.timer("braincells").interval_us(420);
52        // global example
53        PROXY.counter("my_proxy_counter").count(123);
54        PROXY.timer("my_proxy_timer").interval_us(2000000);
55        std::thread::sleep(Duration::from_millis(100));
56    }
57}