glean_core/metrics/timing_distribution.rs
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.

use std::collections::HashMap;
use std::mem;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::{Arc, Mutex};
use std::time::Duration;

use malloc_size_of_derive::MallocSizeOf;

use crate::common_metric_data::{CommonMetricDataInternal, DynamicLabelType};
use crate::error_recording::{record_error, test_get_num_recorded_errors, ErrorType};
use crate::histogram::{Functional, Histogram};
use crate::metrics::time_unit::TimeUnit;
use crate::metrics::{DistributionData, Metric, MetricType};
use crate::storage::StorageManager;
use crate::Glean;
use crate::{CommonMetricData, TestGetValue};

// The base of the logarithm used to determine bucketing.
const LOG_BASE: f64 = 2.0;

// The number of buckets per order of magnitude of the logarithm.
const BUCKETS_PER_MAGNITUDE: f64 = 8.0;

// Maximum time, which means we retain a maximum of 316 buckets.
// The value is interpreted in the metric's `time_unit`, so the effective
// maximum duration is:
//
// - `nanosecond` - 10 minutes
// - `microsecond` - ~6.94 days
// - `millisecond` - ~19 years
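//
// For example, interpreted as microseconds the raw value of
// 600_000_000_000 (= 1000 * 1000 * 1000 * 60 * 10) is 600_000 seconds,
// i.e. roughly 6.94 days; interpreted as milliseconds it is
// 600_000_000 seconds, i.e. roughly 19 years.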
const MAX_SAMPLE_TIME: u64 = 1000 * 1000 * 1000 * 60 * 10;

/// Identifier for a running timer.
///
/// Its internals are considered private,
/// but due to UniFFI's behavior we expose its field for now.
#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash, MallocSizeOf)]
pub struct TimerId {
    /// This timer's id.
    pub id: u64,
}

impl From<u64> for TimerId {
    fn from(val: u64) -> TimerId {
        TimerId { id: val }
    }
}

impl From<usize> for TimerId {
    fn from(val: usize) -> TimerId {
        TimerId { id: val as u64 }
    }
}

/// A timing distribution metric.
///
/// Timing distributions are used to accumulate and store time measurements for analyzing distributions of the timing data.
#[derive(Clone, Debug)]
pub struct TimingDistributionMetric {
    meta: Arc<CommonMetricDataInternal>,
    time_unit: TimeUnit,
    next_id: Arc<AtomicUsize>,
    start_times: Arc<Mutex<HashMap<TimerId, u64>>>,
}

impl ::malloc_size_of::MallocSizeOf for TimingDistributionMetric {
    fn size_of(&self, ops: &mut malloc_size_of::MallocSizeOfOps) -> usize {
        // Note: This is behind an `Arc`.
        // `size_of` should only be called on the main thread to avoid double-counting.
        self.meta.size_of(ops)
            + self.time_unit.size_of(ops)
            + self.next_id.size_of(ops)
            + self.start_times.lock().unwrap().size_of(ops)
    }
}

/// Create a snapshot of the histogram.
///
/// The snapshot can be serialized into the payload format.
pub(crate) fn snapshot(hist: &Histogram<Functional>) -> DistributionData {
    DistributionData {
        // **Caution**: This cannot use `Histogram::snapshot_values` and needs to use the more
        // specialized snapshot function.
        values: hist
            .snapshot()
            .iter()
            .map(|(&k, &v)| (k as i64, v as i64))
            .collect(),
        sum: hist.sum() as i64,
        count: hist.count() as i64,
    }
}

impl MetricType for TimingDistributionMetric {
    fn meta(&self) -> &CommonMetricDataInternal {
        &self.meta
    }

    fn with_name(&self, name: String) -> Self {
        let mut meta = (*self.meta).clone();
        meta.inner.name = name;
        Self {
            meta: Arc::new(meta),
            time_unit: self.time_unit,
            next_id: Arc::new(AtomicUsize::new(1)),
            start_times: Arc::new(Mutex::new(Default::default())),
        }
    }

    fn with_dynamic_label(&self, label: DynamicLabelType) -> Self {
        let mut meta = (*self.meta).clone();
        meta.inner.dynamic_label = Some(label);
        Self {
            meta: Arc::new(meta),
            time_unit: self.time_unit,
            next_id: Arc::new(AtomicUsize::new(1)),
            start_times: Arc::new(Mutex::new(Default::default())),
        }
    }
}

// IMPORTANT:
//
// When changing this implementation, make sure all the operations are
// also declared in the related trait in `../traits/`.
impl TimingDistributionMetric {
    /// Creates a new timing distribution metric.
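    ///
    /// # Example
    ///
    /// A minimal, illustrative sketch (not compiled as a doctest); the metric
    /// name, category and ping name below are made up for the example.
    ///
    /// ```ignore
    /// use glean_core::metrics::{TimeUnit, TimingDistributionMetric};
    /// use glean_core::{CommonMetricData, Lifetime};
    ///
    /// let metric = TimingDistributionMetric::new(
    ///     CommonMetricData {
    ///         name: "page_load".into(),
    ///         category: "example".into(),
    ///         send_in_pings: vec!["metrics".into()],
    ///         lifetime: Lifetime::Ping,
    ///         ..Default::default()
    ///     },
    ///     TimeUnit::Nanosecond,
    /// );
    /// ```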
    pub fn new(meta: CommonMetricData, time_unit: TimeUnit) -> Self {
        Self {
            meta: Arc::new(meta.into()),
            time_unit,
            next_id: Arc::new(AtomicUsize::new(1)),
            start_times: Arc::new(Mutex::new(Default::default())),
        }
    }

    /// Starts tracking time for the provided metric.
    ///
    /// The start time (a timestamp in nanoseconds) is captured when this
    /// function is called.
    ///
    /// This records an error if it's already tracking time (i.e.
    /// [`set_start`](TimingDistributionMetric::set_start) was already called with no
    /// corresponding [`set_stop_and_accumulate`](TimingDistributionMetric::set_stop_and_accumulate)):
    /// in that case the original start time will be preserved.
    ///
    /// # Returns
    ///
    /// A unique [`TimerId`] for the new timer.
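    ///
    /// # Example
    ///
    /// A minimal, illustrative sketch of the timing flow (not compiled as a
    /// doctest); `metric` is a previously constructed
    /// [`TimingDistributionMetric`] and `do_work` is a placeholder for the
    /// operation being measured.
    ///
    /// ```ignore
    /// let id = metric.start();
    /// do_work();
    /// metric.stop_and_accumulate(id);
    /// ```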
    pub fn start(&self) -> TimerId {
        let start_time = zeitstempel::now();
        let id = self.next_id.fetch_add(1, Ordering::SeqCst).into();
        let metric = self.clone();
        crate::launch_with_glean(move |_glean| metric.set_start(id, start_time));
        id
    }

    pub(crate) fn start_sync(&self) -> TimerId {
        let start_time = zeitstempel::now();
        let id = self.next_id.fetch_add(1, Ordering::SeqCst).into();
        let metric = self.clone();
        metric.set_start(id, start_time);
        id
    }

    /// **Test-only API (exported for testing purposes).**
    ///
    /// Set start time for this metric synchronously.
    ///
    /// Use [`start`](Self::start) instead.
    #[doc(hidden)]
    pub fn set_start(&self, id: TimerId, start_time: u64) {
        let mut map = self.start_times.lock().expect("can't lock timings map");
        map.insert(id, start_time);
    }

    /// Stops tracking time for the provided metric and associated timer id.
    ///
    /// Adds a count to the corresponding bucket in the timing distribution.
    /// The stop time (a timestamp in nanoseconds) is captured when this
    /// function is called.
    /// This will record an error if no
    /// [`set_start`](TimingDistributionMetric::set_start) was called.
    ///
    /// # Arguments
    ///
    /// * `id` - The [`TimerId`] to associate with this timing. This allows
    ///   for concurrent timing of events associated with different ids to the
    ///   same timing distribution metric.
    pub fn stop_and_accumulate(&self, id: TimerId) {
        let stop_time = zeitstempel::now();
        let metric = self.clone();
        crate::launch_with_glean(move |glean| metric.set_stop_and_accumulate(glean, id, stop_time));
    }

    fn set_stop(&self, id: TimerId, stop_time: u64) -> Result<u64, (ErrorType, &str)> {
        let mut start_times = self.start_times.lock().expect("can't lock timings map");
        let start_time = match start_times.remove(&id) {
            Some(start_time) => start_time,
            None => return Err((ErrorType::InvalidState, "Timing not running")),
        };

        let duration = match stop_time.checked_sub(start_time) {
            Some(duration) => duration,
            None => {
                return Err((
                    ErrorType::InvalidValue,
                    "Timer stopped with negative duration",
                ))
            }
        };

        Ok(duration)
    }

    /// **Test-only API (exported for testing purposes).**
    ///
    /// Set stop time for this metric synchronously.
    ///
    /// Use [`stop_and_accumulate`](Self::stop_and_accumulate) instead.
    #[doc(hidden)]
    pub fn set_stop_and_accumulate(&self, glean: &Glean, id: TimerId, stop_time: u64) {
        if !self.should_record(glean) {
            let mut start_times = self.start_times.lock().expect("can't lock timings map");
            start_times.remove(&id);
            return;
        }

        // Duration is in nanoseconds.
        let mut duration = match self.set_stop(id, stop_time) {
            Err((err_type, err_msg)) => {
                record_error(glean, &self.meta, err_type, err_msg, None);
                return;
            }
            Ok(duration) => duration,
        };

        let min_sample_time = self.time_unit.as_nanos(1);
        let max_sample_time = self.time_unit.as_nanos(MAX_SAMPLE_TIME);

        duration = if duration < min_sample_time {
            // If measurement is less than the minimum, just truncate. This is
            // not recorded as an error.
            min_sample_time
        } else if duration > max_sample_time {
            let msg = format!(
                "Sample is longer than the max for a time_unit of {:?} ({} ns)",
                self.time_unit, max_sample_time
            );
            record_error(glean, &self.meta, ErrorType::InvalidOverflow, msg, None);
            max_sample_time
        } else {
            duration
        };

        if !self.should_record(glean) {
            return;
        }

        // Let's be defensive here:
        // The uploader tries to store some timing distribution metrics,
        // but in tests that storage might be gone already.
        // Let's just ignore those.
        // We do the same for counters.
        // This should never happen in real app usage.
        if let Some(storage) = glean.storage_opt() {
            storage.record_with(glean, &self.meta, |old_value| match old_value {
                Some(Metric::TimingDistribution(mut hist)) => {
                    hist.accumulate(duration);
                    Metric::TimingDistribution(hist)
                }
                _ => {
                    let mut hist = Histogram::functional(LOG_BASE, BUCKETS_PER_MAGNITUDE);
                    hist.accumulate(duration);
                    Metric::TimingDistribution(hist)
                }
            });
        } else {
            log::warn!(
                "Couldn't get storage. Can't record timing distribution '{}'.",
                self.meta.base_identifier()
            );
        }
    }

    /// Aborts a previous [`start`](Self::start) call.
    ///
    /// No error is recorded if no [`start`](Self::start) was called.
    ///
    /// # Arguments
    ///
    /// * `id` - The [`TimerId`] to associate with this timing. This allows
    ///   for concurrent timing of events associated with different ids to the
    ///   same timing distribution metric.
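    ///
    /// # Example
    ///
    /// An illustrative sketch (not compiled as a doctest); `metric` is a
    /// previously constructed [`TimingDistributionMetric`].
    ///
    /// ```ignore
    /// let id = metric.start();
    /// // The measured operation failed; discard the timing instead of recording it.
    /// metric.cancel(id);
    /// ```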
    pub fn cancel(&self, id: TimerId) {
        let metric = self.clone();
        crate::launch_with_glean(move |_glean| metric.cancel_sync(id));
    }

    /// Aborts a previous [`start`](Self::start) call synchronously.
    pub(crate) fn cancel_sync(&self, id: TimerId) {
        let mut map = self.start_times.lock().expect("can't lock timings map");
        map.remove(&id);
    }

    /// Accumulates the provided signed samples in the metric.
    ///
    /// Signed samples are accepted so that the platform-specific code can provide us with
    /// 64 bit signed integers if no `u64` comparable type is available. This
    /// will take care of filtering and reporting errors for any provided negative
    /// sample.
    ///
    /// Please note that this assumes that the provided samples are already in
    /// the "unit" declared by the instance of the metric type (e.g. if the
    /// instance this method was called on is using [`TimeUnit::Second`], then
    /// `samples` are assumed to be in that unit).
    ///
    /// # Arguments
    ///
    /// * `samples` - The vector holding the samples to be recorded by the metric.
    ///
    /// ## Notes
    ///
    /// Discards any negative value in `samples` and reports an [`ErrorType::InvalidValue`]
    /// for each of them. Reports an [`ErrorType::InvalidOverflow`] error for samples that
    /// are longer than `MAX_SAMPLE_TIME`.
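    ///
    /// # Example
    ///
    /// An illustrative sketch (not compiled as a doctest); it assumes a metric
    /// declared with [`TimeUnit::Millisecond`], so the samples below are
    /// interpreted as milliseconds.
    ///
    /// ```ignore
    /// // Three millisecond samples; the negative one is discarded and
    /// // reported as an `InvalidValue` error.
    /// metric.accumulate_samples(vec![12, 34, -1]);
    /// ```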
    pub fn accumulate_samples(&self, samples: Vec<i64>) {
        let metric = self.clone();
        crate::launch_with_glean(move |glean| metric.accumulate_samples_sync(glean, &samples))
    }

    /// Accumulates precisely one signed sample and appends it to the metric.
    ///
    /// Precludes the need for a collection in the most common use case.
    ///
    /// Sign is required so that the platform-specific code can provide us with
    /// a 64 bit signed integer if no `u64` comparable type is available. This
    /// will take care of filtering and reporting errors for any provided negative
    /// sample.
    ///
    /// Please note that this assumes that the provided sample is already in
    /// the "unit" declared by the instance of the metric type (e.g. if the
    /// instance this method was called on is using [`crate::TimeUnit::Second`], then
    /// `sample` is assumed to be in that unit).
    ///
    /// # Arguments
    ///
    /// * `sample` - The singular sample to be recorded by the metric.
    ///
    /// ## Notes
    ///
    /// Discards any negative value and reports an [`ErrorType::InvalidValue`].
    /// Reports an [`ErrorType::InvalidOverflow`] error if the sample is longer than
    /// `MAX_SAMPLE_TIME`.
    pub fn accumulate_single_sample(&self, sample: i64) {
        let metric = self.clone();
        crate::launch_with_glean(move |glean| metric.accumulate_samples_sync(glean, &[sample]))
    }

    /// **Test-only API (exported for testing purposes).**
    ///
    /// Accumulates the provided signed samples in the metric.
    ///
    /// Use [`accumulate_samples`](Self::accumulate_samples) instead.
    #[doc(hidden)]
    pub fn accumulate_samples_sync(&self, glean: &Glean, samples: &[i64]) {
        if !self.should_record(glean) {
            return;
        }

        let mut num_negative_samples = 0;
        let mut num_too_long_samples = 0;
        let max_sample_time = self.time_unit.as_nanos(MAX_SAMPLE_TIME);

        glean.storage().record_with(glean, &self.meta, |old_value| {
            let mut hist = match old_value {
                Some(Metric::TimingDistribution(hist)) => hist,
                _ => Histogram::functional(LOG_BASE, BUCKETS_PER_MAGNITUDE),
            };

            for &sample in samples.iter() {
                if sample < 0 {
                    num_negative_samples += 1;
                } else {
                    let mut sample = sample as u64;

                    // Check the range prior to converting the incoming unit to
                    // nanoseconds, so we can compare against the constant
                    // MAX_SAMPLE_TIME.
                    if sample == 0 {
                        sample = 1;
                    } else if sample > MAX_SAMPLE_TIME {
                        num_too_long_samples += 1;
                        sample = MAX_SAMPLE_TIME;
                    }

                    sample = self.time_unit.as_nanos(sample);

                    hist.accumulate(sample);
                }
            }

            Metric::TimingDistribution(hist)
        });

        if num_negative_samples > 0 {
            let msg = format!("Accumulated {} negative samples", num_negative_samples);
            record_error(
                glean,
                &self.meta,
                ErrorType::InvalidValue,
                msg,
                num_negative_samples,
            );
        }

        if num_too_long_samples > 0 {
            let msg = format!(
                "{} samples are longer than the maximum of {}",
                num_too_long_samples, max_sample_time
            );
            record_error(
                glean,
                &self.meta,
                ErrorType::InvalidOverflow,
                msg,
                num_too_long_samples,
            );
        }
    }

    /// Accumulates the provided samples in the metric.
    ///
    /// # Arguments
    ///
    /// * `samples` - A list of samples recorded by the metric.
    ///   Samples must be in nanoseconds.
    ///
    /// ## Notes
    ///
    /// Reports an [`ErrorType::InvalidOverflow`] error for samples that
    /// are longer than `MAX_SAMPLE_TIME`.
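    ///
    /// # Example
    ///
    /// An illustrative sketch (not compiled as a doctest); `metric` is a
    /// previously constructed [`TimingDistributionMetric`].
    ///
    /// ```ignore
    /// // Samples are raw nanosecond values, independent of the metric's `time_unit`.
    /// metric.accumulate_raw_samples_nanos(vec![1_500_000, 2_000_000]);
    /// ```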
    pub fn accumulate_raw_samples_nanos(&self, samples: Vec<u64>) {
        let metric = self.clone();
        crate::launch_with_glean(move |glean| {
            metric.accumulate_raw_samples_nanos_sync(glean, &samples)
        })
    }

    /// Accumulates precisely one duration to the metric.
    ///
    /// Like `TimingDistribution::accumulate_single_sample`, but for use when the
    /// duration is:
    ///
    /// * measured externally, or
    /// * in a unit different from the timing_distribution's internal TimeUnit.
    ///
    /// # Arguments
    ///
    /// * `duration` - The single duration to be recorded in the metric.
    ///
    /// ## Notes
    ///
    /// Reports an [`ErrorType::InvalidOverflow`] error if `duration` is longer than
    /// `MAX_SAMPLE_TIME`.
    ///
    /// The API client is responsible for ensuring that `duration` is derived from a
    /// monotonic clock source that behaves consistently over computer sleep across
    /// the application's platforms. Otherwise the resulting data may not share the same
    /// guarantees that other `timing_distribution` metrics' data do.
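    ///
    /// # Example
    ///
    /// An illustrative sketch (not compiled as a doctest); `metric` is a
    /// previously constructed [`TimingDistributionMetric`].
    ///
    /// ```ignore
    /// use std::time::Duration;
    ///
    /// // An externally measured duration; it is converted to nanoseconds internally.
    /// metric.accumulate_raw_duration(Duration::from_millis(25));
    /// ```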
    pub fn accumulate_raw_duration(&self, duration: Duration) {
        let duration_ns = duration.as_nanos().try_into().unwrap_or(u64::MAX);
        let metric = self.clone();
        crate::launch_with_glean(move |glean| {
            metric.accumulate_raw_samples_nanos_sync(glean, &[duration_ns])
        })
    }

    /// **Test-only API (exported for testing purposes).**
    ///
    /// Accumulates the provided samples in the metric.
    ///
    /// Use [`accumulate_raw_samples_nanos`](Self::accumulate_raw_samples_nanos) instead.
    #[doc(hidden)]
    pub fn accumulate_raw_samples_nanos_sync(&self, glean: &Glean, samples: &[u64]) {
        if !self.should_record(glean) {
            return;
        }

        let mut num_too_long_samples = 0;
        let min_sample_time = self.time_unit.as_nanos(1);
        let max_sample_time = self.time_unit.as_nanos(MAX_SAMPLE_TIME);

        glean.storage().record_with(glean, &self.meta, |old_value| {
            let mut hist = match old_value {
                Some(Metric::TimingDistribution(hist)) => hist,
                _ => Histogram::functional(LOG_BASE, BUCKETS_PER_MAGNITUDE),
            };

            for &sample in samples.iter() {
                let mut sample = sample;

                if sample < min_sample_time {
                    sample = min_sample_time;
                } else if sample > max_sample_time {
                    num_too_long_samples += 1;
                    sample = max_sample_time;
                }

                // `sample` is in nanoseconds.
                hist.accumulate(sample);
            }

            Metric::TimingDistribution(hist)
        });

        if num_too_long_samples > 0 {
            let msg = format!(
                "{} samples are longer than the maximum of {}",
                num_too_long_samples, max_sample_time
            );
            record_error(
                glean,
                &self.meta,
                ErrorType::InvalidOverflow,
                msg,
                num_too_long_samples,
            );
        }
    }

    /// Gets the currently stored value.
    #[doc(hidden)]
    pub fn get_value<'a, S: Into<Option<&'a str>>>(
        &self,
        glean: &Glean,
        ping_name: S,
    ) -> Option<DistributionData> {
        let queried_ping_name = ping_name
            .into()
            .unwrap_or_else(|| &self.meta().inner.send_in_pings[0]);

        match StorageManager.snapshot_metric_for_test(
            glean.storage(),
            queried_ping_name,
            &self.meta.identifier(glean),
            self.meta.inner.lifetime,
        ) {
            Some(Metric::TimingDistribution(hist)) => Some(snapshot(&hist)),
            _ => None,
        }
    }

    /// **Exported for test purposes.**
    ///
    /// Gets the number of recorded errors for the given metric and error type.
    ///
    /// # Arguments
    ///
    /// * `error` - The type of error
    ///
    /// # Returns
    ///
    /// The number of errors reported.
    pub fn test_get_num_recorded_errors(&self, error: ErrorType) -> i32 {
        crate::block_on_dispatcher();

        crate::core::with_glean(|glean| {
            test_get_num_recorded_errors(glean, self.meta(), error).unwrap_or(0)
        })
    }

    /// **Experimental:** Start a new histogram buffer associated with this timing distribution metric.
    ///
    /// A histogram buffer accumulates in memory.
    /// Data is recorded into the metric on drop.
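    ///
    /// # Example
    ///
    /// An illustrative sketch (not compiled as a doctest); `metric` is a
    /// previously constructed [`TimingDistributionMetric`] and the samples are
    /// in the metric's `time_unit`.
    ///
    /// ```ignore
    /// let mut buffer = metric.start_buffer();
    /// for sample in [10, 20, 30] {
    ///     buffer.accumulate(sample);
    /// }
    /// // Dropping the buffer commits the accumulated histogram to the metric.
    /// drop(buffer);
    /// ```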
    pub fn start_buffer(&self) -> LocalTimingDistribution<'_> {
        LocalTimingDistribution::new(self)
    }

    fn commit_histogram(&self, histogram: Histogram<Functional>, errors: usize) {
        let metric = self.clone();
        crate::launch_with_glean(move |glean| {
            if errors > 0 {
                let max_sample_time = metric.time_unit.as_nanos(MAX_SAMPLE_TIME);
                let msg = format!(
                    "{} samples are longer than the maximum of {}",
                    errors, max_sample_time
                );
                record_error(
                    glean,
                    &metric.meta,
                    ErrorType::InvalidOverflow,
                    msg,
                    Some(errors as i32),
                );
            }

            glean
                .storage()
                .record_with(glean, &metric.meta, move |old_value| {
                    let mut hist = match old_value {
                        Some(Metric::TimingDistribution(hist)) => hist,
                        _ => Histogram::functional(LOG_BASE, BUCKETS_PER_MAGNITUDE),
                    };

                    hist.merge(&histogram);
                    Metric::TimingDistribution(hist)
                });
        });
    }
}

impl TestGetValue<DistributionData> for TimingDistributionMetric {
    /// **Test-only API (exported for FFI purposes).**
    ///
    /// Gets the currently stored value.
    ///
    /// This doesn't clear the stored value.
    ///
    /// # Arguments
    ///
    /// * `ping_name` - the optional name of the ping to retrieve the metric
    ///   for. Defaults to the first value in `send_in_pings`.
    ///
    /// # Returns
    ///
    /// The stored value or `None` if nothing stored.
    fn test_get_value(&self, ping_name: Option<String>) -> Option<DistributionData> {
        crate::block_on_dispatcher();
        crate::core::with_glean(|glean| self.get_value(glean, ping_name.as_deref()))
    }
}

/// **Experimental:** A histogram buffer associated with a specific instance of a [`TimingDistributionMetric`].
///
/// Accumulation happens in memory.
/// Data is merged into the metric on [`Drop::drop`].
#[derive(Debug)]
pub struct LocalTimingDistribution<'a> {
    histogram: Histogram<Functional>,
    metric: &'a TimingDistributionMetric,
    errors: usize,
}

impl<'a> LocalTimingDistribution<'a> {
    /// Create a new histogram buffer referencing the timing distribution it will record into.
    fn new(metric: &'a TimingDistributionMetric) -> Self {
        let histogram = Histogram::functional(LOG_BASE, BUCKETS_PER_MAGNITUDE);
        Self {
            histogram,
            metric,
            errors: 0,
        }
    }

    /// Accumulates one sample into the histogram.
    ///
    /// The provided sample must be in the "unit" declared by the instance of the metric type
    /// (e.g. if the instance this method was called on is using [`crate::TimeUnit::Second`], then
    /// `sample` is assumed to be in seconds).
    ///
    /// Accumulation happens in memory only.
    pub fn accumulate(&mut self, sample: u64) {
        // Check the range prior to converting the incoming unit to
        // nanoseconds, so we can compare against the constant
        // MAX_SAMPLE_TIME.
        let sample = if sample == 0 {
            1
        } else if sample > MAX_SAMPLE_TIME {
            self.errors += 1;
            MAX_SAMPLE_TIME
        } else {
            sample
        };

        let sample = self.metric.time_unit.as_nanos(sample);
        self.histogram.accumulate(sample)
    }

    /// Abandon this histogram buffer and don't commit accumulated data.
    pub fn abandon(mut self) {
        self.histogram.clear();
    }
}

impl Drop for LocalTimingDistribution<'_> {
    fn drop(&mut self) {
        if self.histogram.is_empty() {
            return;
        }

        // We want to move that value.
        // A `0/0` histogram doesn't allocate.
        let buffer = mem::replace(&mut self.histogram, Histogram::functional(0.0, 0.0));
        self.metric.commit_histogram(buffer, self.errors);
    }
}

#[cfg(test)]
mod test {
    use super::*;

    #[test]
    fn can_snapshot() {
        use serde_json::json;

        let mut hist = Histogram::functional(2.0, 8.0);

        for i in 1..=10 {
            hist.accumulate(i);
        }

        let snap = snapshot(&hist);

        let expected_json = json!({
            "sum": 55,
            "values": {
                "1": 1,
                "2": 1,
                "3": 1,
                "4": 1,
                "5": 1,
                "6": 1,
                "7": 1,
                "8": 1,
                "9": 1,
                "10": 1,
            },
        });

        assert_eq!(expected_json, json!(snap));
    }

    #[test]
    fn can_snapshot_sparse() {
        use serde_json::json;

        let mut hist = Histogram::functional(2.0, 8.0);

        hist.accumulate(1024);
        hist.accumulate(1024);
        hist.accumulate(1116);
        hist.accumulate(1448);

        let snap = snapshot(&hist);

        let expected_json = json!({
            "sum": 4612,
            "values": {
                "1024": 2,
                "1116": 1,
                "1448": 1,
            },
        });

        assert_eq!(expected_json, json!(snap));
    }
761}