glean_core/metrics/timing_distribution.rs
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.

use std::collections::HashMap;
use std::mem;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::{Arc, Mutex};
use std::time::Duration;

use crate::common_metric_data::CommonMetricDataInternal;
use crate::error_recording::{record_error, test_get_num_recorded_errors, ErrorType};
use crate::histogram::{Functional, Histogram};
use crate::metrics::time_unit::TimeUnit;
use crate::metrics::{DistributionData, Metric, MetricType};
use crate::storage::StorageManager;
use crate::CommonMetricData;
use crate::Glean;

// The base of the logarithm used to determine bucketing
const LOG_BASE: f64 = 2.0;

// The number of buckets per order of magnitude of the logarithm.
const BUCKETS_PER_MAGNITUDE: f64 = 8.0;
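// As a rough illustration only (the exact rounding lives in
// `crate::histogram::Functional`), a sample lands in a bucket whose index
// grows with the logarithm of the sample:
//
//     bucket_index ≈ floor(log2(sample) * BUCKETS_PER_MAGNITUDE)
//
// i.e. each power of two is split into roughly 8 buckets.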

// Maximum time, which means we retain a maximum of 316 buckets.
// It is automatically adjusted based on the `time_unit` parameter
// so that:
//
// - `nanosecond` - 10 minutes
// - `microsecond` - ~6.94 days
// - `millisecond` - ~19 years
const MAX_SAMPLE_TIME: u64 = 1000 * 1000 * 1000 * 60 * 10;
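
// For reference, the arithmetic behind the durations listed above:
//
//     MAX_SAMPLE_TIME = 600_000_000_000
//     interpreted as nanoseconds:  6e11 ns = 600 s         = 10 minutes
//     interpreted as microseconds: 6e11 µs = 600_000 s     ≈ 6.94 days
//     interpreted as milliseconds: 6e11 ms = 600_000_000 s ≈ 19 years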

/// Identifier for a running timer.
///
/// Its internals are considered private,
/// but due to UniFFI's behavior we expose its field for now.
#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)]
pub struct TimerId {
    /// This timer's id.
    pub id: u64,
}

impl From<u64> for TimerId {
    fn from(val: u64) -> TimerId {
        TimerId { id: val }
    }
}

impl From<usize> for TimerId {
    fn from(val: usize) -> TimerId {
        TimerId { id: val as u64 }
    }
}

/// A timing distribution metric.
///
/// Timing distributions are used to accumulate and store time measurements,
/// for analyzing distributions of the timing data.
#[derive(Clone, Debug)]
pub struct TimingDistributionMetric {
    meta: Arc<CommonMetricDataInternal>,
    time_unit: TimeUnit,
    next_id: Arc<AtomicUsize>,
    start_times: Arc<Mutex<HashMap<TimerId, u64>>>,
}

/// Create a snapshot of the histogram with a time unit.
///
/// The snapshot can be serialized into the payload format.
pub(crate) fn snapshot(hist: &Histogram<Functional>) -> DistributionData {
    DistributionData {
        // **Caution**: This cannot use `Histogram::snapshot_values` and needs to use the more
        // specialized snapshot function.
        values: hist
            .snapshot()
            .iter()
            .map(|(&k, &v)| (k as i64, v as i64))
            .collect(),
        sum: hist.sum() as i64,
        count: hist.count() as i64,
    }
}

impl MetricType for TimingDistributionMetric {
    fn meta(&self) -> &CommonMetricDataInternal {
        &self.meta
    }

    fn with_name(&self, name: String) -> Self {
        let mut meta = (*self.meta).clone();
        meta.inner.name = name;
        Self {
            meta: Arc::new(meta),
            time_unit: self.time_unit,
            next_id: Arc::new(AtomicUsize::new(1)),
            start_times: Arc::new(Mutex::new(Default::default())),
        }
    }

    fn with_dynamic_label(&self, label: String) -> Self {
        let mut meta = (*self.meta).clone();
        meta.inner.dynamic_label = Some(label);
        Self {
            meta: Arc::new(meta),
            time_unit: self.time_unit,
            next_id: Arc::new(AtomicUsize::new(1)),
            start_times: Arc::new(Mutex::new(Default::default())),
        }
    }
}

// IMPORTANT:
//
// When changing this implementation, make sure all the operations are
// also declared in the related trait in `../traits/`.
impl TimingDistributionMetric {
    /// Creates a new timing distribution metric.
    pub fn new(meta: CommonMetricData, time_unit: TimeUnit) -> Self {
        Self {
            meta: Arc::new(meta.into()),
            time_unit,
            next_id: Arc::new(AtomicUsize::new(1)),
            start_times: Arc::new(Mutex::new(Default::default())),
        }
    }

    /// Starts tracking time for the provided metric.
    ///
    /// The start timestamp (in nanoseconds) is captured when this method is called.
    ///
    /// This records an error if it's already tracking time (i.e.
    /// [`set_start`](TimingDistributionMetric::set_start) was already called with no
    /// corresponding [`set_stop_and_accumulate`](TimingDistributionMetric::set_stop_and_accumulate)): in
    /// that case the original start time will be preserved.
    ///
    /// # Returns
    ///
    /// A unique [`TimerId`] for the new timer.
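    ///
    /// # Example
    ///
    /// A minimal usage sketch (not compiled here; `metric` is assumed to be a
    /// `TimingDistributionMetric` constructed elsewhere and `do_the_work` is a
    /// placeholder for the code being measured):
    ///
    /// ```ignore
    /// // Start a timer, do the work to be measured, then stop it.
    /// let timer_id = metric.start();
    /// do_the_work();
    /// metric.stop_and_accumulate(timer_id);
    ///
    /// // Timers that should not be recorded can be cancelled instead.
    /// let other_id = metric.start();
    /// metric.cancel(other_id);
    /// ```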
    pub fn start(&self) -> TimerId {
        let start_time = time::precise_time_ns();
        let id = self.next_id.fetch_add(1, Ordering::SeqCst).into();
        let metric = self.clone();
        crate::launch_with_glean(move |_glean| metric.set_start(id, start_time));
        id
    }

    pub(crate) fn start_sync(&self) -> TimerId {
        let start_time = time::precise_time_ns();
        let id = self.next_id.fetch_add(1, Ordering::SeqCst).into();
        let metric = self.clone();
        metric.set_start(id, start_time);
        id
    }

    /// **Test-only API (exported for testing purposes).**
    ///
    /// Set start time for this metric synchronously.
    ///
    /// Use [`start`](Self::start) instead.
    #[doc(hidden)]
    pub fn set_start(&self, id: TimerId, start_time: u64) {
        let mut map = self.start_times.lock().expect("can't lock timings map");
        map.insert(id, start_time);
    }

    /// Stops tracking time for the provided metric and associated timer id.
    ///
    /// Adds a count to the corresponding bucket in the timing distribution.
    /// This will record an error if no
    /// [`set_start`](TimingDistributionMetric::set_start) was called.
    ///
    /// # Arguments
    ///
    /// * `id` - The [`TimerId`] to associate with this timing. This allows
    ///   for concurrent timing of events associated with different ids to the
    ///   same timing distribution metric.
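    ///
    /// # Example
    ///
    /// A sketch of overlapping measurements (not compiled here; `metric` is
    /// assumed to be a `TimingDistributionMetric` constructed elsewhere):
    ///
    /// ```ignore
    /// // Two overlapping measurements are kept apart by their `TimerId`s.
    /// let first = metric.start();
    /// let second = metric.start();
    /// metric.stop_and_accumulate(first);
    /// metric.stop_and_accumulate(second);
    /// ```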
    pub fn stop_and_accumulate(&self, id: TimerId) {
        let stop_time = time::precise_time_ns();
        let metric = self.clone();
        crate::launch_with_glean(move |glean| metric.set_stop_and_accumulate(glean, id, stop_time));
    }

    fn set_stop(&self, id: TimerId, stop_time: u64) -> Result<u64, (ErrorType, &str)> {
        let mut start_times = self.start_times.lock().expect("can't lock timings map");
        let start_time = match start_times.remove(&id) {
            Some(start_time) => start_time,
            None => return Err((ErrorType::InvalidState, "Timing not running")),
        };

        let duration = match stop_time.checked_sub(start_time) {
            Some(duration) => duration,
            None => {
                return Err((
                    ErrorType::InvalidValue,
                    "Timer stopped with negative duration",
                ))
            }
        };

        Ok(duration)
    }

    /// **Test-only API (exported for testing purposes).**
    ///
    /// Set stop time for this metric synchronously.
    ///
    /// Use [`stop_and_accumulate`](Self::stop_and_accumulate) instead.
    #[doc(hidden)]
    pub fn set_stop_and_accumulate(&self, glean: &Glean, id: TimerId, stop_time: u64) {
        if !self.should_record(glean) {
            let mut start_times = self.start_times.lock().expect("can't lock timings map");
            start_times.remove(&id);
            return;
        }

        // Duration is in nanoseconds.
        let mut duration = match self.set_stop(id, stop_time) {
            Err((err_type, err_msg)) => {
                record_error(glean, &self.meta, err_type, err_msg, None);
                return;
            }
            Ok(duration) => duration,
        };

        let min_sample_time = self.time_unit.as_nanos(1);
        let max_sample_time = self.time_unit.as_nanos(MAX_SAMPLE_TIME);

        duration = if duration < min_sample_time {
            // If the measurement is less than the minimum, clamp it to the
            // minimum. This is not recorded as an error.
            min_sample_time
        } else if duration > max_sample_time {
            let msg = format!(
                "Sample is longer than the max for a time_unit of {:?} ({} ns)",
                self.time_unit, max_sample_time
            );
            record_error(glean, &self.meta, ErrorType::InvalidOverflow, msg, None);
            max_sample_time
        } else {
            duration
        };

        if !self.should_record(glean) {
            return;
        }

        // Let's be defensive here:
        // The uploader tries to store some timing distribution metrics,
        // but in tests that storage might be gone already.
        // Let's just ignore those.
        // We do the same for counters.
        // This should never happen in real app usage.
        if let Some(storage) = glean.storage_opt() {
            storage.record_with(glean, &self.meta, |old_value| match old_value {
                Some(Metric::TimingDistribution(mut hist)) => {
                    hist.accumulate(duration);
                    Metric::TimingDistribution(hist)
                }
                _ => {
                    let mut hist = Histogram::functional(LOG_BASE, BUCKETS_PER_MAGNITUDE);
                    hist.accumulate(duration);
                    Metric::TimingDistribution(hist)
                }
            });
        } else {
            log::warn!(
                "Couldn't get storage. Can't record timing distribution '{}'.",
                self.meta.base_identifier()
            );
        }
    }

    /// Aborts a previous [`start`](Self::start) call.
    ///
    /// No error is recorded if no [`start`](Self::start) was called.
    ///
    /// # Arguments
    ///
    /// * `id` - The [`TimerId`] to associate with this timing. This allows
    ///   for concurrent timing of events associated with different ids to the
    ///   same timing distribution metric.
    pub fn cancel(&self, id: TimerId) {
        let metric = self.clone();
        crate::launch_with_glean(move |_glean| metric.cancel_sync(id));
    }

    /// Aborts a previous [`start`](Self::start) call synchronously.
    pub(crate) fn cancel_sync(&self, id: TimerId) {
        let mut map = self.start_times.lock().expect("can't lock timings map");
        map.remove(&id);
    }

    /// Accumulates the provided signed samples in the metric.
    ///
    /// This is required so that the platform-specific code can provide us with
    /// 64 bit signed integers if no `u64` comparable type is available. This
    /// will take care of filtering and reporting errors for any provided negative
    /// sample.
    ///
    /// Please note that this assumes that the provided samples are already in
    /// the "unit" declared by the instance of the metric type (e.g. if the
    /// instance this method was called on is using [`TimeUnit::Second`], then
    /// `samples` are assumed to be in that unit).
    ///
    /// # Arguments
    ///
    /// * `samples` - The vector holding the samples to be recorded by the metric.
    ///
    /// ## Notes
    ///
    /// Discards any negative value in `samples` and reports an [`ErrorType::InvalidValue`]
    /// for each of them. Reports an [`ErrorType::InvalidOverflow`] error for samples that
    /// are longer than `MAX_SAMPLE_TIME`.
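    ///
    /// # Example
    ///
    /// A sketch of recording pre-measured samples (not compiled here; `metric`
    /// is assumed to be a `TimingDistributionMetric` whose declared unit is
    /// milliseconds):
    ///
    /// ```ignore
    /// // Samples are in the metric's declared unit, here milliseconds.
    /// // The negative value is discarded and reported as an InvalidValue error.
    /// metric.accumulate_samples(vec![10, 250, -1]);
    /// ```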
    pub fn accumulate_samples(&self, samples: Vec<i64>) {
        let metric = self.clone();
        crate::launch_with_glean(move |glean| metric.accumulate_samples_sync(glean, &samples))
    }

    /// Accumulates precisely one signed sample and appends it to the metric.
    ///
    /// Precludes the need for a collection in the most common use case.
    ///
    /// Sign is required so that the platform-specific code can provide us with
    /// a 64 bit signed integer if no `u64` comparable type is available. This
    /// will take care of filtering and reporting errors for any provided negative
    /// sample.
    ///
    /// Please note that this assumes that the provided sample is already in
    /// the "unit" declared by the instance of the metric type (e.g. if the
    /// instance this method was called on is using [`crate::TimeUnit::Second`], then
    /// `sample` is assumed to be in that unit).
    ///
    /// # Arguments
    ///
    /// * `sample` - The singular sample to be recorded by the metric.
    ///
    /// ## Notes
    ///
    /// Discards any negative value and reports an [`ErrorType::InvalidValue`].
    /// Reports an [`ErrorType::InvalidOverflow`] error if the sample is longer than
    /// `MAX_SAMPLE_TIME`.
    pub fn accumulate_single_sample(&self, sample: i64) {
        let metric = self.clone();
        crate::launch_with_glean(move |glean| metric.accumulate_samples_sync(glean, &[sample]))
    }

    /// **Test-only API (exported for testing purposes).**
    ///
    /// Accumulates the provided signed samples in the metric.
    ///
    /// Use [`accumulate_samples`](Self::accumulate_samples) instead.
    #[doc(hidden)]
    pub fn accumulate_samples_sync(&self, glean: &Glean, samples: &[i64]) {
        if !self.should_record(glean) {
            return;
        }

        let mut num_negative_samples = 0;
        let mut num_too_long_samples = 0;
        let max_sample_time = self.time_unit.as_nanos(MAX_SAMPLE_TIME);

        glean.storage().record_with(glean, &self.meta, |old_value| {
            let mut hist = match old_value {
                Some(Metric::TimingDistribution(hist)) => hist,
                _ => Histogram::functional(LOG_BASE, BUCKETS_PER_MAGNITUDE),
            };

            for &sample in samples.iter() {
                if sample < 0 {
                    num_negative_samples += 1;
                } else {
                    let mut sample = sample as u64;

                    // Check the range prior to converting the incoming unit to
                    // nanoseconds, so we can compare against the constant
                    // MAX_SAMPLE_TIME.
                    if sample == 0 {
                        sample = 1;
                    } else if sample > MAX_SAMPLE_TIME {
                        num_too_long_samples += 1;
                        sample = MAX_SAMPLE_TIME;
                    }

                    sample = self.time_unit.as_nanos(sample);

                    hist.accumulate(sample);
                }
            }

            Metric::TimingDistribution(hist)
        });

        if num_negative_samples > 0 {
            let msg = format!("Accumulated {} negative samples", num_negative_samples);
            record_error(
                glean,
                &self.meta,
                ErrorType::InvalidValue,
                msg,
                num_negative_samples,
            );
        }

        if num_too_long_samples > 0 {
            let msg = format!(
                "{} samples are longer than the maximum of {}",
                num_too_long_samples, max_sample_time
            );
            record_error(
                glean,
                &self.meta,
                ErrorType::InvalidOverflow,
                msg,
                num_too_long_samples,
            );
        }
    }

    /// Accumulates the provided samples in the metric.
    ///
    /// # Arguments
    ///
    /// * `samples` - A list of samples recorded by the metric.
    ///   Samples must be in nanoseconds.
    ///
    /// ## Notes
    ///
    /// Reports an [`ErrorType::InvalidOverflow`] error for samples that
    /// are longer than `MAX_SAMPLE_TIME`.
    pub fn accumulate_raw_samples_nanos(&self, samples: Vec<u64>) {
        let metric = self.clone();
        crate::launch_with_glean(move |glean| {
            metric.accumulate_raw_samples_nanos_sync(glean, &samples)
        })
    }

    /// Accumulates precisely one duration to the metric.
    ///
    /// Like `TimingDistribution::accumulate_single_sample`, but for use when the
    /// duration is:
    ///
    /// * measured externally, or
    /// * in a unit different from the timing_distribution's internal TimeUnit.
    ///
    /// # Arguments
    ///
    /// * `duration` - The single duration to be recorded in the metric.
    ///
    /// ## Notes
    ///
    /// Reports an [`ErrorType::InvalidOverflow`] error if `duration` is longer than
    /// `MAX_SAMPLE_TIME`.
    ///
    /// The API client is responsible for ensuring that `duration` is derived from a
    /// monotonic clock source that behaves consistently over computer sleep across
    /// the application's platforms. Otherwise the resulting data may not share the same
    /// guarantees that other `timing_distribution` metrics' data do.
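    ///
    /// # Example
    ///
    /// A sketch of recording an externally measured duration (not compiled
    /// here; `metric` is assumed to be a `TimingDistributionMetric`
    /// constructed elsewhere and `do_the_work` is a placeholder):
    ///
    /// ```ignore
    /// use std::time::Instant;
    ///
    /// // Measure with the standard library's monotonic clock and hand the
    /// // resulting `Duration` to the metric.
    /// let start = Instant::now();
    /// do_the_work();
    /// metric.accumulate_raw_duration(start.elapsed());
    /// ```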
    pub fn accumulate_raw_duration(&self, duration: Duration) {
        let duration_ns = duration.as_nanos().try_into().unwrap_or(u64::MAX);
        let metric = self.clone();
        crate::launch_with_glean(move |glean| {
            metric.accumulate_raw_samples_nanos_sync(glean, &[duration_ns])
        })
    }

    /// **Test-only API (exported for testing purposes).**
    ///
    /// Accumulates the provided samples in the metric.
    ///
    /// Use [`accumulate_raw_samples_nanos`](Self::accumulate_raw_samples_nanos) instead.
    #[doc(hidden)]
    pub fn accumulate_raw_samples_nanos_sync(&self, glean: &Glean, samples: &[u64]) {
        if !self.should_record(glean) {
            return;
        }

        let mut num_too_long_samples = 0;
        let min_sample_time = self.time_unit.as_nanos(1);
        let max_sample_time = self.time_unit.as_nanos(MAX_SAMPLE_TIME);

        glean.storage().record_with(glean, &self.meta, |old_value| {
            let mut hist = match old_value {
                Some(Metric::TimingDistribution(hist)) => hist,
                _ => Histogram::functional(LOG_BASE, BUCKETS_PER_MAGNITUDE),
            };

            for &sample in samples.iter() {
                let mut sample = sample;

                if sample < min_sample_time {
                    sample = min_sample_time;
                } else if sample > max_sample_time {
                    num_too_long_samples += 1;
                    sample = max_sample_time;
                }

                // `sample` is in nanoseconds.
                hist.accumulate(sample);
            }

            Metric::TimingDistribution(hist)
        });

        if num_too_long_samples > 0 {
            let msg = format!(
                "{} samples are longer than the maximum of {}",
                num_too_long_samples, max_sample_time
            );
            record_error(
                glean,
                &self.meta,
                ErrorType::InvalidOverflow,
                msg,
                num_too_long_samples,
            );
        }
    }

    /// Gets the currently stored value.
    #[doc(hidden)]
    pub fn get_value<'a, S: Into<Option<&'a str>>>(
        &self,
        glean: &Glean,
        ping_name: S,
    ) -> Option<DistributionData> {
        let queried_ping_name = ping_name
            .into()
            .unwrap_or_else(|| &self.meta().inner.send_in_pings[0]);

        match StorageManager.snapshot_metric_for_test(
            glean.storage(),
            queried_ping_name,
            &self.meta.identifier(glean),
            self.meta.inner.lifetime,
        ) {
            Some(Metric::TimingDistribution(hist)) => Some(snapshot(&hist)),
            _ => None,
        }
    }

    /// **Test-only API (exported for FFI purposes).**
    ///
    /// Gets the currently stored value.
    ///
    /// This doesn't clear the stored value.
    ///
    /// # Arguments
    ///
    /// * `ping_name` - the optional name of the ping to retrieve the metric
    ///   for. Defaults to the first value in `send_in_pings`.
    ///
    /// # Returns
    ///
    /// The stored value or `None` if nothing stored.
    pub fn test_get_value(&self, ping_name: Option<String>) -> Option<DistributionData> {
        crate::block_on_dispatcher();
        crate::core::with_glean(|glean| self.get_value(glean, ping_name.as_deref()))
    }

    /// **Exported for test purposes.**
    ///
    /// Gets the number of recorded errors for the given metric and error type.
    ///
    /// # Arguments
    ///
    /// * `error` - The type of error
    ///
    /// # Returns
    ///
    /// The number of errors reported.
    pub fn test_get_num_recorded_errors(&self, error: ErrorType) -> i32 {
        crate::block_on_dispatcher();

        crate::core::with_glean(|glean| {
            test_get_num_recorded_errors(glean, self.meta(), error).unwrap_or(0)
        })
    }

    /// **Experimental:** Start a new histogram buffer associated with this timing distribution metric.
    ///
    /// A histogram buffer accumulates in-memory.
    /// Data is recorded into the metric on drop.
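    ///
    /// # Example
    ///
    /// A sketch of buffered accumulation (not compiled here; `metric` is
    /// assumed to be a `TimingDistributionMetric` constructed elsewhere):
    ///
    /// ```ignore
    /// let mut buffer = metric.start_buffer();
    /// for sample in [1u64, 2, 3] {
    ///     // Samples are in the metric's declared time unit.
    ///     buffer.accumulate(sample);
    /// }
    /// // Dropping the buffer commits the accumulated histogram to the metric.
    /// drop(buffer);
    /// ```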
    pub fn start_buffer(&self) -> LocalTimingDistribution<'_> {
        LocalTimingDistribution::new(self)
    }

    fn commit_histogram(&self, histogram: Histogram<Functional>, errors: usize) {
        let metric = self.clone();
        crate::launch_with_glean(move |glean| {
            if errors > 0 {
                let max_sample_time = metric.time_unit.as_nanos(MAX_SAMPLE_TIME);
                let msg = format!(
                    "{} samples are longer than the maximum of {}",
                    errors, max_sample_time
                );
                record_error(
                    glean,
                    &metric.meta,
                    ErrorType::InvalidOverflow,
                    msg,
                    Some(errors as i32),
                );
            }

            glean
                .storage()
                .record_with(glean, &metric.meta, move |old_value| {
                    let mut hist = match old_value {
                        Some(Metric::TimingDistribution(hist)) => hist,
                        _ => Histogram::functional(LOG_BASE, BUCKETS_PER_MAGNITUDE),
                    };

                    hist.merge(&histogram);
                    Metric::TimingDistribution(hist)
                });
        });
    }
}

/// **Experimental:** A histogram buffer associated with a specific instance of a [`TimingDistributionMetric`].
///
/// Accumulation happens in-memory.
/// Data is merged into the metric on [`Drop::drop`].
#[derive(Debug)]
pub struct LocalTimingDistribution<'a> {
    histogram: Histogram<Functional>,
    metric: &'a TimingDistributionMetric,
    errors: usize,
}

impl<'a> LocalTimingDistribution<'a> {
    /// Create a new histogram buffer referencing the timing distribution it will record into.
    fn new(metric: &'a TimingDistributionMetric) -> Self {
        let histogram = Histogram::functional(LOG_BASE, BUCKETS_PER_MAGNITUDE);
        Self {
            histogram,
            metric,
            errors: 0,
        }
    }

    /// Accumulates one sample into the histogram.
    ///
    /// The provided sample must be in the "unit" declared by the instance of the metric type
    /// (e.g. if the instance this method was called on is using [`crate::TimeUnit::Second`], then
    /// `sample` is assumed to be in seconds).
    ///
    /// Accumulation happens in-memory only.
    pub fn accumulate(&mut self, sample: u64) {
        // Check the range prior to converting the incoming unit to
        // nanoseconds, so we can compare against the constant
        // MAX_SAMPLE_TIME.
        let sample = if sample == 0 {
            1
        } else if sample > MAX_SAMPLE_TIME {
            self.errors += 1;
            MAX_SAMPLE_TIME
        } else {
            sample
        };

        let sample = self.metric.time_unit.as_nanos(sample);
        self.histogram.accumulate(sample)
    }

    /// Abandon this histogram buffer and don't commit accumulated data.
    pub fn abandon(mut self) {
        self.histogram.clear();
    }
}

impl Drop for LocalTimingDistribution<'_> {
    fn drop(&mut self) {
        if self.histogram.is_empty() {
            return;
        }

        // We want to move that value.
        // A `0/0` histogram doesn't allocate.
        let buffer = mem::replace(&mut self.histogram, Histogram::functional(0.0, 0.0));
        self.metric.commit_histogram(buffer, self.errors);
    }
}

#[cfg(test)]
mod test {
    use super::*;

    #[test]
    fn can_snapshot() {
        use serde_json::json;

        let mut hist = Histogram::functional(2.0, 8.0);

        for i in 1..=10 {
            hist.accumulate(i);
        }

        let snap = snapshot(&hist);

        let expected_json = json!({
            "sum": 55,
            "values": {
                "1": 1,
                "2": 1,
                "3": 1,
                "4": 1,
                "5": 1,
                "6": 1,
                "7": 1,
                "8": 1,
                "9": 1,
                "10": 1,
            },
        });

        assert_eq!(expected_json, json!(snap));
    }

    #[test]
    fn can_snapshot_sparse() {
        use serde_json::json;

        let mut hist = Histogram::functional(2.0, 8.0);

        hist.accumulate(1024);
        hist.accumulate(1024);
        hist.accumulate(1116);
        hist.accumulate(1448);

        let snap = snapshot(&hist);

        let expected_json = json!({
            "sum": 4612,
            "values": {
                "1024": 2,
                "1116": 1,
                "1448": 1,
            },
        });

        assert_eq!(expected_json, json!(snap));
    }
}