//! A timing distribution metric.
//!
//! Timing distributions record multiple duration measurements, accumulated in
//! nanoseconds into an exponential (functional) histogram.

use std::collections::HashMap;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::{Arc, Mutex};

use crate::error_recording::{record_error, test_get_num_recorded_errors, ErrorType};
use crate::histogram::{Functional, Histogram};
use crate::metrics::time_unit::TimeUnit;
use crate::metrics::{DistributionData, Metric, MetricType};
use crate::storage::StorageManager;
use crate::CommonMetricData;
use crate::Glean;

// The base of the logarithm used to determine histogram bucketing.
const LOG_BASE: f64 = 2.0;

// The number of buckets per order of magnitude of the logarithm.
const BUCKETS_PER_MAGNITUDE: f64 = 8.0;

// The maximum sample value, expressed in the metric's `time_unit`
// (10 minutes for a time unit of nanoseconds). Longer samples are
// clamped and an `InvalidOverflow` error is recorded.
const MAX_SAMPLE_TIME: u64 = 1000 * 1000 * 1000 * 60 * 10;
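
/// Identifier for a running timer.
///
/// Returned by `TimingDistributionMetric::start` and later passed to
/// `stop_and_accumulate` or `cancel` to finish or discard the measurement.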
#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)]
pub struct TimerId {
pub id: u64,
}
impl From<u64> for TimerId {
fn from(val: u64) -> TimerId {
TimerId { id: val }
}
}
impl From<usize> for TimerId {
fn from(val: usize) -> TimerId {
TimerId { id: val as u64 }
}
}
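
/// A timing distribution metric.
///
/// Timing distributions are used to accumulate and store time measurements
/// for analyzing distributions of the timing data.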
#[derive(Clone, Debug)]
pub struct TimingDistributionMetric {
meta: Arc<CommonMetricData>,
time_unit: TimeUnit,
next_id: Arc<AtomicUsize>,
start_times: Arc<Mutex<HashMap<TimerId, u64>>>,
}
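
/// Creates a snapshot of the given histogram as `DistributionData`,
/// the representation used in ping payloads.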
pub(crate) fn snapshot(hist: &Histogram<Functional>) -> DistributionData {
DistributionData {
values: hist
.snapshot()
.into_iter()
.map(|(k, v)| (k as i64, v as i64))
.collect(),
sum: hist.sum() as i64,
}
}
impl MetricType for TimingDistributionMetric {
fn meta(&self) -> &CommonMetricData {
&self.meta
}
}
impl TimingDistributionMetric {
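    /// Creates a new timing distribution metric with the given time unit.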
pub fn new(meta: CommonMetricData, time_unit: TimeUnit) -> Self {
Self {
meta: Arc::new(meta),
time_unit,
next_id: Arc::new(AtomicUsize::new(0)),
start_times: Arc::new(Mutex::new(Default::default())),
}
}
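
    /// Starts tracking time.
    ///
    /// The start time is captured immediately; storing it is deferred to the
    /// dispatcher queue. Returns a unique `TimerId` to pass to
    /// `stop_and_accumulate` or `cancel`.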
pub fn start(&self) -> TimerId {
let start_time = time::precise_time_ns();
let id = self.next_id.fetch_add(1, Ordering::SeqCst).into();
let metric = self.clone();
crate::launch_with_glean(move |_glean| metric.set_start(id, start_time));
id
}
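
    /// Stores the start time for the given `TimerId` synchronously.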
#[doc(hidden)]
pub fn set_start(&self, id: TimerId, start_time: u64) {
let mut map = self.start_times.lock().expect("can't lock timings map");
map.insert(id, start_time);
}
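
    /// Stops tracking time for the provided timer id and accumulates the
    /// elapsed duration into the distribution.
    ///
    /// The stop time is captured immediately; the accumulation itself happens
    /// on the dispatcher queue.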
pub fn stop_and_accumulate(&self, id: TimerId) {
let stop_time = time::precise_time_ns();
let metric = self.clone();
crate::launch_with_glean(move |glean| metric.set_stop_and_accumulate(glean, id, stop_time));
}
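
    /// Removes the timer and computes the elapsed duration in nanoseconds.
    ///
    /// Returns an error if the timer was never started or if the duration
    /// would be negative.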
fn set_stop(&self, id: TimerId, stop_time: u64) -> Result<u64, (ErrorType, &str)> {
let mut start_times = self.start_times.lock().expect("can't lock timings map");
let start_time = match start_times.remove(&id) {
Some(start_time) => start_time,
None => return Err((ErrorType::InvalidState, "Timing not running")),
};
let duration = match stop_time.checked_sub(start_time) {
Some(duration) => duration,
None => {
return Err((
ErrorType::InvalidValue,
"Timer stopped with negative duration",
))
}
};
Ok(duration)
}
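
    /// Stops the timer and accumulates the duration synchronously.
    ///
    /// Durations below the minimum for the configured `time_unit` are
    /// truncated without error; durations above the maximum record an
    /// `InvalidOverflow` error and are clamped.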
#[doc(hidden)]
pub fn set_stop_and_accumulate(&self, glean: &Glean, id: TimerId, stop_time: u64) {
if !self.should_record(glean) {
let mut start_times = self.start_times.lock().expect("can't lock timings map");
start_times.remove(&id);
return;
}
let mut duration = match self.set_stop(id, stop_time) {
Err((err_type, err_msg)) => {
record_error(glean, &self.meta, err_type, err_msg, None);
return;
}
Ok(duration) => duration,
};
let min_sample_time = self.time_unit.as_nanos(1);
let max_sample_time = self.time_unit.as_nanos(MAX_SAMPLE_TIME);
duration = if duration < min_sample_time {
min_sample_time
} else if duration > max_sample_time {
let msg = format!(
"Sample is longer than the max for a time_unit of {:?} ({} ns)",
self.time_unit, max_sample_time
);
record_error(glean, &self.meta, ErrorType::InvalidOverflow, msg, None);
max_sample_time
} else {
duration
};
if !self.should_record(glean) {
return;
}
glean
.storage()
.record_with(glean, &self.meta, |old_value| match old_value {
Some(Metric::TimingDistribution(mut hist)) => {
hist.accumulate(duration);
Metric::TimingDistribution(hist)
}
_ => {
let mut hist = Histogram::functional(LOG_BASE, BUCKETS_PER_MAGNITUDE);
hist.accumulate(duration);
Metric::TimingDistribution(hist)
}
});
}
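
    /// Aborts a previous `start` call, discarding the stored start time.
    ///
    /// The cancellation is processed on the dispatcher queue.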
pub fn cancel(&self, id: TimerId) {
let metric = self.clone();
crate::launch_with_glean(move |_glean| metric.cancel_sync(id));
}
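
    /// Aborts a previous `start` call synchronously.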
fn cancel_sync(&self, id: TimerId) {
let mut map = self.start_times.lock().expect("can't lock timings map");
map.remove(&id);
}
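
    /// Accumulates the provided signed samples, interpreted in the metric's
    /// `time_unit`, into the metric.
    ///
    /// The accumulation happens on the dispatcher queue.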
pub fn accumulate_samples(&self, samples: Vec<i64>) {
let metric = self.clone();
crate::launch_with_glean(move |glean| metric.accumulate_samples_sync(glean, samples))
}
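
    /// Accumulates the provided signed samples synchronously.
    ///
    /// Negative samples record an `InvalidValue` error, samples above
    /// `MAX_SAMPLE_TIME` record an `InvalidOverflow` error and are clamped,
    /// and zero samples are bumped to 1 before conversion to nanoseconds.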
#[doc(hidden)]
pub fn accumulate_samples_sync(&self, glean: &Glean, samples: Vec<i64>) {
if !self.should_record(glean) {
return;
}
let mut num_negative_samples = 0;
let mut num_too_long_samples = 0;
let max_sample_time = self.time_unit.as_nanos(MAX_SAMPLE_TIME);
glean.storage().record_with(glean, &self.meta, |old_value| {
let mut hist = match old_value {
Some(Metric::TimingDistribution(hist)) => hist,
_ => Histogram::functional(LOG_BASE, BUCKETS_PER_MAGNITUDE),
};
for &sample in samples.iter() {
if sample < 0 {
num_negative_samples += 1;
} else {
let mut sample = sample as u64;
if sample == 0 {
sample = 1;
} else if sample > MAX_SAMPLE_TIME {
num_too_long_samples += 1;
sample = MAX_SAMPLE_TIME;
}
sample = self.time_unit.as_nanos(sample);
hist.accumulate(sample);
}
}
Metric::TimingDistribution(hist)
});
if num_negative_samples > 0 {
let msg = format!("Accumulated {} negative samples", num_negative_samples);
record_error(
glean,
&self.meta,
ErrorType::InvalidValue,
msg,
num_negative_samples,
);
}
if num_too_long_samples > 0 {
let msg = format!(
"{} samples are longer than the maximum of {}",
num_too_long_samples, max_sample_time
);
record_error(
glean,
&self.meta,
ErrorType::InvalidOverflow,
msg,
num_too_long_samples,
);
}
}
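
    /// Accumulates raw samples, given in nanoseconds, into the metric.
    ///
    /// The accumulation happens on the dispatcher queue.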
pub fn accumulate_raw_samples_nanos(&self, samples: Vec<u64>) {
let metric = self.clone();
crate::launch_with_glean(move |glean| {
metric.accumulate_raw_samples_nanos_sync(glean, &samples)
})
}
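
    /// Accumulates raw nanosecond samples synchronously.
    ///
    /// Samples are clamped to the valid range for the configured `time_unit`;
    /// samples above the maximum record an `InvalidOverflow` error.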
#[doc(hidden)]
pub fn accumulate_raw_samples_nanos_sync(&self, glean: &Glean, samples: &[u64]) {
if !self.should_record(glean) {
return;
}
let mut num_too_long_samples = 0;
let min_sample_time = self.time_unit.as_nanos(1);
let max_sample_time = self.time_unit.as_nanos(MAX_SAMPLE_TIME);
glean.storage().record_with(glean, &self.meta, |old_value| {
let mut hist = match old_value {
Some(Metric::TimingDistribution(hist)) => hist,
_ => Histogram::functional(LOG_BASE, BUCKETS_PER_MAGNITUDE),
};
for &sample in samples.iter() {
let mut sample = sample;
if sample < min_sample_time {
sample = min_sample_time;
} else if sample > max_sample_time {
num_too_long_samples += 1;
sample = max_sample_time;
}
hist.accumulate(sample);
}
Metric::TimingDistribution(hist)
});
if num_too_long_samples > 0 {
let msg = format!(
"{} samples are longer than the maximum of {}",
num_too_long_samples, max_sample_time
);
record_error(
glean,
&self.meta,
ErrorType::InvalidOverflow,
msg,
num_too_long_samples,
);
}
}
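
    /// Gets the currently stored value as `DistributionData`.
    ///
    /// Looks up the value in the given ping, defaulting to the first ping
    /// listed in `send_in_pings`.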
#[doc(hidden)]
pub fn get_value<'a, S: Into<Option<&'a str>>>(
&self,
glean: &Glean,
ping_name: S,
) -> Option<DistributionData> {
let queried_ping_name = ping_name
.into()
.unwrap_or_else(|| &self.meta().send_in_pings[0]);
match StorageManager.snapshot_metric_for_test(
glean.storage(),
queried_ping_name,
&self.meta.identifier(glean),
self.meta.lifetime,
) {
Some(Metric::TimingDistribution(hist)) => Some(snapshot(&hist)),
_ => None,
}
}
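
    /// **Test-only API.**
    ///
    /// Gets the currently stored value, blocking on the dispatcher until all
    /// pending tasks have completed.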
pub fn test_get_value(&self, ping_name: Option<String>) -> Option<DistributionData> {
crate::block_on_dispatcher();
crate::core::with_glean(|glean| self.get_value(glean, ping_name.as_deref()))
}
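
    /// **Test-only API.**
    ///
    /// Returns the number of errors of the given type recorded for this
    /// metric, blocking on the dispatcher first.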
pub fn test_get_num_recorded_errors(&self, error: ErrorType, ping_name: Option<String>) -> i32 {
crate::block_on_dispatcher();
crate::core::with_glean(|glean| {
test_get_num_recorded_errors(glean, self.meta(), error, ping_name.as_deref())
.unwrap_or(0)
})
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn can_snapshot() {
use serde_json::json;
let mut hist = Histogram::functional(2.0, 8.0);
for i in 1..=10 {
hist.accumulate(i);
}
let snap = snapshot(&hist);
let expected_json = json!({
"sum": 55,
"values": {
"1": 1,
"2": 1,
"3": 1,
"4": 1,
"5": 1,
"6": 1,
"7": 1,
"8": 1,
"9": 1,
"10": 1,
"11": 0,
},
});
assert_eq!(expected_json, json!(snap));
}
#[test]
fn can_snapshot_sparse() {
use serde_json::json;
let mut hist = Histogram::functional(2.0, 8.0);
hist.accumulate(1024);
hist.accumulate(1024);
hist.accumulate(1116);
hist.accumulate(1448);
let snap = snapshot(&hist);
let expected_json = json!({
"sum": 4612,
"values": {
"1024": 2,
"1116": 1,
"1217": 0,
"1327": 0,
"1448": 1,
"1579": 0,
},
});
assert_eq!(expected_json, json!(snap));
}
}