1use std::mem;
6use std::sync::Arc;
7
8use crate::common_metric_data::{CommonMetricDataInternal, DynamicLabelType};
9use crate::error_recording::{record_error, test_get_num_recorded_errors, ErrorType};
10use crate::histogram::{Functional, Histogram};
11use crate::metrics::memory_unit::MemoryUnit;
12use crate::metrics::{DistributionData, Metric, MetricType};
13use crate::storage::StorageManager;
14use crate::Glean;
15use crate::{CommonMetricData, TestGetValue};
16
/// Base of the logarithm used to determine bucketing of the functional histogram.
const LOG_BASE: f64 = 2.0;

/// The number of buckets per each order of magnitude of the logarithm.
const BUCKETS_PER_MAGNITUDE: f64 = 16.0;

/// Maximum recordable sample: 1 terabyte (2^40 bytes).
/// Larger samples are clamped to this value and an error is recorded.
const MAX_BYTES: u64 = 1 << 40;
26
/// A memory distribution metric.
///
/// Memory distributions accumulate memory sizes into an exponential
/// (functional) histogram after converting samples to bytes.
#[derive(Clone, Debug)]
pub struct MemoryDistributionMetric {
    // Shared metric metadata (name, pings, lifetime, ...); cheap to clone.
    meta: Arc<CommonMetricDataInternal>,
    // Unit incoming samples are expressed in; converted to bytes on record.
    memory_unit: MemoryUnit,
}
35
36pub(crate) fn snapshot(hist: &Histogram<Functional>) -> DistributionData {
40 DistributionData {
41 values: hist
44 .snapshot()
45 .iter()
46 .map(|(&k, &v)| (k as i64, v as i64))
47 .collect(),
48 sum: hist.sum() as i64,
49 count: hist.count() as i64,
50 }
51}
52
53impl MetricType for MemoryDistributionMetric {
54 fn meta(&self) -> &CommonMetricDataInternal {
55 &self.meta
56 }
57
58 fn with_name(&self, name: String) -> Self {
59 let mut meta = (*self.meta).clone();
60 meta.inner.name = name;
61 Self {
62 meta: Arc::new(meta),
63 memory_unit: self.memory_unit,
64 }
65 }
66
67 fn with_dynamic_label(&self, label: DynamicLabelType) -> Self {
68 let mut meta = (*self.meta).clone();
69 meta.inner.dynamic_label = Some(label);
70 Self {
71 meta: Arc::new(meta),
72 memory_unit: self.memory_unit,
73 }
74 }
75}
76
77impl MemoryDistributionMetric {
82 pub fn new(meta: CommonMetricData, memory_unit: MemoryUnit) -> Self {
84 Self {
85 meta: Arc::new(meta.into()),
86 memory_unit,
87 }
88 }
89
90 pub fn accumulate(&self, sample: i64) {
102 let metric = self.clone();
103 crate::launch_with_glean(move |glean| metric.accumulate_sync(glean, sample))
104 }
105
106 #[doc(hidden)]
110 pub fn accumulate_sync(&self, glean: &Glean, sample: i64) {
111 if !self.should_record(glean) {
112 return;
113 }
114
115 if sample < 0 {
116 record_error(
117 glean,
118 &self.meta,
119 ErrorType::InvalidValue,
120 "Accumulated a negative sample",
121 None,
122 );
123 return;
124 }
125
126 let mut sample = self.memory_unit.as_bytes(sample as u64);
127
128 if sample > MAX_BYTES {
129 let msg = "Sample is bigger than 1 terabyte";
130 record_error(glean, &self.meta, ErrorType::InvalidValue, msg, None);
131 sample = MAX_BYTES;
132 }
133
134 if let Some(storage) = glean.storage_opt() {
141 storage.record_with(glean, &self.meta, |old_value| match old_value {
142 Some(Metric::MemoryDistribution(mut hist)) => {
143 hist.accumulate(sample);
144 Metric::MemoryDistribution(hist)
145 }
146 _ => {
147 let mut hist = Histogram::functional(LOG_BASE, BUCKETS_PER_MAGNITUDE);
148 hist.accumulate(sample);
149 Metric::MemoryDistribution(hist)
150 }
151 });
152 } else {
153 log::warn!(
154 "Couldn't get storage. Can't record memory distribution '{}'.",
155 self.meta.base_identifier()
156 );
157 }
158 }
159
160 pub fn accumulate_samples(&self, samples: Vec<i64>) {
184 let metric = self.clone();
185 crate::launch_with_glean(move |glean| metric.accumulate_samples_sync(glean, samples))
186 }
187
188 #[doc(hidden)]
192 pub fn accumulate_samples_sync(&self, glean: &Glean, samples: Vec<i64>) {
193 if !self.should_record(glean) {
194 return;
195 }
196
197 let mut num_negative_samples = 0;
198 let mut num_too_log_samples = 0;
199
200 glean.storage().record_with(glean, &self.meta, |old_value| {
201 let mut hist = match old_value {
202 Some(Metric::MemoryDistribution(hist)) => hist,
203 _ => Histogram::functional(LOG_BASE, BUCKETS_PER_MAGNITUDE),
204 };
205
206 for &sample in samples.iter() {
207 if sample < 0 {
208 num_negative_samples += 1;
209 } else {
210 let sample = sample as u64;
211 let mut sample = self.memory_unit.as_bytes(sample);
212 if sample > MAX_BYTES {
213 num_too_log_samples += 1;
214 sample = MAX_BYTES;
215 }
216
217 hist.accumulate(sample);
218 }
219 }
220 Metric::MemoryDistribution(hist)
221 });
222
223 if num_negative_samples > 0 {
224 let msg = format!("Accumulated {} negative samples", num_negative_samples);
225 record_error(
226 glean,
227 &self.meta,
228 ErrorType::InvalidValue,
229 msg,
230 num_negative_samples,
231 );
232 }
233
234 if num_too_log_samples > 0 {
235 let msg = format!(
236 "Accumulated {} samples larger than 1TB",
237 num_too_log_samples
238 );
239 record_error(
240 glean,
241 &self.meta,
242 ErrorType::InvalidValue,
243 msg,
244 num_too_log_samples,
245 );
246 }
247 }
248
249 #[doc(hidden)]
251 pub fn get_value<'a, S: Into<Option<&'a str>>>(
252 &self,
253 glean: &Glean,
254 ping_name: S,
255 ) -> Option<DistributionData> {
256 let queried_ping_name = ping_name
257 .into()
258 .unwrap_or_else(|| &self.meta().inner.send_in_pings[0]);
259
260 match StorageManager.snapshot_metric_for_test(
261 glean.storage(),
262 queried_ping_name,
263 &self.meta.identifier(glean),
264 self.meta.inner.lifetime,
265 ) {
266 Some(Metric::MemoryDistribution(hist)) => Some(snapshot(&hist)),
267 _ => None,
268 }
269 }
270
271 pub fn test_get_num_recorded_errors(&self, error: ErrorType) -> i32 {
283 crate::block_on_dispatcher();
284
285 crate::core::with_glean(|glean| {
286 test_get_num_recorded_errors(glean, self.meta(), error).unwrap_or(0)
287 })
288 }
289
290 pub fn start_buffer(&self) -> LocalMemoryDistribution<'_> {
295 LocalMemoryDistribution::new(self)
296 }
297
298 fn commit_histogram(&self, histogram: Histogram<Functional>, errors: usize) {
299 let metric = self.clone();
300 crate::launch_with_glean(move |glean| {
301 if errors > 0 {
302 let msg = format!("Accumulated {} samples larger than 1TB", errors);
303 record_error(
304 glean,
305 &metric.meta,
306 ErrorType::InvalidValue,
307 msg,
308 Some(errors as i32),
309 );
310 }
311
312 glean
313 .storage()
314 .record_with(glean, &metric.meta, move |old_value| {
315 let mut hist = match old_value {
316 Some(Metric::MemoryDistribution(hist)) => hist,
317 _ => Histogram::functional(LOG_BASE, BUCKETS_PER_MAGNITUDE),
318 };
319
320 hist.merge(&histogram);
321 Metric::MemoryDistribution(hist)
322 });
323 });
324 }
325}
326
327impl TestGetValue<DistributionData> for MemoryDistributionMetric {
328 fn test_get_value(&self, ping_name: Option<String>) -> Option<DistributionData> {
343 crate::block_on_dispatcher();
344 crate::core::with_glean(|glean| self.get_value(glean, ping_name.as_deref()))
345 }
346}
347
/// An in-memory buffer for a memory distribution metric.
///
/// Samples accumulate into a local histogram and are committed to the parent
/// metric only when the buffer is dropped (see the `Drop` impl below).
#[derive(Debug)]
pub struct LocalMemoryDistribution<'a> {
    // Locally accumulated histogram, not yet persisted.
    histogram: Histogram<Functional>,
    // The metric this buffer commits into on drop.
    metric: &'a MemoryDistributionMetric,
    // Number of samples clamped because they exceeded `MAX_BYTES`.
    errors: usize,
}
358
359impl<'a> LocalMemoryDistribution<'a> {
360 fn new(metric: &'a MemoryDistributionMetric) -> Self {
362 let histogram = Histogram::functional(LOG_BASE, BUCKETS_PER_MAGNITUDE);
363 Self {
364 histogram,
365 metric,
366 errors: 0,
367 }
368 }
369
370 pub fn accumulate(&mut self, sample: u64) {
378 let mut sample = self.metric.memory_unit.as_bytes(sample);
379 if sample > MAX_BYTES {
380 self.errors += 1;
381 sample = MAX_BYTES;
382 }
383 self.histogram.accumulate(sample)
384 }
385
386 pub fn abandon(mut self) {
388 self.histogram.clear();
390 }
391}
392
impl Drop for LocalMemoryDistribution<'_> {
    fn drop(&mut self) {
        // Nothing buffered (or `abandon` was called): nothing to commit.
        if self.histogram.is_empty() {
            return;
        }

        // Move the buffered histogram out so it can be sent to the dispatcher.
        // The `0.0/0.0` placeholder is deliberate: presumably a functional
        // histogram with those parameters does not allocate — confirm against
        // `Histogram::functional` before changing.
        let buffer = mem::replace(&mut self.histogram, Histogram::functional(0.0, 0.0));
        self.metric.commit_histogram(buffer, self.errors);
    }
}