glean_core/core/mod.rs
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.

use std::collections::HashMap;
use std::fs::{self, File};
use std::io::{self, Write};
use std::path::{Path, PathBuf};
use std::sync::atomic::{AtomicU8, Ordering};
use std::sync::{Arc, Mutex};
use std::time::Duration;

use chrono::{DateTime, FixedOffset};
use malloc_size_of_derive::MallocSizeOf;
use once_cell::sync::OnceCell;
use uuid::Uuid;

use crate::database::Database;
use crate::debug::DebugOptions;
use crate::error::ClientIdFileError;
use crate::event_database::EventDatabase;
use crate::internal_metrics::{
    AdditionalMetrics, CoreMetrics, DatabaseMetrics, ExceptionState, HealthMetrics,
};
use crate::internal_pings::InternalPings;
use crate::metrics::{
    self, ExperimentMetric, Metric, MetricType, PingType, RecordedExperiment, RemoteSettingsConfig,
};
use crate::ping::PingMaker;
use crate::storage::{StorageManager, INTERNAL_STORAGE};
use crate::upload::{PingUploadManager, PingUploadTask, UploadResult, UploadTaskAction};
use crate::util::{local_now_with_offset, sanitize_application_id};
use crate::{
    scheduler, system, AttributionMetrics, CommonMetricData, DistributionMetrics, ErrorKind,
    InternalConfiguration, Lifetime, PingRateLimit, Result, DEFAULT_MAX_EVENTS,
    GLEAN_SCHEMA_VERSION, GLEAN_VERSION, KNOWN_CLIENT_ID,
};

const CLIENT_ID_PLAIN_FILENAME: &str = "client_id.txt";
static GLEAN: OnceCell<Mutex<Glean>> = OnceCell::new();

/// Gets a reference to the global Glean object, if it has been set.
pub fn global_glean() -> Option<&'static Mutex<Glean>> {
    GLEAN.get()
}

/// Sets or replaces the global Glean object.
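///
/// # Example
///
/// A minimal sketch (not a full setup); it assumes a `Glean` instance built from an
/// `InternalConfiguration` as shown in the [`Glean`] docs:
///
/// ```rust,ignore
/// let glean = Glean::new(cfg).unwrap();
/// // Install it as the global instance used by `with_glean`/`with_glean_mut`.
/// setup_glean(glean).unwrap();
/// ```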
pub fn setup_glean(glean: Glean) -> Result<()> {
    // The `OnceCell` type wrapping our Glean is thread-safe and can only be set once.
    // Therefore even if our check for it being empty succeeds, setting it could fail if a
    // concurrent thread is quicker in setting it.
    // However this will not cause a bigger problem, as the second `set` operation will just fail.
    // We can log it and move on.
    //
    // For all wrappers this is not a problem, as the Glean object is initialized exactly once on
    // calling `initialize` on the global singleton and further operations check that it has been
    // initialized.
    if GLEAN.get().is_none() {
        if GLEAN.set(Mutex::new(glean)).is_err() {
            log::warn!(
                "Global Glean object is initialized already. This probably happened concurrently."
            )
        }
    } else {
        // We allow overriding the global Glean object to support test mode.
        // In test mode the Glean object is fully destroyed and recreated.
        // This all happens behind a mutex and is therefore also thread-safe.
        let mut lock = GLEAN.get().unwrap().lock().unwrap();
        *lock = glean;
    }
    Ok(())
}

/// Execute `f` passing the global Glean object.
///
/// Panics if the global Glean object has not been set.
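///
/// # Example
///
/// A minimal sketch, assuming the global Glean object was installed with [`setup_glean`]:
///
/// ```rust,ignore
/// let upload_enabled = with_glean(|glean| glean.is_upload_enabled());
/// ```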
pub fn with_glean<F, R>(f: F) -> R
where
    F: FnOnce(&Glean) -> R,
{
    let glean = global_glean().expect("Global Glean object not initialized");
    let lock = glean.lock().unwrap();
    f(&lock)
}

/// Execute `f` passing the global Glean object mutably.
///
/// Panics if the global Glean object has not been set.
pub fn with_glean_mut<F, R>(f: F) -> R
where
    F: FnOnce(&mut Glean) -> R,
{
    let glean = global_glean().expect("Global Glean object not initialized");
    let mut lock = glean.lock().unwrap();
    f(&mut lock)
}

/// Execute `f` passing the global Glean object if it has been set.
///
/// Returns `None` if the global Glean object has not been set.
/// Returns `Some(T)` otherwise.
pub fn with_opt_glean<F, R>(f: F) -> Option<R>
where
    F: FnOnce(&Glean) -> R,
{
    let glean = global_glean()?;
    let lock = glean.lock().unwrap();
    Some(f(&lock))
}

/// The object holding meta information about a Glean instance.
///
/// ## Example
///
/// Create a new Glean instance, register a ping, record a simple counter and then send the final
/// ping.
///
/// ```rust,no_run
/// # use glean_core::{Glean, InternalConfiguration, CommonMetricData, metrics::*};
/// let cfg = InternalConfiguration {
///     data_path: "/tmp/glean".into(),
///     application_id: "glean.sample.app".into(),
///     language_binding_name: "Rust".into(),
///     upload_enabled: true,
///     max_events: None,
///     delay_ping_lifetime_io: false,
///     app_build: "".into(),
///     use_core_mps: false,
///     trim_data_to_registered_pings: false,
///     log_level: None,
///     rate_limit: None,
///     enable_event_timestamps: true,
///     experimentation_id: None,
///     enable_internal_pings: true,
///     ping_schedule: Default::default(),
///     ping_lifetime_threshold: 1000,
///     ping_lifetime_max_time: 2000,
/// };
/// let mut glean = Glean::new(cfg).unwrap();
/// let ping = PingType::new("sample", true, false, true, true, true, vec![], vec![], true, vec![]);
/// glean.register_ping_type(&ping);
///
/// let call_counter: CounterMetric = CounterMetric::new(CommonMetricData {
///     name: "calls".into(),
///     category: "local".into(),
///     send_in_pings: vec!["sample".into()],
///     ..Default::default()
/// });
///
/// call_counter.add_sync(&glean, 1);
///
/// ping.submit_sync(&glean, None);
/// ```
///
/// ## Note
///
/// In specific language bindings, this is usually wrapped in a singleton and all metric recording goes to a single instance of this object.
/// In the Rust core, it is possible to create multiple instances, which is used in testing.
#[derive(Debug, MallocSizeOf)]
pub struct Glean {
    upload_enabled: bool,
    pub(crate) data_store: Option<Database>,
    event_data_store: EventDatabase,
    pub(crate) core_metrics: CoreMetrics,
    pub(crate) additional_metrics: AdditionalMetrics,
    pub(crate) database_metrics: DatabaseMetrics,
    pub(crate) health_metrics: HealthMetrics,
    pub(crate) internal_pings: InternalPings,
    data_path: PathBuf,
    application_id: String,
    ping_registry: HashMap<String, PingType>,
    #[ignore_malloc_size_of = "external non-allocating type"]
    start_time: DateTime<FixedOffset>,
    max_events: u32,
    is_first_run: bool,
    pub(crate) upload_manager: PingUploadManager,
    debug: DebugOptions,
    pub(crate) app_build: String,
    pub(crate) schedule_metrics_pings: bool,
    pub(crate) remote_settings_epoch: AtomicU8,
    #[ignore_malloc_size_of = "TODO: Expose Glean's inner memory allocations (bug 1960592)"]
    pub(crate) remote_settings_config: Arc<Mutex<RemoteSettingsConfig>>,
    pub(crate) with_timestamps: bool,
    pub(crate) ping_schedule: HashMap<String, Vec<String>>,
}

impl Glean {
    /// Creates and initializes a new Glean object for use in a subprocess.
    ///
    /// Importantly, this will not send any pings at startup, since that
    /// sort of management should only happen in the main process.
    pub fn new_for_subprocess(cfg: &InternalConfiguration, scan_directories: bool) -> Result<Self> {
        log::info!("Creating new Glean v{}", GLEAN_VERSION);

        let application_id = sanitize_application_id(&cfg.application_id);
        if application_id.is_empty() {
            return Err(ErrorKind::InvalidConfig.into());
        }

        let data_path = Path::new(&cfg.data_path);
        let event_data_store = EventDatabase::new(data_path)?;

        // Create an upload manager with rate limiting of 15 pings every 60 seconds.
        let mut upload_manager = PingUploadManager::new(&cfg.data_path, &cfg.language_binding_name);
        let rate_limit = cfg.rate_limit.as_ref().unwrap_or(&PingRateLimit {
            seconds_per_interval: 60,
            pings_per_interval: 15,
        });
        upload_manager.set_rate_limiter(
            rate_limit.seconds_per_interval,
            rate_limit.pings_per_interval,
        );

        // We only scan the pending ping directories when calling this from a subprocess.
        // When calling this from `::new` we need to scan the directories after dealing with the upload state.
        if scan_directories {
            let _scanning_thread = upload_manager.scan_pending_pings_directories(false);
        }

        let start_time = local_now_with_offset();
        let mut this = Self {
            upload_enabled: cfg.upload_enabled,
            // In the subprocess, we want to avoid accessing the database entirely.
            // The easiest way to ensure that is to just not initialize it.
            data_store: None,
            event_data_store,
            core_metrics: CoreMetrics::new(),
            additional_metrics: AdditionalMetrics::new(),
            database_metrics: DatabaseMetrics::new(),
            health_metrics: HealthMetrics::new(),
            internal_pings: InternalPings::new(cfg.enable_internal_pings),
            upload_manager,
            data_path: PathBuf::from(&cfg.data_path),
            application_id,
            ping_registry: HashMap::new(),
            start_time,
            max_events: cfg.max_events.unwrap_or(DEFAULT_MAX_EVENTS),
            is_first_run: false,
            debug: DebugOptions::new(),
            app_build: cfg.app_build.to_string(),
            // The subprocess doesn't use "metrics" pings, so it has no need for a scheduler.
            schedule_metrics_pings: false,
            remote_settings_epoch: AtomicU8::new(0),
            remote_settings_config: Arc::new(Mutex::new(RemoteSettingsConfig::new())),
            with_timestamps: cfg.enable_event_timestamps,
            ping_schedule: cfg.ping_schedule.clone(),
        };

        // Ensure these pings are registered.
        let pings = this.internal_pings.clone();
        this.register_ping_type(&pings.baseline);
        this.register_ping_type(&pings.metrics);
        this.register_ping_type(&pings.events);
        this.register_ping_type(&pings.health);
        this.register_ping_type(&pings.deletion_request);

        Ok(this)
    }

    /// Creates and initializes a new Glean object.
    ///
    /// This will create the necessary directories and files in
    /// [`cfg.data_path`](InternalConfiguration::data_path). This will also initialize
    /// the core metrics.
    pub fn new(cfg: InternalConfiguration) -> Result<Self> {
        let mut glean = Self::new_for_subprocess(&cfg, false)?;

        // Creating the data store creates the necessary path as well.
        // If that fails we bail out and don't initialize further.
        let data_path = Path::new(&cfg.data_path);
        let ping_lifetime_threshold = cfg.ping_lifetime_threshold as usize;
        let ping_lifetime_max_time = Duration::from_millis(cfg.ping_lifetime_max_time);
        glean.data_store = Some(Database::new(
            data_path,
            cfg.delay_ping_lifetime_io,
            ping_lifetime_threshold,
            ping_lifetime_max_time,
        )?);

        // This code references different states from the "Client ID recovery" flowchart.
        // See https://mozilla.github.io/glean/dev/core/internal/client_id_recovery.html for details.

        // We don't have the database yet when we first encounter the error,
        // so we store it and apply it later.
        // state (a)
        let stored_client_id = match glean.client_id_from_file() {
            Ok(id) if id == *KNOWN_CLIENT_ID => {
                glean
                    .health_metrics
                    .file_read_error
                    .get("c0ffee-in-file")
                    .add_sync(&glean, 1);
                None
            }
            Ok(id) => Some(id),
            Err(ClientIdFileError::NotFound) => {
                // That's ok, the file might just not exist yet.
                None
            }
            Err(ClientIdFileError::PermissionDenied) => {
                // state (b)
                // Uhm ... who removed our permission?
                glean
                    .health_metrics
                    .file_read_error
                    .get("permission-denied")
                    .add_sync(&glean, 1);
                None
            }
            Err(ClientIdFileError::ParseError(e)) => {
                // state (b)
                log::trace!("reading client_id.txt. Could not parse into UUID: {e}");
                glean
                    .health_metrics
                    .file_read_error
                    .get("parse")
                    .add_sync(&glean, 1);
                None
            }
            Err(ClientIdFileError::IoError(e)) => {
                // state (b)
                // We can't handle other IO errors (most couldn't occur on this operation anyway)
                log::trace!("reading client_id.txt. Unexpected io error: {e}");
                glean
                    .health_metrics
                    .file_read_error
                    .get("io")
                    .add_sync(&glean, 1);
                None
            }
        };

        {
            let data_store = glean.data_store.as_ref().unwrap();
            let db_load_sizes = data_store.load_sizes.as_ref().unwrap();
            let new_size = db_load_sizes.new.unwrap_or(0);

            // If we have a client ID on disk, we check the database
            if let Some(stored_client_id) = stored_client_id {
                // state (c)
                if new_size <= 0 {
                    log::trace!("no database. database size={new_size}. stored_client_id={stored_client_id}");
                    // state (d)
                    glean
                        .health_metrics
                        .recovered_client_id
                        .set_from_uuid_sync(&glean, stored_client_id);
                    glean
                        .health_metrics
                        .exception_state
                        .set_sync(&glean, ExceptionState::EmptyDb);

                    // state (e) -- mitigation: store recovered client ID in DB
                } else {
                    let db_client_id = glean
                        .core_metrics
                        .client_id
                        .get_value(&glean, Some("glean_client_info"));

                    match db_client_id {
                        None => {
                            // state (f)
                            log::trace!("no client_id in DB. stored_client_id={stored_client_id}");
                            glean
                                .health_metrics
                                .exception_state
                                .set_sync(&glean, ExceptionState::RegenDb);

                            // state (e) -- mitigation: store recovered client ID in DB
                        }
                        Some(db_client_id) if db_client_id == *KNOWN_CLIENT_ID => {
                            // state (i)
                            log::trace!(
                                "c0ffee client_id in DB, stored_client_id={stored_client_id}"
                            );
                            glean
                                .health_metrics
                                .recovered_client_id
                                .set_from_uuid_sync(&glean, stored_client_id);
                            glean
                                .health_metrics
                                .exception_state
                                .set_sync(&glean, ExceptionState::C0ffeeInDb);

                            // If we have a recovered client ID we also overwrite the database.
                            // state (e)
                        }
                        Some(db_client_id) if db_client_id == stored_client_id => {
                            // all valid. nothing to do
                            log::trace!("database consistent. db_client_id == stored_client_id: {db_client_id}");
                        }
                        Some(db_client_id) => {
                            // state (g)
                            log::trace!(
                                "client_id mismatch. db_client_id={db_client_id}, stored_client_id={stored_client_id}. Overwriting file with db's client_id."
                            );
                            glean
                                .health_metrics
                                .recovered_client_id
                                .set_from_uuid_sync(&glean, stored_client_id);
                            glean
                                .health_metrics
                                .exception_state
                                .set_sync(&glean, ExceptionState::ClientIdMismatch);

                            // state (h)
                            glean.store_client_id_with_reporting(
                                db_client_id,
                                "client_id mismatch will re-occur.",
                            );
                        }
                    }
                }
            } else {
                log::trace!("No stored client ID. Database might have it.");

                let db_client_id = glean
                    .core_metrics
                    .client_id
                    .get_value(&glean, Some("glean_client_info"));
                if let Some(db_client_id) = db_client_id {
                    // state (h)
                    glean.store_client_id_with_reporting(
                        db_client_id,
                        "Might happen on next init then.",
                    );
                } else {
                    log::trace!("Database has no client ID either. We might be fresh!");
                }
            }
        }

        // Set experimentation identifier (if any)
        if let Some(experimentation_id) = &cfg.experimentation_id {
            glean
                .additional_metrics
                .experimentation_id
                .set_sync(&glean, experimentation_id.to_string());
        }

        // The upload enabled flag may have changed since the last run, for
        // example by the changing of a config file.
        if cfg.upload_enabled {
            // If upload is enabled, just follow the normal code path to
            // instantiate the core metrics.
            glean.on_upload_enabled();
        } else {
            // If upload is disabled, then clear the metrics
            // but do not send a deletion request ping.
            // If we have run before, and we have an old client_id,
            // do the full upload disabled operations to clear metrics
            // and send a deletion request ping.
            match glean
                .core_metrics
                .client_id
                .get_value(&glean, Some("glean_client_info"))
            {
                None => glean.clear_metrics(),
                Some(uuid) => {
                    if let Err(e) = glean.remove_stored_client_id() {
                        log::error!("Couldn't remove client ID on disk. This might lead to a resurrection of this client ID later. Error: {e}");
                    }
                    if uuid == *KNOWN_CLIENT_ID {
                        // Previously Glean kept the KNOWN_CLIENT_ID stored.
                        // Let's ensure we erase it now.
                        if let Some(data) = glean.data_store.as_ref() {
                            _ = data.remove_single_metric(
                                Lifetime::User,
                                "glean_client_info",
                                "client_id",
                            );
                        }
                    } else {
                        // Temporarily enable uploading so we can submit a
                        // deletion request ping.
                        glean.upload_enabled = true;
                        glean.on_upload_disabled(true);
                    }
                }
            }
        }

        // We set this only for non-subprocess situations.
        // If internal pings are disabled, we don't set up the MPS either;
        // it wouldn't send any data anyway.
        glean.schedule_metrics_pings = cfg.enable_internal_pings && cfg.use_core_mps;

        // We only scan the pending pings directories **after** dealing with the upload state.
        // If upload is disabled, we delete all pending pings files
        // and we need to do that **before** scanning the pending pings folder
        // to ensure we don't enqueue pings before their files are deleted.
        let _scanning_thread = glean.upload_manager.scan_pending_pings_directories(true);

        Ok(glean)
    }

    /// For tests make it easy to create a Glean object using only the required configuration.
    #[cfg(test)]
    pub(crate) fn with_options(
        data_path: &str,
        application_id: &str,
        upload_enabled: bool,
        enable_internal_pings: bool,
    ) -> Self {
        let cfg = InternalConfiguration {
            data_path: data_path.into(),
            application_id: application_id.into(),
            language_binding_name: "Rust".into(),
            upload_enabled,
            max_events: None,
            delay_ping_lifetime_io: false,
            app_build: "Unknown".into(),
            use_core_mps: false,
            trim_data_to_registered_pings: false,
            log_level: None,
            rate_limit: None,
            enable_event_timestamps: true,
            experimentation_id: None,
            enable_internal_pings,
            ping_schedule: Default::default(),
            ping_lifetime_threshold: 0,
            ping_lifetime_max_time: 0,
        };

        let mut glean = Self::new(cfg).unwrap();

        // Disable all upload manager policies for testing
        glean.upload_manager = PingUploadManager::no_policy(data_path);

        glean
    }

    /// Destroys the database.
    ///
    /// After this Glean needs to be reinitialized.
    pub fn destroy_db(&mut self) {
        self.data_store = None;
    }

    fn client_id_file_path(&self) -> PathBuf {
        self.data_path.join(CLIENT_ID_PLAIN_FILENAME)
    }

    /// Write the client ID to a separate plain file on disk
    ///
    /// Use `store_client_id_with_reporting` to handle the error cases.
    fn store_client_id(&self, client_id: Uuid) -> Result<(), ClientIdFileError> {
        let mut fp = File::create(self.client_id_file_path())?;

        let mut buffer = Uuid::encode_buffer();
        let uuid_str = client_id.hyphenated().encode_lower(&mut buffer);
        fp.write_all(uuid_str.as_bytes())?;
        fp.sync_all()?;

        Ok(())
    }

    /// Write the client ID to a separate plain file on disk
    ///
    /// When an error occurs an error message is logged and the error is counted in a metric.
    fn store_client_id_with_reporting(&self, client_id: Uuid, msg: &str) {
        if let Err(err) = self.store_client_id(client_id) {
            log::error!(
                "Could not write {client_id} to state file. {} Error: {err}",
                msg
            );
            match err {
                ClientIdFileError::NotFound => {
                    self.health_metrics
                        .file_write_error
                        .get("not-found")
                        .add_sync(self, 1);
                }
                ClientIdFileError::PermissionDenied => {
                    self.health_metrics
                        .file_write_error
                        .get("permission-denied")
                        .add_sync(self, 1);
                }
                ClientIdFileError::IoError(..) => {
                    self.health_metrics
                        .file_write_error
                        .get("io")
                        .add_sync(self, 1);
                }
                ClientIdFileError::ParseError(..) => {
                    log::error!("Parse error encountered on file write. This is impossible.");
                }
            }
        }
    }

    /// Try to load a client ID from the plain file on disk.
    fn client_id_from_file(&self) -> Result<Uuid, ClientIdFileError> {
        let uuid_str = fs::read_to_string(self.client_id_file_path())?;
        // We don't write a newline, but we still trim it. Who knows who else touches that file by accident.
        // We're also a bit more lenient in what we accept here:
        // uppercase, lowercase, with or without dashes, urn, braced (and whatever else `Uuid`
        // parses by default).
        let uuid = Uuid::try_parse(uuid_str.trim_end())?;
        Ok(uuid)
    }

    /// Remove the stored client ID from disk.
    /// Should only be called when the client ID is also removed from the database.
    fn remove_stored_client_id(&self) -> Result<(), ClientIdFileError> {
        match fs::remove_file(self.client_id_file_path()) {
            Ok(()) => Ok(()),
            Err(e) if e.kind() == io::ErrorKind::NotFound => {
                // File was already missing. No need to report that.
                Ok(())
            }
            Err(e) => Err(e.into()),
        }
    }

    /// Initializes the core metrics managed by Glean's Rust core.
    fn initialize_core_metrics(&mut self) {
        let need_new_client_id = match self
            .core_metrics
            .client_id
            .get_value(self, Some("glean_client_info"))
        {
            None => true,
            Some(uuid) => uuid == *KNOWN_CLIENT_ID,
        };
        if need_new_client_id {
            let new_clientid = self.core_metrics.client_id.generate_and_set_sync(self);
            self.store_client_id_with_reporting(new_clientid, "New client in database only.");
        }

        if self
            .core_metrics
            .first_run_date
            .get_value(self, "glean_client_info")
            .is_none()
        {
            self.core_metrics.first_run_date.set_sync(self, None);
            // The `first_run_date` field is generated on the very first run
            // and persisted across upload toggling. We can assume that the only
            // time it is set is indeed our "first run".
            self.is_first_run = true;
        }

        self.set_application_lifetime_core_metrics();
    }

    /// Initializes the database metrics managed by Glean's Rust core.
    fn initialize_database_metrics(&mut self) {
        log::trace!("Initializing database metrics");

        if let Some(size) = self
            .data_store
            .as_ref()
            .and_then(|database| database.file_size())
        {
            log::trace!("Database file size: {}", size.get());
            self.database_metrics
                .size
                .accumulate_sync(self, size.get() as i64)
        }

        if let Some(rkv_load_state) = self
            .data_store
            .as_ref()
            .and_then(|database| database.rkv_load_state())
        {
            self.database_metrics
                .rkv_load_error
                .set_sync(self, rkv_load_state)
        }

        if let Some(load_sizes) = self
            .data_store
            .as_mut()
            .and_then(|database| database.load_sizes())
        {
            self.database_metrics.load_sizes.set_sync(
                self,
                serde_json::to_value(load_sizes).unwrap_or(serde_json::json!({})),
            );
        }
    }

    /// Signals that the environment is ready to submit pings.
    ///
    /// Should be called when Glean is initialized to the point where it can correctly assemble pings.
    /// Usually called from the language binding after all of the core metrics have been set
    /// and the ping types have been registered.
    ///
    /// # Arguments
    ///
    /// * `trim_data_to_registered_pings` - Whether we should limit storage to data
    ///   belonging to pings previously registered via `register_ping_type`.
    ///
    /// # Returns
    ///
    /// Whether the "events" ping was submitted.
    pub fn on_ready_to_submit_pings(&mut self, trim_data_to_registered_pings: bool) -> bool {
        // When upload is disabled on init we already clear out metrics.
        // However at that point not all pings are registered, so we keep that data around.
        // By the time we are ready to submit we try again, clearing out metrics from
        // now-known pings.
        if !self.upload_enabled {
            log::debug!("on_ready_to_submit_pings. let's clear pings once again.");
            self.clear_metrics();
        }

        self.event_data_store
            .flush_pending_events_on_startup(self, trim_data_to_registered_pings)
    }

    /// Sets whether upload is enabled or not.
    ///
    /// When uploading is disabled, metrics aren't recorded at all and no
    /// data is uploaded.
    ///
    /// When disabling, all pending metrics, events and queued pings are cleared.
    ///
    /// When enabling, the core Glean metrics are recreated.
    ///
    /// If the value of this flag is not actually changed, this is a no-op.
    ///
    /// # Arguments
    ///
    /// * `flag` - When true, enable metric collection.
    ///
    /// # Returns
    ///
    /// Whether the flag was different from the current value,
    /// and actual work was done to clear or reinstate metrics.
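    ///
    /// # Example
    ///
    /// A minimal sketch, assuming an already initialized `Glean` instance:
    ///
    /// ```rust,ignore
    /// // Disabling clears pending data and submits a deletion-request ping.
    /// let changed = glean.set_upload_enabled(false);
    /// assert!(changed);
    /// // Calling it again with the same value is a no-op.
    /// assert!(!glean.set_upload_enabled(false));
    /// ```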
    pub fn set_upload_enabled(&mut self, flag: bool) -> bool {
        log::info!("Upload enabled: {:?}", flag);

        if self.upload_enabled != flag {
            if flag {
                self.on_upload_enabled();
            } else {
                self.on_upload_disabled(false);
            }
            true
        } else {
            false
        }
    }

    /// Enable or disable a ping.
    ///
    /// Disabling a ping causes all data for that ping to be removed from storage
    /// and all pending pings of that type to be deleted.
    ///
    /// **Note**: Do not use directly. Call `PingType::set_enabled` instead.
    #[doc(hidden)]
    pub fn set_ping_enabled(&mut self, ping: &PingType, enabled: bool) {
        ping.store_enabled(enabled);
        if !enabled {
            if let Some(data) = self.data_store.as_ref() {
                _ = data.clear_ping_lifetime_storage(ping.name());
                _ = data.clear_lifetime_storage(Lifetime::User, ping.name());
                _ = data.clear_lifetime_storage(Lifetime::Application, ping.name());
            }
            let ping_maker = PingMaker::new();
            let disabled_pings = &[ping.name()][..];
            if let Err(err) = ping_maker.clear_pending_pings(self.get_data_path(), disabled_pings) {
                log::warn!("Error clearing pending pings: {}", err);
            }
        }
    }

    /// Determines whether upload is enabled.
    ///
    /// When upload is disabled, no data will be recorded.
    pub fn is_upload_enabled(&self) -> bool {
        self.upload_enabled
    }

    /// Check if a ping is enabled.
    ///
    /// Note that some internal "ping" names are considered to be always enabled.
    ///
    /// If a ping is not known to Glean ("unregistered") it is always considered disabled.
    /// If a ping is known, it can be enabled/disabled at any point.
    /// Only data for enabled pings is recorded.
    /// Disabled pings are never submitted.
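    ///
    /// # Example
    ///
    /// A minimal sketch, assuming a ping named `"sample"` (illustrative) was registered earlier:
    ///
    /// ```rust,ignore
    /// if glean.is_ping_enabled("sample") {
    ///     // Safe to record data destined for the "sample" ping.
    /// }
    /// ```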
    pub fn is_ping_enabled(&self, ping: &str) -> bool {
        // We "abuse" pings/storage names for internal data.
        const DEFAULT_ENABLED: &[&str] = &[
            "glean_client_info",
            "glean_internal_info",
            // for `experimentation_id`.
            // That should probably have gone into `glean_internal_info` instead.
            "all-pings",
        ];

        // `client_info`-like stuff is always enabled.
        if DEFAULT_ENABLED.contains(&ping) {
            return true;
        }

        let Some(ping) = self.ping_registry.get(ping) else {
            log::trace!("Unknown ping {ping}. Assuming disabled.");
            return false;
        };

        ping.enabled(self)
    }

    /// Handles the changing of state from upload disabled to enabled.
    ///
    /// Should only be called when the state actually changes.
    ///
    /// The `upload_enabled` flag is set to true and the core Glean metrics are
    /// recreated.
    fn on_upload_enabled(&mut self) {
        self.upload_enabled = true;
        self.initialize_core_metrics();
        self.initialize_database_metrics();
    }

    /// Handles the changing of state from upload enabled to disabled.
    ///
    /// Should only be called when the state actually changes.
    ///
    /// A deletion_request ping is sent, all pending metrics, events and queued
    /// pings are cleared, and the client_id is set to KNOWN_CLIENT_ID.
    /// Afterward, the upload_enabled flag is set to false.
    fn on_upload_disabled(&mut self, during_init: bool) {
        // The upload_enabled flag should be true here, or the deletion ping
        // won't be submitted.
        let reason = if during_init {
            Some("at_init")
        } else {
            Some("set_upload_enabled")
        };
        if !self
            .internal_pings
            .deletion_request
            .submit_sync(self, reason)
        {
            log::error!("Failed to submit deletion-request ping on optout.");
        }
        self.clear_metrics();
        self.upload_enabled = false;
    }

    /// Clear any pending metrics when telemetry is disabled.
    fn clear_metrics(&mut self) {
        // Clear the pending pings queue and acquire the lock
        // so that it can't be accessed until this function is done.
        let _lock = self.upload_manager.clear_ping_queue();

        // Clear any pending pings that follow `collection_enabled`.
        let ping_maker = PingMaker::new();
        let disabled_pings = self
            .ping_registry
            .iter()
            .filter(|&(_ping_name, ping)| ping.follows_collection_enabled())
            .map(|(ping_name, _ping)| &ping_name[..])
            .collect::<Vec<_>>();
        if let Err(err) = ping_maker.clear_pending_pings(self.get_data_path(), &disabled_pings) {
            log::warn!("Error clearing pending pings: {}", err);
        }

        if let Err(e) = self.remove_stored_client_id() {
            log::error!("Couldn't remove client ID on disk. This might lead to a resurrection of this client ID later. Error: {e}");
        }

        // Delete all stored metrics.
        // Note that this also includes the ping sequence numbers, so it has
        // the effect of resetting those to their initial values.
        if let Some(data) = self.data_store.as_ref() {
            _ = data.clear_lifetime_storage(Lifetime::User, "glean_internal_info");
            _ = data.remove_single_metric(Lifetime::User, "glean_client_info", "client_id");
            for (ping_name, ping) in &self.ping_registry {
                if ping.follows_collection_enabled() {
                    _ = data.clear_ping_lifetime_storage(ping_name);
                    _ = data.clear_lifetime_storage(Lifetime::User, ping_name);
                    _ = data.clear_lifetime_storage(Lifetime::Application, ping_name);
                }
            }
        }
        if let Err(err) = self.event_data_store.clear_all() {
            log::warn!("Error clearing pending events: {}", err);
        }

        // This does not clear the experiments store (which isn't managed by the
        // StorageEngineManager), since doing so would mean we would have to have the
        // application tell us again which experiments are active if telemetry is
        // re-enabled.
    }

    /// Gets the application ID as specified on instantiation.
    pub fn get_application_id(&self) -> &str {
        &self.application_id
    }

    /// Gets the data path of this instance.
    pub fn get_data_path(&self) -> &Path {
        &self.data_path
    }

    /// Gets a handle to the database.
    #[track_caller] // If this fails we're interested in the caller.
    pub fn storage(&self) -> &Database {
        self.data_store.as_ref().expect("No database found")
    }

    /// Gets an optional handle to the database.
    pub fn storage_opt(&self) -> Option<&Database> {
        self.data_store.as_ref()
    }

    /// Gets a handle to the event database.
    pub fn event_storage(&self) -> &EventDatabase {
        &self.event_data_store
    }

    pub(crate) fn with_timestamps(&self) -> bool {
        self.with_timestamps
    }

    /// Gets the maximum number of events to store before sending a ping.
    pub fn get_max_events(&self) -> usize {
        let remote_settings_config = self.remote_settings_config.lock().unwrap();

        if let Some(max_events) = remote_settings_config.event_threshold {
            max_events as usize
        } else {
            self.max_events as usize
        }
    }

    /// Gets the next task for an uploader.
    ///
    /// This can be one of:
    ///
    /// * [`Wait`](PingUploadTask::Wait) - which means the requester should ask
    ///   again later;
    /// * [`Upload(PingRequest)`](PingUploadTask::Upload) - which means there is
    ///   a ping to upload. This wraps the actual request object;
    /// * [`Done`](PingUploadTask::Done) - which means the requester should stop
    ///   asking for now.
    ///
    /// # Returns
    ///
    /// A [`PingUploadTask`] representing the next task.
    pub fn get_upload_task(&self) -> PingUploadTask {
        self.upload_manager.get_upload_task(self, self.log_pings())
    }

    /// Processes the response from an attempt to upload a ping.
    ///
    /// # Arguments
    ///
    /// * `uuid` - The UUID of the ping in question.
    /// * `status` - The upload result.
    pub fn process_ping_upload_response(
        &self,
        uuid: &str,
        status: UploadResult,
    ) -> UploadTaskAction {
        self.upload_manager
            .process_ping_upload_response(self, uuid, status)
    }

    /// Takes a snapshot for the given store and optionally clears it.
    ///
    /// # Arguments
    ///
    /// * `store_name` - The store to snapshot.
    /// * `clear_store` - Whether to clear the store after snapshotting.
    ///
    /// # Returns
    ///
    /// The snapshot in a string encoded as JSON. If the snapshot is empty, returns an empty string.
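    ///
    /// # Example
    ///
    /// A minimal sketch, assuming a mutable `Glean` instance and a store named `"sample"`
    /// (the store name is illustrative):
    ///
    /// ```rust,ignore
    /// // Serialize the current contents of the "sample" store without clearing it.
    /// let json = glean.snapshot("sample", false);
    /// println!("{json}");
    /// ```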
    pub fn snapshot(&mut self, store_name: &str, clear_store: bool) -> String {
        StorageManager
            .snapshot(self.storage(), store_name, clear_store)
            .unwrap_or_else(|| String::from(""))
    }

    pub(crate) fn make_path(&self, ping_name: &str, doc_id: &str) -> String {
        format!(
            "/submit/{}/{}/{}/{}",
            self.get_application_id(),
            ping_name,
            GLEAN_SCHEMA_VERSION,
            doc_id
        )
    }

    /// Collects and submits a ping by name for eventual uploading.
    ///
    /// The ping content is assembled as soon as possible, but upload is not
    /// guaranteed to happen immediately, as that depends on the upload policies.
    ///
    /// If the ping currently contains no content, it will not be sent,
    /// unless it is configured to be sent if empty.
    ///
    /// # Arguments
    ///
    /// * `ping_name` - The name of the ping to submit
    /// * `reason` - A reason code to include in the ping
    ///
    /// # Returns
    ///
    /// Whether the ping was successfully assembled and queued.
    ///
    /// # Errors
    ///
    /// If collecting or writing the ping to disk failed.
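    ///
    /// # Example
    ///
    /// A minimal sketch, assuming a ping named `"sample"` (illustrative) was registered:
    ///
    /// ```rust,ignore
    /// let submitted = glean.submit_ping_by_name("sample", Some("manual"));
    /// if !submitted {
    ///     log::debug!("sample ping had no content or is unknown");
    /// }
    /// ```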
    pub fn submit_ping_by_name(&self, ping_name: &str, reason: Option<&str>) -> bool {
        match self.get_ping_by_name(ping_name) {
            None => {
                log::error!("Attempted to submit unknown ping '{}'", ping_name);
                false
            }
            Some(ping) => ping.submit_sync(self, reason),
        }
    }

    /// Gets a [`PingType`] by name.
    ///
    /// # Returns
    ///
    /// The [`PingType`] of a ping if the given name was registered before, [`None`]
    /// otherwise.
    pub fn get_ping_by_name(&self, ping_name: &str) -> Option<&PingType> {
        self.ping_registry.get(ping_name)
    }

    /// Register a new [`PingType`](metrics/struct.PingType.html).
    pub fn register_ping_type(&mut self, ping: &PingType) {
        if self.ping_registry.contains_key(ping.name()) {
            log::debug!("Duplicate ping named '{}'", ping.name())
        }

        self.ping_registry
            .insert(ping.name().to_string(), ping.clone());
    }

    /// Gets a list of currently registered ping names.
    ///
    /// # Returns
    ///
    /// The list of ping names that are currently registered.
    pub fn get_registered_ping_names(&self) -> Vec<&str> {
        self.ping_registry.keys().map(String::as_str).collect()
    }

    /// Gets the creation time of the Glean object.
    pub(crate) fn start_time(&self) -> DateTime<FixedOffset> {
        self.start_time
    }

    /// Indicates that an experiment is running.
    ///
    /// Glean will then add an experiment annotation to the environment
    /// which is sent with pings. This information is not persisted between runs.
    ///
    /// # Arguments
    ///
    /// * `experiment_id` - The id of the active experiment (maximum 30 bytes).
    /// * `branch` - The experiment branch (maximum 30 bytes).
    /// * `extra` - Optional metadata to output with the ping.
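    ///
    /// # Example
    ///
    /// A minimal sketch with illustrative experiment data:
    ///
    /// ```rust,ignore
    /// use std::collections::HashMap;
    ///
    /// let mut extra = HashMap::new();
    /// extra.insert("variant".to_string(), "blue-button".to_string());
    /// glean.set_experiment_active("new-onboarding".to_string(), "treatment".to_string(), extra);
    /// // ... later, when the experiment ends:
    /// glean.set_experiment_inactive("new-onboarding".to_string());
    /// ```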
    pub fn set_experiment_active(
        &self,
        experiment_id: String,
        branch: String,
        extra: HashMap<String, String>,
    ) {
        let metric = ExperimentMetric::new(self, experiment_id);
        metric.set_active_sync(self, branch, extra);
    }

    /// Indicates that an experiment is no longer running.
    ///
    /// # Arguments
    ///
    /// * `experiment_id` - The id of the active experiment to deactivate (maximum 30 bytes).
    pub fn set_experiment_inactive(&self, experiment_id: String) {
        let metric = ExperimentMetric::new(self, experiment_id);
        metric.set_inactive_sync(self);
    }

    /// **Test-only API (exported for FFI purposes).**
    ///
    /// Gets stored data for the requested experiment.
    ///
    /// # Arguments
    ///
    /// * `experiment_id` - The id of the active experiment (maximum 30 bytes).
    pub fn test_get_experiment_data(&self, experiment_id: String) -> Option<RecordedExperiment> {
        let metric = ExperimentMetric::new(self, experiment_id);
        metric.test_get_value(self)
    }

    /// **Test-only API (exported for FFI purposes).**
    ///
    /// Gets the stored experimentation id annotation.
    pub fn test_get_experimentation_id(&self) -> Option<String> {
        self.additional_metrics
            .experimentation_id
            .get_value(self, None)
    }

    /// Set configuration to override the default state, typically initiated from a
    /// remote_settings experiment or rollout.
    ///
    /// # Arguments
    ///
    /// * `cfg` - The `RemoteSettingsConfig` object to apply.
    pub fn apply_server_knobs_config(&self, cfg: RemoteSettingsConfig) {
        // Set the current RemoteSettingsConfig, keeping the lock until the epoch is
        // updated to prevent reading a "new" config but an "old" epoch
        let mut remote_settings_config = self.remote_settings_config.lock().unwrap();

        // Merge the existing metrics configuration with the supplied one
        remote_settings_config
            .metrics_enabled
            .extend(cfg.metrics_enabled);

        // Merge the existing ping configuration with the supplied one
        remote_settings_config
            .pings_enabled
            .extend(cfg.pings_enabled);

        remote_settings_config.event_threshold = cfg.event_threshold;

        // Update the remote_settings epoch
        self.remote_settings_epoch.fetch_add(1, Ordering::SeqCst);
    }

    /// Persists [`Lifetime::Ping`] data that might be in memory in case
    /// [`delay_ping_lifetime_io`](InternalConfiguration::delay_ping_lifetime_io) is set
    /// or was set at a previous time.
    ///
    /// If there is no data to persist, this function does nothing.
    pub fn persist_ping_lifetime_data(&self) -> Result<()> {
        if let Some(data) = self.data_store.as_ref() {
            return data.persist_ping_lifetime_data();
        }

        Ok(())
    }

    /// Sets internally-handled application lifetime metrics.
    fn set_application_lifetime_core_metrics(&self) {
        self.core_metrics.os.set_sync(self, system::OS);
    }

    /// **This is not meant to be used directly.**
    ///
    /// Clears all the metrics that have [`Lifetime::Application`].
    pub fn clear_application_lifetime_metrics(&self) {
        log::trace!("Clearing Lifetime::Application metrics");
        if let Some(data) = self.data_store.as_ref() {
            data.clear_lifetime(Lifetime::Application);
        }

        // Set internally handled app lifetime metrics again.
        self.set_application_lifetime_core_metrics();
    }

    /// Whether or not this is the first run on this profile.
    pub fn is_first_run(&self) -> bool {
        self.is_first_run
    }

    /// Sets a debug view tag.
    ///
    /// This will return `false` in case `value` is not a valid tag.
    ///
    /// When the debug view tag is set, pings are sent with a `X-Debug-ID` header with the value of the tag
    /// and are sent to the ["Ping Debug Viewer"](https://mozilla.github.io/glean/book/dev/core/internal/debug-pings.html).
    ///
    /// # Arguments
    ///
    /// * `value` - A valid HTTP header value. Must match the regex: "[a-zA-Z0-9-]{1,20}".
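    ///
    /// # Example
    ///
    /// A minimal sketch with an illustrative tag:
    ///
    /// ```rust,ignore
    /// if glean.set_debug_view_tag("test-tag") {
    ///     // Subsequent pings carry the `X-Debug-ID: test-tag` header.
    /// }
    /// ```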
    pub fn set_debug_view_tag(&mut self, value: &str) -> bool {
        self.debug.debug_view_tag.set(value.into())
    }

    /// Return the value for the debug view tag or [`None`] if it hasn't been set.
    ///
    /// The `debug_view_tag` may be set from an environment variable
    /// (`GLEAN_DEBUG_VIEW_TAG`) or through the [`set_debug_view_tag`](Glean::set_debug_view_tag) function.
    pub fn debug_view_tag(&self) -> Option<&String> {
        self.debug.debug_view_tag.get()
    }

    /// Sets source tags.
    ///
    /// This will return `false` in case `value` contains invalid tags.
    ///
    /// Ping tags will show in the destination datasets, after ingestion.
    ///
    /// **Note** If one or more tags are invalid, all tags are ignored.
    ///
    /// # Arguments
    ///
    /// * `value` - A vector of at most 5 valid HTTP header values. Individual tags must match the regex: "[a-zA-Z0-9-]{1,20}".
    pub fn set_source_tags(&mut self, value: Vec<String>) -> bool {
        self.debug.source_tags.set(value)
    }

    /// Return the value for the source tags or [`None`] if it hasn't been set.
    ///
    /// The `source_tags` may be set from an environment variable (`GLEAN_SOURCE_TAGS`)
    /// or through the [`set_source_tags`](Glean::set_source_tags) function.
    pub(crate) fn source_tags(&self) -> Option<&Vec<String>> {
        self.debug.source_tags.get()
    }

    /// Sets the log pings debug option.
    ///
    /// This will return `false` in case we are unable to set the option.
    ///
    /// When the log pings debug option is `true`,
    /// we log the payload of all successfully assembled pings.
    ///
    /// # Arguments
    ///
    /// * `value` - The value of the log pings option
    pub fn set_log_pings(&mut self, value: bool) -> bool {
        self.debug.log_pings.set(value)
    }

    /// Return the value for the log pings debug option or `false` if it hasn't been set.
    ///
    /// The `log_pings` option may be set from an environment variable (`GLEAN_LOG_PINGS`)
    /// or through the `set_log_pings` function.
    pub fn log_pings(&self) -> bool {
        self.debug.log_pings.get().copied().unwrap_or(false)
    }

    fn get_dirty_bit_metric(&self) -> metrics::BooleanMetric {
        metrics::BooleanMetric::new(CommonMetricData {
            name: "dirtybit".into(),
            // We don't need a category, the name is already unique
            category: "".into(),
            send_in_pings: vec![INTERNAL_STORAGE.into()],
            lifetime: Lifetime::User,
            ..Default::default()
        })
    }

    /// **This is not meant to be used directly.**
    ///
    /// Sets the value of a "dirty flag" in the permanent storage.
    ///
    /// The "dirty flag" is meant to have the following behaviour, implemented
    /// by the consumers of the FFI layer:
    ///
    /// - on mobile: set to `false` when going to background or shutting down,
    ///   set to `true` at startup and when going to foreground.
    /// - on non-mobile platforms: set to `true` at startup and `false` at
    ///   shutdown.
    ///
    /// At startup, before setting its new value, if the "dirty flag" value is
    /// `true`, then Glean knows it did not exit cleanly and can implement
    /// coping mechanisms (e.g. sending a `baseline` ping).
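    ///
    /// # Example
    ///
    /// A minimal sketch of the expected startup sequence:
    ///
    /// ```rust,ignore
    /// if glean.is_dirty_flag_set() {
    ///     // The previous session did not shut down cleanly.
    /// }
    /// // Mark this session as "dirty" until a clean shutdown resets it.
    /// glean.set_dirty_flag(true);
    /// ```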
    pub fn set_dirty_flag(&self, new_value: bool) {
        self.get_dirty_bit_metric().set_sync(self, new_value);
    }

    /// **This is not meant to be used directly.**
    ///
    /// Checks the stored value of the "dirty flag".
    pub fn is_dirty_flag_set(&self) -> bool {
        let dirty_bit_metric = self.get_dirty_bit_metric();
        match StorageManager.snapshot_metric(
            self.storage(),
            INTERNAL_STORAGE,
            &dirty_bit_metric.meta().identifier(self),
            dirty_bit_metric.meta().inner.lifetime,
        ) {
            Some(Metric::Boolean(b)) => b,
            _ => false,
        }
    }

    /// Performs the collection/cleanup operations required by becoming active.
    ///
    /// This function generates a baseline ping with reason `active`
    /// and then sets the dirty bit.
    pub fn handle_client_active(&mut self) {
        if !self
            .internal_pings
            .baseline
            .submit_sync(self, Some("active"))
        {
            log::info!("baseline ping not submitted on active");
        }

        self.set_dirty_flag(true);
    }

    /// Performs the collection/cleanup operations required by becoming inactive.
    ///
    /// This function generates a baseline and an events ping with reason
    /// `inactive` and then clears the dirty bit.
    pub fn handle_client_inactive(&mut self) {
        if !self
            .internal_pings
            .baseline
            .submit_sync(self, Some("inactive"))
        {
            log::info!("baseline ping not submitted on inactive");
        }

        if !self
            .internal_pings
            .events
            .submit_sync(self, Some("inactive"))
        {
            log::info!("events ping not submitted on inactive");
        }

        self.set_dirty_flag(false);
    }

    /// **Test-only API (exported for FFI purposes).**
    ///
    /// Deletes all stored metrics.
    ///
    /// Note that this also includes the ping sequence numbers, so it has
    /// the effect of resetting those to their initial values.
    pub fn test_clear_all_stores(&self) {
        if let Some(data) = self.data_store.as_ref() {
            data.clear_all()
        }
        // We don't care about this failing, maybe the data does just not exist.
        let _ = self.event_data_store.clear_all();
    }

    /// Instructs the Metrics Ping Scheduler's thread to exit cleanly.
    /// If Glean was configured with `use_core_mps: false`, this has no effect.
    pub fn cancel_metrics_ping_scheduler(&self) {
        if self.schedule_metrics_pings {
            scheduler::cancel();
        }
    }

    /// Instructs the Metrics Ping Scheduler to begin scheduling metrics pings.
    /// If Glean was configured with `use_core_mps: false`, this has no effect.
    pub fn start_metrics_ping_scheduler(&self) {
        if self.schedule_metrics_pings {
            scheduler::schedule(self);
        }
    }

    /// Updates attribution fields with new values.
    /// AttributionMetrics fields with `None` values will not overwrite older values.
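    ///
    /// # Example
    ///
    /// A minimal sketch with illustrative attribution values; fields left as `None`
    /// keep their previously recorded values:
    ///
    /// ```rust,ignore
    /// let attribution = AttributionMetrics {
    ///     source: Some("google-play".into()),
    ///     medium: Some("organic".into()),
    ///     campaign: None,
    ///     term: None,
    ///     content: None,
    /// };
    /// glean.update_attribution(attribution);
    /// ```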
    pub fn update_attribution(&self, attribution: AttributionMetrics) {
        if let Some(source) = attribution.source {
            self.core_metrics.attribution_source.set_sync(self, source);
        }
        if let Some(medium) = attribution.medium {
            self.core_metrics.attribution_medium.set_sync(self, medium);
        }
        if let Some(campaign) = attribution.campaign {
            self.core_metrics
                .attribution_campaign
                .set_sync(self, campaign);
        }
        if let Some(term) = attribution.term {
            self.core_metrics.attribution_term.set_sync(self, term);
        }
        if let Some(content) = attribution.content {
            self.core_metrics
                .attribution_content
                .set_sync(self, content);
        }
    }

    /// **TEST-ONLY Method**
    ///
    /// Returns the current attribution metrics.
    pub fn test_get_attribution(&self) -> AttributionMetrics {
        AttributionMetrics {
            source: self
                .core_metrics
                .attribution_source
                .get_value(self, Some("glean_client_info")),
            medium: self
                .core_metrics
                .attribution_medium
                .get_value(self, Some("glean_client_info")),
            campaign: self
                .core_metrics
                .attribution_campaign
                .get_value(self, Some("glean_client_info")),
            term: self
                .core_metrics
                .attribution_term
                .get_value(self, Some("glean_client_info")),
            content: self
                .core_metrics
                .attribution_content
                .get_value(self, Some("glean_client_info")),
        }
    }

    /// Updates distribution fields with new values.
    /// DistributionMetrics fields with `None` values will not overwrite older values.
    pub fn update_distribution(&self, distribution: DistributionMetrics) {
        if let Some(name) = distribution.name {
            self.core_metrics.distribution_name.set_sync(self, name);
        }
    }

    /// **TEST-ONLY Method**
    ///
    /// Returns the current distribution metrics.
    pub fn test_get_distribution(&self) -> DistributionMetrics {
        DistributionMetrics {
            name: self
                .core_metrics
                .distribution_name
                .get_value(self, Some("glean_client_info")),
        }
    }
}