glean_core/core/mod.rs
1// This Source Code Form is subject to the terms of the Mozilla Public
2// License, v. 2.0. If a copy of the MPL was not distributed with this
3// file, You can obtain one at https://mozilla.org/MPL/2.0/.
4
5use std::collections::HashMap;
6use std::fs::{self, File};
7use std::io::{self, Write};
8use std::path::{Path, PathBuf};
9use std::sync::atomic::{AtomicU8, Ordering};
10use std::sync::{Arc, Mutex};
11use std::time::Duration;
12
13use chrono::{DateTime, FixedOffset};
14use malloc_size_of_derive::MallocSizeOf;
15use once_cell::sync::OnceCell;
16use uuid::Uuid;
17
18use crate::database::Database;
19use crate::debug::DebugOptions;
20use crate::error::ClientIdFileError;
21use crate::event_database::EventDatabase;
22use crate::internal_metrics::{
23 AdditionalMetrics, CoreMetrics, DatabaseMetrics, ExceptionState, HealthMetrics,
24};
25use crate::internal_pings::InternalPings;
26use crate::metrics::{
27 self, ExperimentMetric, Metric, MetricType, PingType, RecordedExperiment, RemoteSettingsConfig,
28};
29use crate::ping::PingMaker;
30use crate::storage::{StorageManager, INTERNAL_STORAGE};
31use crate::upload::{PingUploadManager, PingUploadTask, UploadResult, UploadTaskAction};
32use crate::util::{local_now_with_offset, sanitize_application_id};
33use crate::{
34 scheduler, system, AttributionMetrics, CommonMetricData, DistributionMetrics, ErrorKind,
35 InternalConfiguration, Lifetime, PingRateLimit, Result, DEFAULT_MAX_EVENTS,
36 GLEAN_SCHEMA_VERSION, GLEAN_VERSION, KNOWN_CLIENT_ID,
37};
38
39const CLIENT_ID_PLAIN_FILENAME: &str = "client_id.txt";
40static GLEAN: OnceCell<Mutex<Glean>> = OnceCell::new();
41
42/// Rate limiting defaults
43/// 15 pings every 60 seconds.
44pub const DEFAULT_SECONDS_PER_INTERVAL: u64 = 60;
45pub const DEFAULT_PINGS_PER_INTERVAL: u32 = 15;
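// A sketch of overriding these defaults through the configuration; `PingRateLimit` comes from
// the crate-root imports above and the values are illustrative:
//
// let rate_limit = PingRateLimit {
//     seconds_per_interval: 90,
//     pings_per_interval: 10,
// };
// // Pass `Some(rate_limit)` as `InternalConfiguration::rate_limit` when building the config.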
46
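/// Gets a reference to the global Glean object, if one has been set up via [`setup_glean`].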
47pub fn global_glean() -> Option<&'static Mutex<Glean>> {
48 GLEAN.get()
49}
50
51/// Sets or replaces the global Glean object.
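///
/// # Example
///
/// A minimal sketch of wiring a freshly created instance into the global object
/// (assuming `cfg` is a valid `InternalConfiguration`, as in the [`Glean`] example below):
///
/// ```rust,ignore
/// let glean = Glean::new(cfg)?;
/// setup_glean(glean)?;
/// assert!(global_glean().is_some());
/// ```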
52pub fn setup_glean(glean: Glean) -> Result<()> {
53 // The `OnceCell` type wrapping our Glean is thread-safe and can only be set once.
54 // Therefore even if our check for it being empty succeeds, setting it could fail if a
55 // concurrent thread is quicker in setting it.
56 // However this will not cause a bigger problem, as the second `set` operation will just fail.
57 // We can log it and move on.
58 //
59// For all wrappers this is not a problem, as the Glean object is initialized exactly once on
60 // calling `initialize` on the global singleton and further operations check that it has been
61 // initialized.
62 if GLEAN.get().is_none() {
63 if GLEAN.set(Mutex::new(glean)).is_err() {
64 log::warn!(
65 "Global Glean object is initialized already. This probably happened concurrently."
66 )
67 }
68 } else {
69 // We allow overriding the global Glean object to support test mode.
70 // In test mode the Glean object is fully destroyed and recreated.
71 // This all happens behind a mutex and is therefore also thread-safe.
72 let mut lock = GLEAN.get().unwrap().lock().unwrap();
73 *lock = glean;
74 }
75 Ok(())
76}
77
78/// Execute `f` passing the global Glean object.
79///
80/// Panics if the global Glean object has not been set.
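///
/// # Example
///
/// A minimal usage sketch (assuming the global Glean object was registered via [`setup_glean`]):
///
/// ```rust,ignore
/// let upload_enabled = with_glean(|glean| glean.is_upload_enabled());
/// ```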
81pub fn with_glean<F, R>(f: F) -> R
82where
83 F: FnOnce(&Glean) -> R,
84{
85 let glean = global_glean().expect("Global Glean object not initialized");
86 let lock = glean.lock().unwrap();
87 f(&lock)
88}
89
90/// Execute `f` passing the global Glean object mutable.
91///
92/// Panics if the global Glean object has not been set.
93pub fn with_glean_mut<F, R>(f: F) -> R
94where
95 F: FnOnce(&mut Glean) -> R,
96{
97 let glean = global_glean().expect("Global Glean object not initialized");
98 let mut lock = glean.lock().unwrap();
99 f(&mut lock)
100}
101
102/// Execute `f` passing the global Glean object if it has been set.
103///
104/// Returns `None` if the global Glean object has not been set.
105/// Returns `Some(T)` otherwise.
106pub fn with_opt_glean<F, R>(f: F) -> Option<R>
107where
108 F: FnOnce(&Glean) -> R,
109{
110 let glean = global_glean()?;
111 let lock = glean.lock().unwrap();
112 Some(f(&lock))
113}
114
115/// The object holding meta information about a Glean instance.
116///
117/// ## Example
118///
119/// Create a new Glean instance, register a ping, record a simple counter and then send the final
120/// ping.
121///
122/// ```rust,no_run
123/// # use glean_core::{Glean, InternalConfiguration, CommonMetricData, metrics::*};
124/// let cfg = InternalConfiguration {
125/// data_path: "/tmp/glean".into(),
126/// application_id: "glean.sample.app".into(),
127/// language_binding_name: "Rust".into(),
128/// upload_enabled: true,
129/// max_events: None,
130/// delay_ping_lifetime_io: false,
131/// app_build: "".into(),
132/// use_core_mps: false,
133/// trim_data_to_registered_pings: false,
134/// log_level: None,
135/// rate_limit: None,
136/// enable_event_timestamps: true,
137/// experimentation_id: None,
138/// enable_internal_pings: true,
139/// ping_schedule: Default::default(),
140/// ping_lifetime_threshold: 1000,
141/// ping_lifetime_max_time: 2000,
142/// };
143/// let mut glean = Glean::new(cfg).unwrap();
144/// let ping = PingType::new("sample", true, false, true, true, true, vec![], vec![], true, vec![]);
145/// glean.register_ping_type(&ping);
146///
147/// let call_counter: CounterMetric = CounterMetric::new(CommonMetricData {
148/// name: "calls".into(),
149/// category: "local".into(),
150/// send_in_pings: vec!["sample".into()],
151/// ..Default::default()
152/// });
153///
154/// call_counter.add_sync(&glean, 1);
155///
156/// ping.submit_sync(&glean, None);
157/// ```
158///
159/// ## Note
160///
161/// In specific language bindings, this is usually wrapped in a singleton and all metric recording goes to a single instance of this object.
162/// In the Rust core, it is possible to create multiple instances, which is used in testing.
163#[derive(Debug, MallocSizeOf)]
164pub struct Glean {
165 upload_enabled: bool,
166 pub(crate) data_store: Option<Database>,
167 event_data_store: EventDatabase,
168 pub(crate) core_metrics: CoreMetrics,
169 pub(crate) additional_metrics: AdditionalMetrics,
170 pub(crate) database_metrics: DatabaseMetrics,
171 pub(crate) health_metrics: HealthMetrics,
172 pub(crate) internal_pings: InternalPings,
173 data_path: PathBuf,
174 application_id: String,
175 ping_registry: HashMap<String, PingType>,
176 #[ignore_malloc_size_of = "external non-allocating type"]
177 start_time: DateTime<FixedOffset>,
178 max_events: u32,
179 is_first_run: bool,
180 pub(crate) upload_manager: PingUploadManager,
181 debug: DebugOptions,
182 pub(crate) app_build: String,
183 pub(crate) schedule_metrics_pings: bool,
184 pub(crate) remote_settings_epoch: AtomicU8,
185 #[ignore_malloc_size_of = "TODO: Expose Glean's inner memory allocations (bug 1960592)"]
186 pub(crate) remote_settings_config: Arc<Mutex<RemoteSettingsConfig>>,
187 pub(crate) with_timestamps: bool,
188 pub(crate) ping_schedule: HashMap<String, Vec<String>>,
189}
190
191impl Glean {
192 /// Creates and initializes a new Glean object for use in a subprocess.
193 ///
194 /// Importantly, this will not send any pings at startup, since that
195 /// sort of management should only happen in the main process.
196 pub fn new_for_subprocess(cfg: &InternalConfiguration, scan_directories: bool) -> Result<Self> {
197 log::info!("Creating new Glean v{}", GLEAN_VERSION);
198
199 let application_id = sanitize_application_id(&cfg.application_id);
200 if application_id.is_empty() {
201 return Err(ErrorKind::InvalidConfig.into());
202 }
203
204 let data_path = Path::new(&cfg.data_path);
205 let event_data_store = EventDatabase::new(data_path)?;
206
207 // Create an upload manager with rate limiting of 15 pings every 60 seconds.
208 let mut upload_manager = PingUploadManager::new(&cfg.data_path, &cfg.language_binding_name);
209 let rate_limit = cfg.rate_limit.as_ref().unwrap_or(&PingRateLimit {
210 seconds_per_interval: DEFAULT_SECONDS_PER_INTERVAL,
211 pings_per_interval: DEFAULT_PINGS_PER_INTERVAL,
212 });
213 upload_manager.set_rate_limiter(
214 rate_limit.seconds_per_interval,
215 rate_limit.pings_per_interval,
216 );
217
218 // We only scan the pending ping directories when calling this from a subprocess;
219 // when calling this from `::new` we need to scan the directories after dealing with the upload state.
220 if scan_directories {
221 let _scanning_thread = upload_manager.scan_pending_pings_directories(false);
222 }
223
224 let start_time = local_now_with_offset();
225 let mut this = Self {
226 upload_enabled: cfg.upload_enabled,
227 // In the subprocess, we want to avoid accessing the database entirely.
228 // The easiest way to ensure that is to just not initialize it.
229 data_store: None,
230 event_data_store,
231 core_metrics: CoreMetrics::new(),
232 additional_metrics: AdditionalMetrics::new(),
233 database_metrics: DatabaseMetrics::new(),
234 health_metrics: HealthMetrics::new(),
235 internal_pings: InternalPings::new(cfg.enable_internal_pings),
236 upload_manager,
237 data_path: PathBuf::from(&cfg.data_path),
238 application_id,
239 ping_registry: HashMap::new(),
240 start_time,
241 max_events: cfg.max_events.unwrap_or(DEFAULT_MAX_EVENTS),
242 is_first_run: false,
243 debug: DebugOptions::new(),
244 app_build: cfg.app_build.to_string(),
245 // Subprocess doesn't use "metrics" pings so has no need for a scheduler.
246 schedule_metrics_pings: false,
247 remote_settings_epoch: AtomicU8::new(0),
248 remote_settings_config: Arc::new(Mutex::new(RemoteSettingsConfig::new())),
249 with_timestamps: cfg.enable_event_timestamps,
250 ping_schedule: cfg.ping_schedule.clone(),
251 };
252
253 // Ensure these pings are registered.
254 let pings = this.internal_pings.clone();
255 this.register_ping_type(&pings.baseline);
256 this.register_ping_type(&pings.metrics);
257 this.register_ping_type(&pings.events);
258 this.register_ping_type(&pings.health);
259 this.register_ping_type(&pings.deletion_request);
260
261 Ok(this)
262 }
263
264 /// Creates and initializes a new Glean object.
265 ///
266 /// This will create the necessary directories and files in
267 /// [`cfg.data_path`](InternalConfiguration::data_path). This will also initialize
268 /// the core metrics.
269 pub fn new(cfg: InternalConfiguration) -> Result<Self> {
270 let mut glean = Self::new_for_subprocess(&cfg, false)?;
271
272 // Creating the data store creates the necessary path as well.
273 // If that fails we bail out and don't initialize further.
274 let data_path = Path::new(&cfg.data_path);
275 let ping_lifetime_threshold = cfg.ping_lifetime_threshold as usize;
276 let ping_lifetime_max_time = Duration::from_millis(cfg.ping_lifetime_max_time);
277 glean.data_store = Some(Database::new(
278 data_path,
279 cfg.delay_ping_lifetime_io,
280 ping_lifetime_threshold,
281 ping_lifetime_max_time,
282 )?);
283
284 // This code references different states from the "Client ID recovery" flowchart.
285 // See https://mozilla.github.io/glean/dev/core/internal/client_id_recovery.html for details.
286
287 // We don't have the database yet when we first encounter the error,
288 // so we store it and apply it later.
289 // state (a)
290 let stored_client_id = match glean.client_id_from_file() {
291 Ok(id) if id == *KNOWN_CLIENT_ID => {
292 glean
293 .health_metrics
294 .file_read_error
295 .get("c0ffee-in-file")
296 .add_sync(&glean, 1);
297 None
298 }
299 Ok(id) => Some(id),
300 Err(ClientIdFileError::NotFound) => {
301 // That's ok, the file might just not exist yet.
302 glean
303 .health_metrics
304 .file_read_error
305 .get("file-not-found")
306 .add_sync(&glean, 1);
307 None
308 }
309 Err(ClientIdFileError::PermissionDenied) => {
310 // state (b)
311 // Uhm ... who removed our permission?
312 glean
313 .health_metrics
314 .file_read_error
315 .get("permission-denied")
316 .add_sync(&glean, 1);
317 None
318 }
319 Err(ClientIdFileError::ParseError(e)) => {
320 // state (b)
321 log::trace!("reading cliend_id.txt. Could not parse into UUID: {e}");
322 glean
323 .health_metrics
324 .file_read_error
325 .get("parse")
326 .add_sync(&glean, 1);
327 None
328 }
329 Err(ClientIdFileError::IoError(e)) => {
330 // state (b)
331 // We can't handle other IO errors (most couldn't occur on this operation anyway)
332 log::trace!("reading client_id.txt. Unexpected io error: {e}");
333 glean
334 .health_metrics
335 .file_read_error
336 .get("io")
337 .add_sync(&glean, 1);
338 None
339 }
340 };
341
342 {
343 let data_store = glean.data_store.as_ref().unwrap();
344 let file_size = data_store.file_size.map(|n| n.get()).unwrap_or(0);
345
346 // If we have a client ID on disk, we check the database
347 if let Some(stored_client_id) = stored_client_id {
348 // state (c)
349 if file_size == 0 {
350 log::trace!("no database. database size={file_size}. stored_client_id={stored_client_id}");
351 // state (d)
352 glean
353 .health_metrics
354 .recovered_client_id
355 .set_from_uuid_sync(&glean, stored_client_id);
356 glean
357 .health_metrics
358 .exception_state
359 .set_sync(&glean, ExceptionState::EmptyDb);
360
361 // state (e) -- mitigation: store recovered client ID in DB
362 glean
363 .core_metrics
364 .client_id
365 .set_from_uuid_sync(&glean, stored_client_id);
366 } else {
367 let db_client_id = glean
368 .core_metrics
369 .client_id
370 .get_value(&glean, Some("glean_client_info"));
371
372 match db_client_id {
373 None => {
374 // state (f)
375 log::trace!("no client_id in DB. stored_client_id={stored_client_id}");
376 glean
377 .health_metrics
378 .exception_state
379 .set_sync(&glean, ExceptionState::RegenDb);
380
381 // state (e) -- mitigation: store recovered client ID in DB
382 glean
383 .core_metrics
384 .client_id
385 .set_from_uuid_sync(&glean, stored_client_id);
386 }
387 Some(db_client_id) if db_client_id == *KNOWN_CLIENT_ID => {
388 // state (i)
389 log::trace!(
390 "c0ffee client_id in DB, stored_client_id={stored_client_id}"
391 );
392 glean
393 .health_metrics
394 .recovered_client_id
395 .set_from_uuid_sync(&glean, stored_client_id);
396 glean
397 .health_metrics
398 .exception_state
399 .set_sync(&glean, ExceptionState::C0ffeeInDb);
400
401 // If we have a recovered client ID we also overwrite the database.
402 // state (e)
403 glean
404 .core_metrics
405 .client_id
406 .set_from_uuid_sync(&glean, stored_client_id);
407 }
408 Some(db_client_id) if db_client_id == stored_client_id => {
409 // all valid. nothing to do
410 log::trace!("database consistent. db_client_id == stored_client_id: {db_client_id}");
411 }
412 Some(db_client_id) => {
413 // state (g)
414 log::trace!(
415 "client_id mismatch. db_client_id{db_client_id}, stored_client_id={stored_client_id}. Overwriting file with db's client_id."
416 );
417 glean
418 .health_metrics
419 .recovered_client_id
420 .set_from_uuid_sync(&glean, stored_client_id);
421 glean
422 .health_metrics
423 .exception_state
424 .set_sync(&glean, ExceptionState::ClientIdMismatch);
425
426 // state (h)
427 glean.store_client_id_with_reporting(
428 db_client_id,
429 "client_id mismatch will re-occur.",
430 );
431 }
432 }
433 }
434 } else {
435 log::trace!("No stored client ID. Database might have it.");
436
437 let db_client_id = glean
438 .core_metrics
439 .client_id
440 .get_value(&glean, Some("glean_client_info"));
441 if let Some(db_client_id) = db_client_id {
442 // state (h)
443 glean.store_client_id_with_reporting(
444 db_client_id,
445 "Might happen on next init then.",
446 );
447 } else {
448 log::trace!("Database has no client ID either. We might be fresh!");
449 }
450 }
451 }
452
453 // Set experimentation identifier (if any)
454 if let Some(experimentation_id) = &cfg.experimentation_id {
455 glean
456 .additional_metrics
457 .experimentation_id
458 .set_sync(&glean, experimentation_id.to_string());
459 }
460
461 // The upload enabled flag may have changed since the last run, for
462 // example by the changing of a config file.
463 if cfg.upload_enabled {
464 // If upload is enabled, just follow the normal code path to
465 // instantiate the core metrics.
466 glean.on_upload_enabled();
467 } else {
468 // If upload is disabled, then clear the metrics
469 // but do not send a deletion request ping.
470 // If we have run before, and we have an old client_id,
471 // do the full upload disabled operations to clear metrics
472 // and send a deletion request ping.
473 match glean
474 .core_metrics
475 .client_id
476 .get_value(&glean, Some("glean_client_info"))
477 {
478 None => glean.clear_metrics(),
479 Some(uuid) => {
480 if let Err(e) = glean.remove_stored_client_id() {
481 log::error!("Couldn't remove client ID on disk. This might lead to a resurrection of this client ID later. Error: {e}");
482 }
483 if uuid == *KNOWN_CLIENT_ID {
484 // Previously Glean kept the KNOWN_CLIENT_ID stored.
485 // Let's ensure we erase it now.
486 if let Some(data) = glean.data_store.as_ref() {
487 _ = data.remove_single_metric(
488 Lifetime::User,
489 "glean_client_info",
490 "client_id",
491 );
492 }
493 } else {
494 // Temporarily enable uploading so we can submit a
495 // deletion request ping.
496 glean.upload_enabled = true;
497 glean.on_upload_disabled(true);
498 }
499 }
500 }
501 }
502
503 // We set this only for non-subprocess situations.
504 // If internal pings are disabled, we don't set up the MPS either;
505 // it wouldn't send any data anyway.
506 glean.schedule_metrics_pings = cfg.enable_internal_pings && cfg.use_core_mps;
507
508 // We only scan the pending pings directories **after** dealing with the upload state.
509 // If upload is disabled, we delete all pending pings files
510 // and we need to do that **before** scanning the pending pings folder
511 // to ensure we don't enqueue pings before their files are deleted.
512 let _scanning_thread = glean.upload_manager.scan_pending_pings_directories(true);
513
514 Ok(glean)
515 }
516
517 /// For tests: creates a Glean object using only the required configuration.
518 #[cfg(test)]
519 pub(crate) fn with_options(
520 data_path: &str,
521 application_id: &str,
522 upload_enabled: bool,
523 enable_internal_pings: bool,
524 ) -> Self {
525 let cfg = InternalConfiguration {
526 data_path: data_path.into(),
527 application_id: application_id.into(),
528 language_binding_name: "Rust".into(),
529 upload_enabled,
530 max_events: None,
531 delay_ping_lifetime_io: false,
532 app_build: "Unknown".into(),
533 use_core_mps: false,
534 trim_data_to_registered_pings: false,
535 log_level: None,
536 rate_limit: None,
537 enable_event_timestamps: true,
538 experimentation_id: None,
539 enable_internal_pings,
540 ping_schedule: Default::default(),
541 ping_lifetime_threshold: 0,
542 ping_lifetime_max_time: 0,
543 };
544
545 let mut glean = Self::new(cfg).unwrap();
546
547 // Disable all upload manager policies for testing
548 glean.upload_manager = PingUploadManager::no_policy(data_path);
549
550 glean
551 }
552
553 /// Destroys the database.
554 ///
555 /// After this Glean needs to be reinitialized.
556 pub fn destroy_db(&mut self) {
557 self.data_store = None;
558 }
559
560 fn client_id_file_path(&self) -> PathBuf {
561 self.data_path.join(CLIENT_ID_PLAIN_FILENAME)
562 }
563
564 /// Write the client ID to a separate plain file on disk
565 ///
566 /// Use `store_client_id_with_reporting` to handle the error cases.
567 fn store_client_id(&self, client_id: Uuid) -> Result<(), ClientIdFileError> {
568 let mut fp = File::create(self.client_id_file_path())?;
569
570 let mut buffer = Uuid::encode_buffer();
571 let uuid_str = client_id.hyphenated().encode_lower(&mut buffer);
572 fp.write_all(uuid_str.as_bytes())?;
573 fp.sync_all()?;
574
575 Ok(())
576 }
577
578 /// Write the client ID to a separate plain file on disk
579 ///
580 /// When an error occurs an error message is logged and the error is counted in a metric.
581 fn store_client_id_with_reporting(&self, client_id: Uuid, msg: &str) {
582 if let Err(err) = self.store_client_id(client_id) {
583 log::error!(
584 "Could not write {client_id} to state file. {} Error: {err}",
585 msg
586 );
587 match err {
588 ClientIdFileError::NotFound => {
589 self.health_metrics
590 .file_write_error
591 .get("not-found")
592 .add_sync(self, 1);
593 }
594 ClientIdFileError::PermissionDenied => {
595 self.health_metrics
596 .file_write_error
597 .get("permission-denied")
598 .add_sync(self, 1);
599 }
600 ClientIdFileError::IoError(..) => {
601 self.health_metrics
602 .file_write_error
603 .get("io")
604 .add_sync(self, 1);
605 }
606 ClientIdFileError::ParseError(..) => {
607 log::error!("Parse error encountered on file write. This is impossible.");
608 }
609 }
610 }
611 }
612
613 /// Try to load a client ID from the plain file on disk.
614 fn client_id_from_file(&self) -> Result<Uuid, ClientIdFileError> {
615 let uuid_str = fs::read_to_string(self.client_id_file_path())?;
616 // We don't write a newline, but we still trim it. Who knows who else touches that file by accident.
617 // We're also a bit more lenient in what we accept here:
618 // uppercase, lowercase, with or without dashes, urn, braced (and whatever else `Uuid`
619 // parses by default).
620 let uuid = Uuid::try_parse(uuid_str.trim_end())?;
621 Ok(uuid)
622 }
623
624 /// Remove the stored client ID from disk.
625 /// Should only be called when the client ID is also removed from the database.
626 fn remove_stored_client_id(&self) -> Result<(), ClientIdFileError> {
627 match fs::remove_file(self.client_id_file_path()) {
628 Ok(()) => Ok(()),
629 Err(e) if e.kind() == io::ErrorKind::NotFound => {
630 // File was already missing. No need to report that.
631 Ok(())
632 }
633 Err(e) => Err(e.into()),
634 }
635 }
636
637 /// Initializes the core metrics managed by Glean's Rust core.
638 fn initialize_core_metrics(&mut self) {
639 let need_new_client_id = match self
640 .core_metrics
641 .client_id
642 .get_value(self, Some("glean_client_info"))
643 {
644 None => true,
645 Some(uuid) => uuid == *KNOWN_CLIENT_ID,
646 };
647 if need_new_client_id {
648 let new_clientid = self.core_metrics.client_id.generate_and_set_sync(self);
649 self.store_client_id_with_reporting(new_clientid, "New client in database only.");
650 }
651
652 if self
653 .core_metrics
654 .first_run_date
655 .get_value(self, "glean_client_info")
656 .is_none()
657 {
658 self.core_metrics.first_run_date.set_sync(self, None);
659 // The `first_run_date` field is generated on the very first run
660 // and persisted across upload toggling. We can assume that the only
661 // time it is set is indeed our "first run".
662 self.is_first_run = true;
663 }
664
665 self.set_application_lifetime_core_metrics();
666 }
667
668 /// Initializes the database metrics managed by Glean's Rust core.
669 fn initialize_database_metrics(&mut self) {
670 log::trace!("Initializing database metrics");
671
672 if let Some(size) = self
673 .data_store
674 .as_ref()
675 .and_then(|database| database.file_size())
676 {
677 log::trace!("Database file size: {}", size.get());
678 self.database_metrics
679 .size
680 .accumulate_sync(self, size.get() as i64)
681 }
682
683 if let Some(rkv_load_state) = self
684 .data_store
685 .as_ref()
686 .and_then(|database| database.rkv_load_state())
687 {
688 self.database_metrics
689 .rkv_load_error
690 .set_sync(self, rkv_load_state)
691 }
692 }
693
694 /// Signals that the environment is ready to submit pings.
695 ///
696 /// Should be called when Glean is initialized to the point where it can correctly assemble pings.
697 /// Usually called from the language binding after all of the core metrics have been set
698 /// and the ping types have been registered.
699 ///
700 /// # Arguments
701 ///
702 /// * `trim_data_to_registered_pings` - Whether to limit storage to data belonging to
703 /// pings previously registered via `register_ping_type`.
704 ///
705 /// # Returns
706 ///
707 /// Whether the "events" ping was submitted.
708 pub fn on_ready_to_submit_pings(&mut self, trim_data_to_registered_pings: bool) -> bool {
709 // When upload is disabled on init we already clear out metrics.
710 // However at that point not all pings are registered and so we keep that data around.
711 // By the time we would be ready to submit we try again cleaning out metrics from
712 // now-known pings.
713 if !self.upload_enabled {
714 log::debug!("on_ready_to_submit_pings. let's clear pings once again.");
715 self.clear_metrics();
716 }
717
718 self.event_data_store
719 .flush_pending_events_on_startup(self, trim_data_to_registered_pings)
720 }
721
722 /// Sets whether upload is enabled or not.
723 ///
724 /// When uploading is disabled, metrics aren't recorded at all and no
725 /// data is uploaded.
726 ///
727 /// When disabling, all pending metrics, events and queued pings are cleared.
728 ///
729 /// When enabling, the core Glean metrics are recreated.
730 ///
731 /// If the value of this flag is not actually changed, this is a no-op.
732 ///
733 /// # Arguments
734 ///
735 /// * `flag` - When true, enable metric collection.
736 ///
737 /// # Returns
738 ///
739 /// Whether the flag was different from the current value,
740 /// and actual work was done to clear or reinstate metrics.
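    ///
    /// # Example
    ///
    /// A sketch (assuming `glean` is an initialized [`Glean`] with upload currently enabled):
    ///
    /// ```rust,ignore
    /// // Disabling clears pending data and queues a deletion-request ping.
    /// assert!(glean.set_upload_enabled(false));
    /// // Setting the same value again is a no-op.
    /// assert!(!glean.set_upload_enabled(false));
    /// ```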
741 pub fn set_upload_enabled(&mut self, flag: bool) -> bool {
742 log::info!("Upload enabled: {:?}", flag);
743
744 if self.upload_enabled != flag {
745 if flag {
746 self.on_upload_enabled();
747 } else {
748 self.on_upload_disabled(false);
749 }
750 true
751 } else {
752 false
753 }
754 }
755
756 /// Enable or disable a ping.
757 ///
758 /// Disabling a ping causes all data for that ping to be removed from storage
759 /// and all pending pings of that type to be deleted.
760 ///
761 /// **Note**: Do not use directly. Call `PingType::set_enabled` instead.
762 #[doc(hidden)]
763 pub fn set_ping_enabled(&mut self, ping: &PingType, enabled: bool) {
764 ping.store_enabled(enabled);
765 if !enabled {
766 if let Some(data) = self.data_store.as_ref() {
767 _ = data.clear_ping_lifetime_storage(ping.name());
768 _ = data.clear_lifetime_storage(Lifetime::User, ping.name());
769 _ = data.clear_lifetime_storage(Lifetime::Application, ping.name());
770 }
771 let ping_maker = PingMaker::new();
772 let disabled_pings = &[ping.name()][..];
773 if let Err(err) = ping_maker.clear_pending_pings(self.get_data_path(), disabled_pings) {
774 log::warn!("Error clearing pending pings: {}", err);
775 }
776 }
777 }
778
779 /// Determines whether upload is enabled.
780 ///
781 /// When upload is disabled, no data will be recorded.
782 pub fn is_upload_enabled(&self) -> bool {
783 self.upload_enabled
784 }
785
786 /// Check if a ping is enabled.
787 ///
788 /// Note that some internal "ping" names are considered to be always enabled.
789 ///
790 /// If a ping is not known to Glean ("unregistered") it is always considered disabled.
791 /// If a ping is known, it can be enabled/disabled at any point.
792 /// Only data for enabled pings is recorded.
793 /// Disabled pings are never submitted.
794 pub fn is_ping_enabled(&self, ping: &str) -> bool {
795 // We "abuse" pings/storage names for internal data.
796 const DEFAULT_ENABLED: &[&str] = &[
797 "glean_client_info",
798 "glean_internal_info",
799 // for `experimentation_id`.
800 // That should probably have gone into `glean_internal_info` instead.
801 "all-pings",
802 ];
803
804 // `client_info`-like stuff is always enabled.
805 if DEFAULT_ENABLED.contains(&ping) {
806 return true;
807 }
808
809 let Some(ping) = self.ping_registry.get(ping) else {
810 log::trace!("Unknown ping {ping}. Assuming disabled.");
811 return false;
812 };
813
814 ping.enabled(self)
815 }
816
817 /// Handles the changing of state from upload disabled to enabled.
818 ///
819 /// Should only be called when the state actually changes.
820 ///
821 /// The `upload_enabled` flag is set to true and the core Glean metrics are
822 /// recreated.
823 fn on_upload_enabled(&mut self) {
824 self.upload_enabled = true;
825 self.initialize_core_metrics();
826 self.initialize_database_metrics();
827 }
828
829 /// Handles the changing of state from upload enabled to disabled.
830 ///
831 /// Should only be called when the state actually changes.
832 ///
833 /// A deletion_request ping is sent, all pending metrics, events and queued
834 /// pings are cleared, and the client_id is set to KNOWN_CLIENT_ID.
835 /// Afterward, the upload_enabled flag is set to false.
836 fn on_upload_disabled(&mut self, during_init: bool) {
837 // The upload_enabled flag should be true here, or the deletion ping
838 // won't be submitted.
839 let reason = if during_init {
840 Some("at_init")
841 } else {
842 Some("set_upload_enabled")
843 };
844 if !self
845 .internal_pings
846 .deletion_request
847 .submit_sync(self, reason)
848 {
849 log::error!("Failed to submit deletion-request ping on optout.");
850 }
851 self.clear_metrics();
852 self.upload_enabled = false;
853 }
854
855 /// Clear any pending metrics when telemetry is disabled.
856 fn clear_metrics(&mut self) {
857 // Clear the pending pings queue and acquire the lock
858 // so that it can't be accessed until this function is done.
859 let _lock = self.upload_manager.clear_ping_queue();
860
861 // Clear any pending pings that follow `collection_enabled`.
862 let ping_maker = PingMaker::new();
863 let disabled_pings = self
864 .ping_registry
865 .iter()
866 .filter(|&(_ping_name, ping)| ping.follows_collection_enabled())
867 .map(|(ping_name, _ping)| &ping_name[..])
868 .collect::<Vec<_>>();
869 if let Err(err) = ping_maker.clear_pending_pings(self.get_data_path(), &disabled_pings) {
870 log::warn!("Error clearing pending pings: {}", err);
871 }
872
873 if let Err(e) = self.remove_stored_client_id() {
874 log::error!("Couldn't remove client ID on disk. This might lead to a resurrection of this client ID later. Error: {e}");
875 }
876
877 // Delete all stored metrics.
878 // Note that this also includes the ping sequence numbers, so it has
879 // the effect of resetting those to their initial values.
880 if let Some(data) = self.data_store.as_ref() {
881 _ = data.clear_lifetime_storage(Lifetime::User, "glean_internal_info");
882 _ = data.remove_single_metric(Lifetime::User, "glean_client_info", "client_id");
883 for (ping_name, ping) in &self.ping_registry {
884 if ping.follows_collection_enabled() {
885 _ = data.clear_ping_lifetime_storage(ping_name);
886 _ = data.clear_lifetime_storage(Lifetime::User, ping_name);
887 _ = data.clear_lifetime_storage(Lifetime::Application, ping_name);
888 }
889 }
890 }
891 if let Err(err) = self.event_data_store.clear_all() {
892 log::warn!("Error clearing pending events: {}", err);
893 }
894
895 // This does not clear the experiments store (which isn't managed by the
896 // StorageEngineManager), since doing so would mean we would have to have the
897 // application tell us again which experiments are active if telemetry is
898 // re-enabled.
899 }
900
901 /// Gets the application ID as specified on instantiation.
902 pub fn get_application_id(&self) -> &str {
903 &self.application_id
904 }
905
906 /// Gets the data path of this instance.
907 pub fn get_data_path(&self) -> &Path {
908 &self.data_path
909 }
910
911 /// Gets a handle to the database.
912 #[track_caller] // If this fails we're interested in the caller.
913 pub fn storage(&self) -> &Database {
914 self.data_store.as_ref().expect("No database found")
915 }
916
917 /// Gets an optional handle to the database.
918 pub fn storage_opt(&self) -> Option<&Database> {
919 self.data_store.as_ref()
920 }
921
922 /// Gets a handle to the event database.
923 pub fn event_storage(&self) -> &EventDatabase {
924 &self.event_data_store
925 }
926
927 pub(crate) fn with_timestamps(&self) -> bool {
928 self.with_timestamps
929 }
930
931 /// Gets the maximum number of events to store before sending a ping.
932 pub fn get_max_events(&self) -> usize {
933 let remote_settings_config = self.remote_settings_config.lock().unwrap();
934
935 if let Some(max_events) = remote_settings_config.event_threshold {
936 max_events as usize
937 } else {
938 self.max_events as usize
939 }
940 }
941
942 /// Gets the next task for an uploader.
943 ///
944 /// This can be one of:
945 ///
946 /// * [`Wait`](PingUploadTask::Wait) - which means the requester should ask
947 /// again later;
948 /// * [`Upload(PingRequest)`](PingUploadTask::Upload) - which means there is
949 /// a ping to upload. This wraps the actual request object;
950 /// * [`Done`](PingUploadTask::Done) - which means the requester should stop
951 /// asking for now.
952 ///
953 /// # Returns
954 ///
955 /// A [`PingUploadTask`] representing the next task.
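    ///
    /// # Example
    ///
    /// A hedged sketch of the polling loop a language binding might run; the `request`
    /// handling and the back-off strategy are left to the embedder:
    ///
    /// ```rust,ignore
    /// loop {
    ///     match glean.get_upload_task() {
    ///         PingUploadTask::Upload { request } => {
    ///             // Hand `request` to the embedder's HTTP stack, then report the
    ///             // outcome via `process_ping_upload_response`.
    ///         }
    ///         PingUploadTask::Wait { .. } => {
    ///             // Back off before asking again.
    ///         }
    ///         PingUploadTask::Done { .. } => break,
    ///     }
    /// }
    /// ```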
956 pub fn get_upload_task(&self) -> PingUploadTask {
957 self.upload_manager.get_upload_task(self, self.log_pings())
958 }
959
960 /// Processes the response from an attempt to upload a ping.
961 ///
962 /// # Arguments
963 ///
964 /// * `uuid` - The UUID of the ping in question.
965 /// * `status` - The upload result.
966 pub fn process_ping_upload_response(
967 &self,
968 uuid: &str,
969 status: UploadResult,
970 ) -> UploadTaskAction {
971 self.upload_manager
972 .process_ping_upload_response(self, uuid, status)
973 }
974
975 /// Takes a snapshot for the given store and optionally clears it.
976 ///
977 /// # Arguments
978 ///
979 /// * `store_name` - The store to snapshot.
980 /// * `clear_store` - Whether to clear the store after snapshotting.
981 ///
982 /// # Returns
983 ///
984 /// The snapshot in a string encoded as JSON. If the snapshot is empty, returns an empty string.
985 pub fn snapshot(&mut self, store_name: &str, clear_store: bool) -> String {
986 StorageManager
987 .snapshot(self.storage(), store_name, clear_store)
988 .unwrap_or_else(|| String::from(""))
989 }
990
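    /// Builds the upload path for a ping, i.e.
    /// `/submit/{application_id}/{ping_name}/{GLEAN_SCHEMA_VERSION}/{doc_id}`
    /// (see the format string below).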
991 pub(crate) fn make_path(&self, ping_name: &str, doc_id: &str) -> String {
992 format!(
993 "/submit/{}/{}/{}/{}",
994 self.get_application_id(),
995 ping_name,
996 GLEAN_SCHEMA_VERSION,
997 doc_id
998 )
999 }
1000
1001 /// Collects and submits a ping by name for eventual uploading.
1002 ///
1003 /// The ping content is assembled as soon as possible, but upload is not
1004 /// guaranteed to happen immediately, as that depends on the upload policies.
1005 ///
1006 /// If the ping currently contains no content, it will not be sent,
1007 /// unless it is configured to be sent if empty.
1008 ///
1009 /// # Arguments
1010 ///
1011 /// * `ping_name` - The name of the ping to submit
1012 /// * `reason` - A reason code to include in the ping
1013 ///
1014 /// # Returns
1015 ///
1016 /// Whether the ping was successfully assembled and queued.
1017 ///
1018 /// # Errors
1019 ///
1020 /// If collecting or writing the ping to disk failed.
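    ///
    /// # Example
    ///
    /// A sketch (assuming a ping named "sample" was registered earlier, as in the [`Glean`] example):
    ///
    /// ```rust,ignore
    /// let submitted = glean.submit_ping_by_name("sample", None);
    /// ```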
1021 pub fn submit_ping_by_name(&self, ping_name: &str, reason: Option<&str>) -> bool {
1022 match self.get_ping_by_name(ping_name) {
1023 None => {
1024 log::error!("Attempted to submit unknown ping '{}'", ping_name);
1025 false
1026 }
1027 Some(ping) => ping.submit_sync(self, reason),
1028 }
1029 }
1030
1031 /// Gets a [`PingType`] by name.
1032 ///
1033 /// # Returns
1034 ///
1035 /// The [`PingType`] of a ping if the given name was registered before, [`None`]
1036 /// otherwise.
1037 pub fn get_ping_by_name(&self, ping_name: &str) -> Option<&PingType> {
1038 self.ping_registry.get(ping_name)
1039 }
1040
1041 /// Register a new [`PingType`](metrics/struct.PingType.html).
1042 pub fn register_ping_type(&mut self, ping: &PingType) {
1043 if self.ping_registry.contains_key(ping.name()) {
1044 log::debug!("Duplicate ping named '{}'", ping.name())
1045 }
1046
1047 self.ping_registry
1048 .insert(ping.name().to_string(), ping.clone());
1049 }
1050
1051 /// Gets a list of currently registered ping names.
1052 ///
1053 /// # Returns
1054 ///
1055 /// The list of ping names that are currently registered.
1056 pub fn get_registered_ping_names(&self) -> Vec<&str> {
1057 self.ping_registry.keys().map(String::as_str).collect()
1058 }
1059
1060 /// Get create time of the Glean object.
1061 pub(crate) fn start_time(&self) -> DateTime<FixedOffset> {
1062 self.start_time
1063 }
1064
1065 /// Indicates that an experiment is running.
1066 ///
1067 /// Glean will then add an experiment annotation to the environment
1068 /// which is sent with pings. This information is not persisted between runs.
1069 ///
1070 /// # Arguments
1071 ///
1072 /// * `experiment_id` - The id of the active experiment (maximum 30 bytes).
1073 /// * `branch` - The experiment branch (maximum 30 bytes).
1074 /// * `extra` - Optional metadata to output with the ping.
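    ///
    /// # Example
    ///
    /// A sketch with illustrative identifiers (assuming `glean` is an initialized [`Glean`]):
    ///
    /// ```rust,ignore
    /// use std::collections::HashMap;
    ///
    /// glean.set_experiment_active("pref-rollout".to_string(), "control".to_string(), HashMap::new());
    /// // ... later, when the experiment ends:
    /// glean.set_experiment_inactive("pref-rollout".to_string());
    /// ```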
1075 pub fn set_experiment_active(
1076 &self,
1077 experiment_id: String,
1078 branch: String,
1079 extra: HashMap<String, String>,
1080 ) {
1081 let metric = ExperimentMetric::new(self, experiment_id);
1082 metric.set_active_sync(self, branch, extra);
1083 }
1084
1085 /// Indicates that an experiment is no longer running.
1086 ///
1087 /// # Arguments
1088 ///
1089 /// * `experiment_id` - The id of the active experiment to deactivate (maximum 30 bytes).
1090 pub fn set_experiment_inactive(&self, experiment_id: String) {
1091 let metric = ExperimentMetric::new(self, experiment_id);
1092 metric.set_inactive_sync(self);
1093 }
1094
1095 /// **Test-only API (exported for FFI purposes).**
1096 ///
1097 /// Gets stored data for the requested experiment.
1098 ///
1099 /// # Arguments
1100 ///
1101 /// * `experiment_id` - The id of the active experiment (maximum 30 bytes).
1102 pub fn test_get_experiment_data(&self, experiment_id: String) -> Option<RecordedExperiment> {
1103 let metric = ExperimentMetric::new(self, experiment_id);
1104 metric.test_get_value(self)
1105 }
1106
1107 /// **Test-only API (exported for FFI purposes).**
1108 ///
1109 /// Gets stored experimentation id annotation.
1110 pub fn test_get_experimentation_id(&self) -> Option<String> {
1111 self.additional_metrics
1112 .experimentation_id
1113 .get_value(self, None)
1114 }
1115
1116 /// Set configuration to override the default state, typically initiated from a
1117 /// remote_settings experiment or rollout.
1118 ///
1119 /// # Arguments
1120 ///
1121 /// * `cfg` - The `RemoteSettingsConfig` object to merge into the current configuration
1122 pub fn apply_server_knobs_config(&self, cfg: RemoteSettingsConfig) {
1123 // Set the current RemoteSettingsConfig, keeping the lock until the epoch is
1124 // updated, to prevent reading a "new" config with an "old" epoch
1125 let mut remote_settings_config = self.remote_settings_config.lock().unwrap();
1126
1127 // Merge the existing metrics configuration with the supplied one
1128 remote_settings_config
1129 .metrics_enabled
1130 .extend(cfg.metrics_enabled);
1131
1132 // Merge the existing ping configuration with the supplied one
1133 remote_settings_config
1134 .pings_enabled
1135 .extend(cfg.pings_enabled);
1136
1137 remote_settings_config.event_threshold = cfg.event_threshold;
1138
1139 // Update remote_settings epoch
1140 self.remote_settings_epoch.fetch_add(1, Ordering::SeqCst);
1141 }
1142
1143 /// Persists [`Lifetime::Ping`] data that might be in memory in case
1144 /// [`delay_ping_lifetime_io`](InternalConfiguration::delay_ping_lifetime_io) is set
1145 /// or was set at a previous time.
1146 ///
1147 /// If there is no data to persist, this function does nothing.
1148 pub fn persist_ping_lifetime_data(&self) -> Result<()> {
1149 if let Some(data) = self.data_store.as_ref() {
1150 return data.persist_ping_lifetime_data();
1151 }
1152
1153 Ok(())
1154 }
1155
1156 /// Sets internally-handled application lifetime metrics.
1157 fn set_application_lifetime_core_metrics(&self) {
1158 self.core_metrics.os.set_sync(self, system::OS);
1159 }
1160
1161 /// **This is not meant to be used directly.**
1162 ///
1163 /// Clears all the metrics that have [`Lifetime::Application`].
1164 pub fn clear_application_lifetime_metrics(&self) {
1165 log::trace!("Clearing Lifetime::Application metrics");
1166 if let Some(data) = self.data_store.as_ref() {
1167 data.clear_lifetime(Lifetime::Application);
1168 }
1169
1170 // Set internally handled app lifetime metrics again.
1171 self.set_application_lifetime_core_metrics();
1172 }
1173
1174 /// Whether or not this is the first run on this profile.
1175 pub fn is_first_run(&self) -> bool {
1176 self.is_first_run
1177 }
1178
1179 /// Sets a debug view tag.
1180 ///
1181 /// This will return `false` in case `value` is not a valid tag.
1182 ///
1183 /// When the debug view tag is set, pings include an `X-Debug-ID` header with the value of the tag
1184 /// and are sent to the ["Ping Debug Viewer"](https://mozilla.github.io/glean/book/dev/core/internal/debug-pings.html).
1185 ///
1186 /// # Arguments
1187 ///
1188 /// * `value` - A valid HTTP header value. Must match the regex: "[a-zA-Z0-9-]{1,20}".
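    ///
    /// # Example
    ///
    /// A sketch (assuming `glean` is an initialized [`Glean`]):
    ///
    /// ```rust,ignore
    /// assert!(glean.set_debug_view_tag("test-tag"));
    /// assert!(!glean.set_debug_view_tag("invalid tag!"));
    /// ```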
1189 pub fn set_debug_view_tag(&mut self, value: &str) -> bool {
1190 self.debug.debug_view_tag.set(value.into())
1191 }
1192
1193 /// Return the value for the debug view tag or [`None`] if it hasn't been set.
1194 ///
1195 /// The `debug_view_tag` may be set from an environment variable
1196 /// (`GLEAN_DEBUG_VIEW_TAG`) or through the [`set_debug_view_tag`](Glean::set_debug_view_tag) function.
1197 pub fn debug_view_tag(&self) -> Option<&String> {
1198 self.debug.debug_view_tag.get()
1199 }
1200
1201 /// Sets source tags.
1202 ///
1203 /// This will return `false` in case `value` contains invalid tags.
1204 ///
1205 /// Ping tags will show up in the destination datasets after ingestion.
1206 ///
1207 /// **Note** If one or more tags are invalid, all tags are ignored.
1208 ///
1209 /// # Arguments
1210 ///
1211 /// * `value` - A vector of at most 5 valid HTTP header values. Individual tags must match the regex: "[a-zA-Z0-9-]{1,20}".
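    ///
    /// # Example
    ///
    /// A sketch (tag values are illustrative):
    ///
    /// ```rust,ignore
    /// assert!(glean.set_source_tags(vec!["automation".to_string(), "perf".to_string()]));
    /// ```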
1212 pub fn set_source_tags(&mut self, value: Vec<String>) -> bool {
1213 self.debug.source_tags.set(value)
1214 }
1215
1216 /// Return the value for the source tags or [`None`] if it hasn't been set.
1217 ///
1218 /// The `source_tags` may be set from an environment variable (`GLEAN_SOURCE_TAGS`)
1219 /// or through the [`set_source_tags`](Glean::set_source_tags) function.
1220 pub(crate) fn source_tags(&self) -> Option<&Vec<String>> {
1221 self.debug.source_tags.get()
1222 }
1223
1224 /// Sets the log pings debug option.
1225 ///
1226 /// This will return `false` in case we are unable to set the option.
1227 ///
1228 /// When the log pings debug option is `true`,
1229 /// we log the payload of all successfully assembled pings.
1230 ///
1231 /// # Arguments
1232 ///
1233 /// * `value` - The value of the log pings option
1234 pub fn set_log_pings(&mut self, value: bool) -> bool {
1235 self.debug.log_pings.set(value)
1236 }
1237
1238 /// Return the value for the log pings debug option or `false` if it hasn't been set.
1239 ///
1240 /// The `log_pings` option may be set from an environment variable (`GLEAN_LOG_PINGS`)
1241 /// or through the `set_log_pings` function.
1242 pub fn log_pings(&self) -> bool {
1243 self.debug.log_pings.get().copied().unwrap_or(false)
1244 }
1245
1246 fn get_dirty_bit_metric(&self) -> metrics::BooleanMetric {
1247 metrics::BooleanMetric::new(CommonMetricData {
1248 name: "dirtybit".into(),
1249 // We don't need a category, the name is already unique
1250 category: "".into(),
1251 send_in_pings: vec![INTERNAL_STORAGE.into()],
1252 lifetime: Lifetime::User,
1253 ..Default::default()
1254 })
1255 }
1256
1257 /// **This is not meant to be used directly.**
1258 ///
1259 /// Sets the value of a "dirty flag" in the permanent storage.
1260 ///
1261 /// The "dirty flag" is meant to have the following behaviour, implemented
1262 /// by the consumers of the FFI layer:
1263 ///
1264 /// - on mobile: set to `false` when going to background or shutting down,
1265 /// set to `true` at startup and when going to foreground.
1266 /// - on non-mobile platforms: set to `true` at startup and `false` at
1267 /// shutdown.
1268 ///
1269 /// At startup, before setting its new value, if the "dirty flag" value is
1270 /// `true`, then Glean knows it did not exit cleanly and can implement
1271 /// coping mechanisms (e.g. sending a `baseline` ping).
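    ///
    /// # Example
    ///
    /// A sketch of the non-mobile startup/shutdown protocol described above:
    ///
    /// ```rust,ignore
    /// // At startup, check the previous value before setting the new one.
    /// if glean.is_dirty_flag_set() {
    ///     // The previous run did not exit cleanly; react accordingly.
    /// }
    /// glean.set_dirty_flag(true);
    ///
    /// // At clean shutdown:
    /// glean.set_dirty_flag(false);
    /// ```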
1272 pub fn set_dirty_flag(&self, new_value: bool) {
1273 self.get_dirty_bit_metric().set_sync(self, new_value);
1274 }
1275
1276 /// **This is not meant to be used directly.**
1277 ///
1278 /// Checks the stored value of the "dirty flag".
1279 pub fn is_dirty_flag_set(&self) -> bool {
1280 let dirty_bit_metric = self.get_dirty_bit_metric();
1281 match StorageManager.snapshot_metric(
1282 self.storage(),
1283 INTERNAL_STORAGE,
1284 &dirty_bit_metric.meta().identifier(self),
1285 dirty_bit_metric.meta().inner.lifetime,
1286 ) {
1287 Some(Metric::Boolean(b)) => b,
1288 _ => false,
1289 }
1290 }
1291
1292 /// Performs the collection/cleanup operations required by becoming active.
1293 ///
1294 /// This function generates a baseline ping with reason `active`
1295 /// and then sets the dirty bit.
1296 pub fn handle_client_active(&mut self) {
1297 if !self
1298 .internal_pings
1299 .baseline
1300 .submit_sync(self, Some("active"))
1301 {
1302 log::info!("baseline ping not submitted on active");
1303 }
1304
1305 self.set_dirty_flag(true);
1306 }
1307
1308 /// Performs the collection/cleanup operations required by becoming inactive.
1309 ///
1310 /// This function generates a baseline and an events ping with reason
1311 /// `inactive` and then clears the dirty bit.
1312 pub fn handle_client_inactive(&mut self) {
1313 if !self
1314 .internal_pings
1315 .baseline
1316 .submit_sync(self, Some("inactive"))
1317 {
1318 log::info!("baseline ping not submitted on inactive");
1319 }
1320
1321 if !self
1322 .internal_pings
1323 .events
1324 .submit_sync(self, Some("inactive"))
1325 {
1326 log::info!("events ping not submitted on inactive");
1327 }
1328
1329 self.set_dirty_flag(false);
1330 }
1331
1332 /// **Test-only API (exported for FFI purposes).**
1333 ///
1334 /// Deletes all stored metrics.
1335 ///
1336 /// Note that this also includes the ping sequence numbers, so it has
1337 /// the effect of resetting those to their initial values.
1338 pub fn test_clear_all_stores(&self) {
1339 if let Some(data) = self.data_store.as_ref() {
1340 data.clear_all()
1341 }
1342 // We don't care about this failing, maybe the data does just not exist.
1343 let _ = self.event_data_store.clear_all();
1344 }
1345
1346 /// Instructs the Metrics Ping Scheduler's thread to exit cleanly.
1347 /// If Glean was configured with `use_core_mps: false`, this has no effect.
1348 pub fn cancel_metrics_ping_scheduler(&self) {
1349 if self.schedule_metrics_pings {
1350 scheduler::cancel();
1351 }
1352 }
1353
1354 /// Instructs the Metrics Ping Scheduler to begin scheduling metrics pings.
1355 /// If Glean was configured with `use_core_mps: false`, this has no effect.
1356 pub fn start_metrics_ping_scheduler(&self) {
1357 if self.schedule_metrics_pings {
1358 scheduler::schedule(self);
1359 }
1360 }
1361
1362 /// Updates attribution fields with new values.
1363 /// AttributionMetrics fields with `None` values will not overwrite older values.
1364 pub fn update_attribution(&self, attribution: AttributionMetrics) {
1365 if let Some(source) = attribution.source {
1366 self.core_metrics.attribution_source.set_sync(self, source);
1367 }
1368 if let Some(medium) = attribution.medium {
1369 self.core_metrics.attribution_medium.set_sync(self, medium);
1370 }
1371 if let Some(campaign) = attribution.campaign {
1372 self.core_metrics
1373 .attribution_campaign
1374 .set_sync(self, campaign);
1375 }
1376 if let Some(term) = attribution.term {
1377 self.core_metrics.attribution_term.set_sync(self, term);
1378 }
1379 if let Some(content) = attribution.content {
1380 self.core_metrics
1381 .attribution_content
1382 .set_sync(self, content);
1383 }
1384 }
1385
1386 /// **TEST-ONLY Method**
1387 ///
1388 /// Returns the current attribution metrics.
1389 pub fn test_get_attribution(&self) -> AttributionMetrics {
1390 AttributionMetrics {
1391 source: self
1392 .core_metrics
1393 .attribution_source
1394 .get_value(self, Some("glean_client_info")),
1395 medium: self
1396 .core_metrics
1397 .attribution_medium
1398 .get_value(self, Some("glean_client_info")),
1399 campaign: self
1400 .core_metrics
1401 .attribution_campaign
1402 .get_value(self, Some("glean_client_info")),
1403 term: self
1404 .core_metrics
1405 .attribution_term
1406 .get_value(self, Some("glean_client_info")),
1407 content: self
1408 .core_metrics
1409 .attribution_content
1410 .get_value(self, Some("glean_client_info")),
1411 }
1412 }
1413
1414 /// Updates distribution fields with new values.
1415 /// DistributionMetrics fields with `None` values will not overwrite older values.
1416 pub fn update_distribution(&self, distribution: DistributionMetrics) {
1417 if let Some(name) = distribution.name {
1418 self.core_metrics.distribution_name.set_sync(self, name);
1419 }
1420 }
1421
1422 /// **TEST-ONLY Method**
1423 ///
1424 /// Returns the current distribution metrics.
1425 pub fn test_get_distribution(&self) -> DistributionMetrics {
1426 DistributionMetrics {
1427 name: self
1428 .core_metrics
1429 .distribution_name
1430 .get_value(self, Some("glean_client_info")),
1431 }
1432 }
1433}