use std::collections::HashMap;
use std::fs::{self, File};
use std::io::{self, Write};
use std::path::{Path, PathBuf};
use std::sync::atomic::{AtomicU8, Ordering};
use std::sync::{Arc, Mutex};
use std::time::Duration;
use chrono::{DateTime, FixedOffset};
use malloc_size_of_derive::MallocSizeOf;
use once_cell::sync::OnceCell;
use uuid::Uuid;
use crate::database::Database;
use crate::debug::DebugOptions;
use crate::error::ClientIdFileError;
use crate::event_database::EventDatabase;
use crate::internal_metrics::{
AdditionalMetrics, CoreMetrics, DatabaseMetrics, ExceptionState, HealthMetrics,
};
use crate::internal_pings::InternalPings;
use crate::metrics::{
self, ExperimentMetric, Metric, MetricType, PingType, RecordedExperiment, RemoteSettingsConfig,
};
use crate::ping::PingMaker;
use crate::storage::{StorageManager, INTERNAL_STORAGE};
use crate::upload::{PingUploadManager, PingUploadTask, UploadResult, UploadTaskAction};
use crate::util::{local_now_with_offset, sanitize_application_id};
use crate::{
scheduler, system, AttributionMetrics, CommonMetricData, DistributionMetrics, ErrorKind,
InternalConfiguration, Lifetime, PingRateLimit, Result, DEFAULT_MAX_EVENTS,
GLEAN_SCHEMA_VERSION, GLEAN_VERSION, KNOWN_CLIENT_ID,
};
/// Name of the plain-text file the client ID is mirrored to,
/// used to detect and recover from database loss.
const CLIENT_ID_PLAIN_FILENAME: &str = "client_id.txt";

/// The global Glean instance, created once via [`setup_glean`].
static GLEAN: OnceCell<Mutex<Glean>> = OnceCell::new();

/// Default length of the ping-upload rate-limit window, in seconds.
pub const DEFAULT_SECONDS_PER_INTERVAL: u64 = 60;
/// Default maximum number of pings uploaded per rate-limit window.
pub const DEFAULT_PINGS_PER_INTERVAL: u32 = 15;
/// Gets a reference to the global Glean object, if it was initialized.
pub fn global_glean() -> Option<&'static Mutex<Glean>> {
    GLEAN.get()
}

/// Sets or replaces the global Glean object.
///
/// On first call this stores `glean` in the global cell; on subsequent
/// calls it overwrites the existing instance in place.
/// Always returns `Ok(())`.
pub fn setup_glean(glean: Glean) -> Result<()> {
    if GLEAN.get().is_none() {
        // `set` can still fail if another thread won the race between
        // our `get` check and this call; that is only worth a warning.
        if GLEAN.set(Mutex::new(glean)).is_err() {
            log::warn!(
                "Global Glean object is initialized already. This probably happened concurrently."
            )
        }
    } else {
        // Re-initialization: swap the existing instance under the lock.
        let mut lock = GLEAN.get().unwrap().lock().unwrap();
        *lock = glean;
    }
    Ok(())
}

/// Runs `f` with a shared reference to the global Glean object.
///
/// # Panics
/// Panics if the global Glean object is not initialized.
pub fn with_glean<F, R>(f: F) -> R
where
    F: FnOnce(&Glean) -> R,
{
    let glean = global_glean().expect("Global Glean object not initialized");
    let lock = glean.lock().unwrap();
    f(&lock)
}

/// Runs `f` with an exclusive reference to the global Glean object.
///
/// # Panics
/// Panics if the global Glean object is not initialized.
pub fn with_glean_mut<F, R>(f: F) -> R
where
    F: FnOnce(&mut Glean) -> R,
{
    let glean = global_glean().expect("Global Glean object not initialized");
    let mut lock = glean.lock().unwrap();
    f(&mut lock)
}

/// Runs `f` with the global Glean object, or returns `None`
/// if it is not initialized.
pub fn with_opt_glean<F, R>(f: F) -> Option<R>
where
    F: FnOnce(&Glean) -> R,
{
    let glean = global_glean()?;
    let lock = glean.lock().unwrap();
    Some(f(&lock))
}
/// The object holding Glean's state: metric databases, the ping
/// registry, the upload machinery and debug options.
#[derive(Debug, MallocSizeOf)]
pub struct Glean {
    /// Whether upload (data collection) is currently enabled.
    upload_enabled: bool,
    /// The metrics database; `None` in subprocess mode.
    pub(crate) data_store: Option<Database>,
    /// The event database.
    event_data_store: EventDatabase,
    pub(crate) core_metrics: CoreMetrics,
    pub(crate) additional_metrics: AdditionalMetrics,
    pub(crate) database_metrics: DatabaseMetrics,
    pub(crate) health_metrics: HealthMetrics,
    /// Glean's built-in pings (baseline, metrics, events, ...).
    pub(crate) internal_pings: InternalPings,
    /// Directory where all Glean data lives.
    data_path: PathBuf,
    /// Sanitized application ID, used in the submission path.
    application_id: String,
    /// All registered pings, keyed by ping name.
    ping_registry: HashMap<String, PingType>,
    #[ignore_malloc_size_of = "external non-allocating type"]
    start_time: DateTime<FixedOffset>,
    /// Configured event threshold before an events ping is submitted.
    max_events: u32,
    /// Whether this is the first run (no first_run_date stored yet).
    is_first_run: bool,
    pub(crate) upload_manager: PingUploadManager,
    debug: DebugOptions,
    pub(crate) app_build: String,
    /// Whether the Metrics Ping Scheduler should run.
    pub(crate) schedule_metrics_pings: bool,
    /// Bumped on every server-knobs config change.
    pub(crate) remote_settings_epoch: AtomicU8,
    #[ignore_malloc_size_of = "TODO: Expose Glean's inner memory allocations (bug 1960592)"]
    pub(crate) remote_settings_config: Arc<Mutex<RemoteSettingsConfig>>,
    /// Whether events carry wall-clock timestamps.
    pub(crate) with_timestamps: bool,
    /// Mapping of ping names to pings scheduled alongside them.
    pub(crate) ping_schedule: HashMap<String, Vec<String>>,
}
impl Glean {
/// Creates a Glean object suitable for use in a subprocess:
/// the metrics database is left closed (`data_store: None`) and
/// core metrics are not initialized.
///
/// # Arguments
/// * `cfg` - the internal configuration.
/// * `scan_directories` - whether to spawn a detached thread scanning
///   the pending-pings directories on startup.
pub fn new_for_subprocess(cfg: &InternalConfiguration, scan_directories: bool) -> Result<Self> {
    log::info!("Creating new Glean v{}", GLEAN_VERSION);

    let application_id = sanitize_application_id(&cfg.application_id);
    if application_id.is_empty() {
        return Err(ErrorKind::InvalidConfig.into());
    }

    let data_path = Path::new(&cfg.data_path);
    let event_data_store = EventDatabase::new(data_path)?;

    let mut upload_manager = PingUploadManager::new(&cfg.data_path, &cfg.language_binding_name);
    // Fall back to the default rate limit when none is configured.
    let rate_limit = cfg.rate_limit.as_ref().unwrap_or(&PingRateLimit {
        seconds_per_interval: DEFAULT_SECONDS_PER_INTERVAL,
        pings_per_interval: DEFAULT_PINGS_PER_INTERVAL,
    });
    upload_manager.set_rate_limiter(
        rate_limit.seconds_per_interval,
        rate_limit.pings_per_interval,
    );

    if scan_directories {
        // The handle is intentionally dropped; the scan runs detached.
        let _scanning_thread = upload_manager.scan_pending_pings_directories(false);
    }

    let start_time = local_now_with_offset();
    let mut this = Self {
        upload_enabled: cfg.upload_enabled,
        // No database in subprocess mode; `Glean::new` opens it.
        data_store: None,
        event_data_store,
        core_metrics: CoreMetrics::new(),
        additional_metrics: AdditionalMetrics::new(),
        database_metrics: DatabaseMetrics::new(),
        health_metrics: HealthMetrics::new(),
        internal_pings: InternalPings::new(cfg.enable_internal_pings),
        upload_manager,
        data_path: PathBuf::from(&cfg.data_path),
        application_id,
        ping_registry: HashMap::new(),
        start_time,
        max_events: cfg.max_events.unwrap_or(DEFAULT_MAX_EVENTS),
        is_first_run: false,
        debug: DebugOptions::new(),
        app_build: cfg.app_build.to_string(),
        schedule_metrics_pings: false,
        remote_settings_epoch: AtomicU8::new(0),
        remote_settings_config: Arc::new(Mutex::new(RemoteSettingsConfig::new())),
        with_timestamps: cfg.enable_event_timestamps,
        ping_schedule: cfg.ping_schedule.clone(),
    };

    // Register Glean's built-in pings.
    let pings = this.internal_pings.clone();
    this.register_ping_type(&pings.baseline);
    this.register_ping_type(&pings.metrics);
    this.register_ping_type(&pings.events);
    this.register_ping_type(&pings.health);
    this.register_ping_type(&pings.deletion_request);

    Ok(this)
}
/// Creates and fully initializes a new Glean object.
///
/// Builds on [`Glean::new_for_subprocess`], then opens the metrics
/// database and reconciles the client ID stored in the plain-text
/// backup file with the one stored in the database, recording any
/// inconsistency in health metrics. Finally applies the
/// upload-enabled state from `cfg`.
///
/// Fixes applied: two log messages were corrected — the typo
/// `"cliend_id.txt"` and a missing `=` after `db_client_id` in the
/// mismatch message. No behavioral logic changed.
pub fn new(cfg: InternalConfiguration) -> Result<Self> {
    let mut glean = Self::new_for_subprocess(&cfg, false)?;

    let data_path = Path::new(&cfg.data_path);
    let ping_lifetime_threshold = cfg.ping_lifetime_threshold as usize;
    let ping_lifetime_max_time = Duration::from_millis(cfg.ping_lifetime_max_time);
    glean.data_store = Some(Database::new(
        data_path,
        cfg.delay_ping_lifetime_io,
        ping_lifetime_threshold,
        ping_lifetime_max_time,
    )?);

    // Read the client ID from its plain-text backup file.
    // Every failure mode is counted in a health metric;
    // `None` means "no usable stored client ID".
    let stored_client_id = match glean.client_id_from_file() {
        Ok(id) if id == *KNOWN_CLIENT_ID => {
            // The placeholder "c0ffee" ID must never be treated as real.
            glean
                .health_metrics
                .file_read_error
                .get("c0ffee-in-file")
                .add_sync(&glean, 1);
            None
        }
        Ok(id) => Some(id),
        Err(ClientIdFileError::NotFound) => {
            glean
                .health_metrics
                .file_read_error
                .get("file-not-found")
                .add_sync(&glean, 1);
            None
        }
        Err(ClientIdFileError::PermissionDenied) => {
            glean
                .health_metrics
                .file_read_error
                .get("permission-denied")
                .add_sync(&glean, 1);
            None
        }
        Err(ClientIdFileError::ParseError(e)) => {
            // Fixed typo in the log message ("cliend_id" -> "client_id").
            log::trace!("reading client_id.txt. Could not parse into UUID: {e}");
            glean
                .health_metrics
                .file_read_error
                .get("parse")
                .add_sync(&glean, 1);
            None
        }
        Err(ClientIdFileError::IoError(e)) => {
            log::trace!("reading client_id.txt. Unexpected io error: {e}");
            glean
                .health_metrics
                .file_read_error
                .get("io")
                .add_sync(&glean, 1);
            None
        }
    };

    {
        let data_store = glean.data_store.as_ref().unwrap();
        let file_size = data_store.file_size.map(|n| n.get()).unwrap_or(0);
        if let Some(stored_client_id) = stored_client_id {
            if file_size == 0 {
                // Client ID on disk but an empty database: the database
                // was lost. Recover the client ID from the file.
                log::trace!("no database. database size={file_size}. stored_client_id={stored_client_id}");
                glean
                    .health_metrics
                    .recovered_client_id
                    .set_from_uuid_sync(&glean, stored_client_id);
                glean
                    .health_metrics
                    .exception_state
                    .set_sync(&glean, ExceptionState::EmptyDb);
                glean
                    .core_metrics
                    .client_id
                    .set_from_uuid_sync(&glean, stored_client_id);
            } else {
                let db_client_id = glean
                    .core_metrics
                    .client_id
                    .get_value(&glean, Some("glean_client_info"));
                match db_client_id {
                    None => {
                        // Database exists but lost its client ID: restore it.
                        log::trace!("no client_id in DB. stored_client_id={stored_client_id}");
                        glean
                            .health_metrics
                            .exception_state
                            .set_sync(&glean, ExceptionState::RegenDb);
                        glean
                            .core_metrics
                            .client_id
                            .set_from_uuid_sync(&glean, stored_client_id);
                    }
                    Some(db_client_id) if db_client_id == *KNOWN_CLIENT_ID => {
                        // Placeholder ID in the database: replace it with
                        // the real one from the file.
                        log::trace!(
                            "c0ffee client_id in DB, stored_client_id={stored_client_id}"
                        );
                        glean
                            .health_metrics
                            .recovered_client_id
                            .set_from_uuid_sync(&glean, stored_client_id);
                        glean
                            .health_metrics
                            .exception_state
                            .set_sync(&glean, ExceptionState::C0ffeeInDb);
                        glean
                            .core_metrics
                            .client_id
                            .set_from_uuid_sync(&glean, stored_client_id);
                    }
                    Some(db_client_id) if db_client_id == stored_client_id => {
                        // Happy path: file and database agree.
                        log::trace!("database consistent. db_client_id == stored_client_id: {db_client_id}");
                    }
                    Some(db_client_id) => {
                        // Fixed log message: added the missing `=` after
                        // `db_client_id`.
                        log::trace!(
                            "client_id mismatch. db_client_id={db_client_id}, stored_client_id={stored_client_id}. Overwriting file with db's client_id."
                        );
                        glean
                            .health_metrics
                            .recovered_client_id
                            .set_from_uuid_sync(&glean, stored_client_id);
                        glean
                            .health_metrics
                            .exception_state
                            .set_sync(&glean, ExceptionState::ClientIdMismatch);
                        // The database wins: mirror its ID back to the file.
                        glean.store_client_id_with_reporting(
                            db_client_id,
                            "client_id mismatch will re-occur.",
                        );
                    }
                }
            }
        } else {
            log::trace!("No stored client ID. Database might have it.");
            let db_client_id = glean
                .core_metrics
                .client_id
                .get_value(&glean, Some("glean_client_info"));
            if let Some(db_client_id) = db_client_id {
                glean.store_client_id_with_reporting(
                    db_client_id,
                    "Might happen on next init then.",
                );
            } else {
                log::trace!("Database has no client ID either. We might be fresh!");
            }
        }
    }

    if let Some(experimentation_id) = &cfg.experimentation_id {
        glean
            .additional_metrics
            .experimentation_id
            .set_sync(&glean, experimentation_id.to_string());
    }

    if cfg.upload_enabled {
        glean.on_upload_enabled();
    } else {
        match glean
            .core_metrics
            .client_id
            .get_value(&glean, Some("glean_client_info"))
        {
            None => glean.clear_metrics(),
            Some(uuid) => {
                if let Err(e) = glean.remove_stored_client_id() {
                    log::error!("Couldn't remove client ID on disk. This might lead to a resurrection of this client ID later. Error: {e}");
                }
                if uuid == *KNOWN_CLIENT_ID {
                    // Only the placeholder ID is stored: drop it silently.
                    if let Some(data) = glean.data_store.as_ref() {
                        _ = data.remove_single_metric(
                            Lifetime::User,
                            "glean_client_info",
                            "client_id",
                        );
                    }
                } else {
                    // A real client ID exists but upload is now disabled:
                    // pretend upload is enabled so the deletion-request
                    // ping can be submitted, then run the full disable path.
                    glean.upload_enabled = true;
                    glean.on_upload_disabled(true);
                }
            }
        }
    }

    glean.schedule_metrics_pings = cfg.enable_internal_pings && cfg.use_core_mps;
    let _scanning_thread = glean.upload_manager.scan_pending_pings_directories(true);

    Ok(glean)
}
/// Test-only constructor: builds a Glean instance with a minimal
/// configuration and an upload manager that applies no upload policy.
#[cfg(test)]
pub(crate) fn with_options(
    data_path: &str,
    application_id: &str,
    upload_enabled: bool,
    enable_internal_pings: bool,
) -> Self {
    let cfg = InternalConfiguration {
        data_path: data_path.into(),
        application_id: application_id.into(),
        language_binding_name: "Rust".into(),
        upload_enabled,
        max_events: None,
        delay_ping_lifetime_io: false,
        app_build: "Unknown".into(),
        use_core_mps: false,
        trim_data_to_registered_pings: false,
        log_level: None,
        rate_limit: None,
        enable_event_timestamps: true,
        experimentation_id: None,
        enable_internal_pings,
        ping_schedule: Default::default(),
        ping_lifetime_threshold: 0,
        ping_lifetime_max_time: 0,
    };

    let mut glean = Self::new(cfg).unwrap();
    // Replace the upload manager so tests aren't rate-limited.
    glean.upload_manager = PingUploadManager::no_policy(data_path);
    glean
}
/// Destroys the database.
/// After this, any writes to the metrics database are silently dropped.
pub fn destroy_db(&mut self) {
    self.data_store = None;
}

/// Full path of the plain-text client ID file inside the data directory.
fn client_id_file_path(&self) -> PathBuf {
    self.data_path.join(CLIENT_ID_PLAIN_FILENAME)
}
/// Writes `client_id` as a lowercase hyphenated UUID string to the
/// plain-text client ID file, syncing the file to disk.
///
/// # Errors
/// Returns a [`ClientIdFileError`] if the file cannot be created,
/// written or synced.
fn store_client_id(&self, client_id: Uuid) -> Result<(), ClientIdFileError> {
    // Encode into a stack buffer; no heap allocation needed.
    let mut buffer = Uuid::encode_buffer();
    let encoded = client_id.hyphenated().encode_lower(&mut buffer);
    let mut file = File::create(self.client_id_file_path())?;
    file.write_all(encoded.as_bytes())?;
    // Make sure the ID actually hits the disk before returning.
    file.sync_all()?;
    Ok(())
}
/// Writes the client ID to the plain-text file, logging and counting
/// any failure in the `file_write_error` health metric.
///
/// # Arguments
/// * `client_id` - the ID to persist.
/// * `msg` - extra context appended to the error log on failure.
fn store_client_id_with_reporting(&self, client_id: Uuid, msg: &str) {
    if let Err(err) = self.store_client_id(client_id) {
        log::error!(
            "Could not write {client_id} to state file. {} Error: {err}",
            msg
        );
        match err {
            ClientIdFileError::NotFound => {
                self.health_metrics
                    .file_write_error
                    .get("not-found")
                    .add_sync(self, 1);
            }
            ClientIdFileError::PermissionDenied => {
                self.health_metrics
                    .file_write_error
                    .get("permission-denied")
                    .add_sync(self, 1);
            }
            ClientIdFileError::IoError(..) => {
                self.health_metrics
                    .file_write_error
                    .get("io")
                    .add_sync(self, 1);
            }
            ClientIdFileError::ParseError(..) => {
                // Parsing only happens on read; a write can't produce this.
                log::error!("Parse error encountered on file write. This is impossible.");
            }
        }
    }
}
/// Reads and parses the client ID from its plain-text file.
///
/// Trailing whitespace (e.g. a final newline) is tolerated.
fn client_id_from_file(&self) -> Result<Uuid, ClientIdFileError> {
    let contents = fs::read_to_string(self.client_id_file_path())?;
    Ok(Uuid::try_parse(contents.trim_end())?)
}

/// Deletes the plain-text client ID file.
///
/// A missing file is not an error; any other I/O failure is returned.
fn remove_stored_client_id(&self) -> Result<(), ClientIdFileError> {
    match fs::remove_file(self.client_id_file_path()) {
        Err(e) if e.kind() != io::ErrorKind::NotFound => Err(e.into()),
        _ => Ok(()),
    }
}
/// Initializes the core metrics managed by Glean's Rust core:
/// client ID, first run date and application-lifetime metrics.
fn initialize_core_metrics(&mut self) {
    // A new client ID is needed when none is stored yet, or when only
    // the known "c0ffee" placeholder ID is present.
    let need_new_client_id = match self
        .core_metrics
        .client_id
        .get_value(self, Some("glean_client_info"))
    {
        None => true,
        Some(uuid) => uuid == *KNOWN_CLIENT_ID,
    };
    if need_new_client_id {
        let new_clientid = self.core_metrics.client_id.generate_and_set_sync(self);
        // Mirror the fresh ID into the plain-text backup file.
        self.store_client_id_with_reporting(new_clientid, "New client in database only.");
    }

    // A missing first_run_date marks this as the first run ever.
    if self
        .core_metrics
        .first_run_date
        .get_value(self, "glean_client_info")
        .is_none()
    {
        // `None` sets the date to "now".
        self.core_metrics.first_run_date.set_sync(self, None);
        self.is_first_run = true;
    }

    self.set_application_lifetime_core_metrics();
}
/// Records database health metrics: file size and rkv load state.
fn initialize_database_metrics(&mut self) {
    log::trace!("Initializing database metrics");

    if let Some(size) = self
        .data_store
        .as_ref()
        .and_then(|database| database.file_size())
    {
        log::trace!("Database file size: {}", size.get());
        self.database_metrics
            .size
            .accumulate_sync(self, size.get() as i64)
    }

    if let Some(rkv_load_state) = self
        .data_store
        .as_ref()
        .and_then(|database| database.rkv_load_state())
    {
        self.database_metrics
            .rkv_load_error
            .set_sync(self, rkv_load_state)
    }
}
/// Signals that the environment is ready to submit pings.
///
/// If upload is disabled, metrics are cleared once more first.
/// Returns the result of flushing events recorded before startup
/// into pings.
pub fn on_ready_to_submit_pings(&mut self, trim_data_to_registered_pings: bool) -> bool {
    if !self.upload_enabled {
        log::debug!("on_ready_to_submit_pings. let's clear pings once again.");
        self.clear_metrics();
    }
    self.event_data_store
        .flush_pending_events_on_startup(self, trim_data_to_registered_pings)
}
/// Sets whether upload is enabled.
///
/// Runs the corresponding enable/disable transition only when the
/// state actually changes. Returns `true` if the state changed.
pub fn set_upload_enabled(&mut self, flag: bool) -> bool {
    log::info!("Upload enabled: {:?}", flag);

    // No state change: nothing to do.
    if self.upload_enabled == flag {
        return false;
    }

    if flag {
        self.on_upload_enabled();
    } else {
        self.on_upload_disabled(false);
    }
    true
}
/// Enables or disables a single ping.
///
/// When disabling, all of the ping's stored data is cleared and any
/// of its pings already pending upload are deleted from disk.
#[doc(hidden)]
pub fn set_ping_enabled(&mut self, ping: &PingType, enabled: bool) {
    ping.store_enabled(enabled);
    if !enabled {
        // Drop the ping's stored metric data across all lifetimes.
        if let Some(data) = self.data_store.as_ref() {
            _ = data.clear_ping_lifetime_storage(ping.name());
            _ = data.clear_lifetime_storage(Lifetime::User, ping.name());
            _ = data.clear_lifetime_storage(Lifetime::Application, ping.name());
        }
        // Also delete this ping's files from the pending-pings directory.
        let ping_maker = PingMaker::new();
        let disabled_pings = &[ping.name()][..];
        if let Err(err) = ping_maker.clear_pending_pings(self.get_data_path(), disabled_pings) {
            log::warn!("Error clearing pending pings: {}", err);
        }
    }
}
/// Determines whether upload is enabled.
pub fn is_upload_enabled(&self) -> bool {
    self.upload_enabled
}

/// Determines whether the given ping (by name) is enabled.
///
/// Internal stores and "all-pings" are always considered enabled;
/// unknown ping names are treated as disabled.
pub fn is_ping_enabled(&self, ping: &str) -> bool {
    const DEFAULT_ENABLED: &[&str] = &[
        "glean_client_info",
        "glean_internal_info",
        "all-pings",
    ];
    if DEFAULT_ENABLED.contains(&ping) {
        return true;
    }
    let Some(ping) = self.ping_registry.get(ping) else {
        log::trace!("Unknown ping {ping}. Assuming disabled.");
        return false;
    };
    ping.enabled(self)
}
/// Handles the transition from upload disabled to enabled:
/// re-initializes core and database metrics.
fn on_upload_enabled(&mut self) {
    self.upload_enabled = true;
    self.initialize_core_metrics();
    self.initialize_database_metrics();
}

/// Handles the transition from upload enabled to disabled:
/// submits a deletion-request ping, then clears all metrics.
///
/// # Arguments
/// * `during_init` - whether this happens during initialization,
///   which selects the ping's reason string.
fn on_upload_disabled(&mut self, during_init: bool) {
    let reason = if during_init {
        Some("at_init")
    } else {
        Some("set_upload_enabled")
    };
    // The deletion-request ping must be generated before metrics are
    // cleared and the enabled flag is flipped.
    if !self
        .internal_pings
        .deletion_request
        .submit_sync(self, reason)
    {
        log::error!("Failed to submit deletion-request ping on optout.");
    }
    self.clear_metrics();
    self.upload_enabled = false;
}
/// Clears stored metrics, pending pings and the client ID when upload
/// is disabled. Pings that don't follow the collection-enabled flag
/// keep their data.
fn clear_metrics(&mut self) {
    // Hold the queue lock for the duration of the cleanup.
    let _lock = self.upload_manager.clear_ping_queue();

    // Delete on-disk pending pings for every ping that follows the
    // collection-enabled flag.
    let ping_maker = PingMaker::new();
    let disabled_pings = self
        .ping_registry
        .iter()
        .filter(|&(_ping_name, ping)| ping.follows_collection_enabled())
        .map(|(ping_name, _ping)| &ping_name[..])
        .collect::<Vec<_>>();
    if let Err(err) = ping_maker.clear_pending_pings(self.get_data_path(), &disabled_pings) {
        log::warn!("Error clearing pending pings: {}", err);
    }

    // Also drop the plain-text client ID backup file.
    if let Err(e) = self.remove_stored_client_id() {
        log::error!("Couldn't remove client ID on disk. This might lead to a resurrection of this client ID later. Error: {e}");
    }

    if let Some(data) = self.data_store.as_ref() {
        // Remove internal state and the client ID from the database.
        _ = data.clear_lifetime_storage(Lifetime::User, "glean_internal_info");
        _ = data.remove_single_metric(Lifetime::User, "glean_client_info", "client_id");
        for (ping_name, ping) in &self.ping_registry {
            if ping.follows_collection_enabled() {
                _ = data.clear_ping_lifetime_storage(ping_name);
                _ = data.clear_lifetime_storage(Lifetime::User, ping_name);
                _ = data.clear_lifetime_storage(Lifetime::Application, ping_name);
            }
        }
    }

    if let Err(err) = self.event_data_store.clear_all() {
        log::warn!("Error clearing pending events: {}", err);
    }
}
/// The sanitized application ID.
pub fn get_application_id(&self) -> &str {
    &self.application_id
}

/// The path to the data directory.
pub fn get_data_path(&self) -> &Path {
    &self.data_path
}

/// The metrics database.
///
/// # Panics
/// Panics if the database was not initialized.
#[track_caller]
pub fn storage(&self) -> &Database {
    self.data_store.as_ref().expect("No database found")
}

/// The metrics database, or `None` if it was not initialized.
pub fn storage_opt(&self) -> Option<&Database> {
    self.data_store.as_ref()
}

/// The event database.
pub fn event_storage(&self) -> &EventDatabase {
    &self.event_data_store
}

/// Whether events are recorded with wall-clock timestamps.
pub(crate) fn with_timestamps(&self) -> bool {
    self.with_timestamps
}

/// The maximum number of events before an events ping is submitted.
/// A server-knobs (remote settings) threshold takes precedence over
/// the configured value.
pub fn get_max_events(&self) -> usize {
    let remote_settings_config = self.remote_settings_config.lock().unwrap();
    if let Some(max_events) = remote_settings_config.event_threshold {
        max_events as usize
    } else {
        self.max_events as usize
    }
}
/// Gets the next task for the ping uploader.
pub fn get_upload_task(&self) -> PingUploadTask {
    self.upload_manager.get_upload_task(self, self.log_pings())
}

/// Processes the response from an attempted ping upload,
/// identified by the ping's document ID.
pub fn process_ping_upload_response(
    &self,
    uuid: &str,
    status: UploadResult,
) -> UploadTaskAction {
    self.upload_manager
        .process_ping_upload_response(self, uuid, status)
}

/// Takes a snapshot of the given store, optionally clearing it.
/// Returns an empty string when the store has no data.
pub fn snapshot(&mut self, store_name: &str, clear_store: bool) -> String {
    StorageManager
        .snapshot(self.storage(), store_name, clear_store)
        .unwrap_or_else(|| String::from(""))
}
/// Builds the ping submission URL path:
/// `/submit/<application_id>/<ping_name>/<schema_version>/<doc_id>`.
pub(crate) fn make_path(&self, ping_name: &str, doc_id: &str) -> String {
    format!(
        "/submit/{}/{}/{}/{}",
        self.get_application_id(),
        ping_name,
        GLEAN_SCHEMA_VERSION,
        doc_id
    )
}
/// Submits a ping by name, with an optional reason string.
/// Returns whether a ping was actually generated; unknown ping names
/// are logged and return `false`.
pub fn submit_ping_by_name(&self, ping_name: &str, reason: Option<&str>) -> bool {
    if let Some(ping) = self.get_ping_by_name(ping_name) {
        ping.submit_sync(self, reason)
    } else {
        log::error!("Attempted to submit unknown ping '{}'", ping_name);
        false
    }
}

/// Looks up a registered ping by name.
pub fn get_ping_by_name(&self, ping_name: &str) -> Option<&PingType> {
    self.ping_registry.get(ping_name)
}

/// Registers a ping, making it known for submission.
/// A duplicate registration replaces the previous entry.
pub fn register_ping_type(&mut self, ping: &PingType) {
    let name = ping.name();
    if self.ping_registry.contains_key(name) {
        log::debug!("Duplicate ping named '{}'", name)
    }
    self.ping_registry.insert(name.to_string(), ping.clone());
}

/// Names of all registered pings.
pub fn get_registered_ping_names(&self) -> Vec<&str> {
    self.ping_registry.keys().map(|name| name.as_str()).collect()
}
/// The process start time recorded when this Glean object was built.
pub(crate) fn start_time(&self) -> DateTime<FixedOffset> {
    self.start_time
}

/// Marks an experiment as active.
pub fn set_experiment_active(
    &self,
    experiment_id: String,
    branch: String,
    extra: HashMap<String, String>,
) {
    let metric = ExperimentMetric::new(self, experiment_id);
    metric.set_active_sync(self, branch, extra);
}

/// Marks an experiment as inactive.
pub fn set_experiment_inactive(&self, experiment_id: String) {
    let metric = ExperimentMetric::new(self, experiment_id);
    metric.set_inactive_sync(self);
}

/// Test-only: gets the recorded data for an experiment, if any.
pub fn test_get_experiment_data(&self, experiment_id: String) -> Option<RecordedExperiment> {
    let metric = ExperimentMetric::new(self, experiment_id);
    metric.test_get_value(self)
}

/// Test-only: gets the stored experimentation ID, if any.
pub fn test_get_experimentation_id(&self) -> Option<String> {
    self.additional_metrics
        .experimentation_id
        .get_value(self, None)
}
/// Merges a new server-knobs (remote settings) configuration into the
/// current one, records the combined configuration as a metric and
/// bumps the epoch counter.
pub fn apply_server_knobs_config(&self, cfg: RemoteSettingsConfig) {
    // Mutate and serialize inside a scope so the lock is released
    // before any metric recording happens.
    let config_value = {
        let mut remote_settings_config = self.remote_settings_config.lock().unwrap();
        remote_settings_config
            .metrics_enabled
            .extend(cfg.metrics_enabled);
        remote_settings_config
            .pings_enabled
            .extend(cfg.pings_enabled);
        remote_settings_config.event_threshold = cfg.event_threshold;
        serde_json::to_value(&*remote_settings_config).unwrap()
    };
    self.additional_metrics
        .server_knobs_config
        .set_sync(self, config_value);
    // Signal consumers of the epoch that the config changed.
    self.remote_settings_epoch.fetch_add(1, Ordering::SeqCst);
}

/// Persists ping-lifetime data, if the database is available.
pub fn persist_ping_lifetime_data(&self) -> Result<()> {
    if let Some(data) = self.data_store.as_ref() {
        return data.persist_ping_lifetime_data();
    }
    Ok(())
}
/// Sets the core metrics that have `Lifetime::Application`.
fn set_application_lifetime_core_metrics(&self) {
    self.core_metrics.os.set_sync(self, system::OS);
}

/// Clears all application-lifetime metrics, then restores the core
/// application-lifetime metrics.
pub fn clear_application_lifetime_metrics(&self) {
    log::trace!("Clearing Lifetime::Application metrics");
    if let Some(data) = self.data_store.as_ref() {
        data.clear_lifetime(Lifetime::Application);
    }
    self.set_application_lifetime_core_metrics();
}

/// Whether this was the first run of the application
/// (no first_run_date had been stored).
pub fn is_first_run(&self) -> bool {
    self.is_first_run
}

/// Sets the debug view tag. Returns whether the value was accepted.
pub fn set_debug_view_tag(&mut self, value: &str) -> bool {
    self.debug.debug_view_tag.set(value.into())
}

/// The current debug view tag, if set.
pub fn debug_view_tag(&self) -> Option<&String> {
    self.debug.debug_view_tag.get()
}

/// Sets the source tags. Returns whether the value was accepted.
pub fn set_source_tags(&mut self, value: Vec<String>) -> bool {
    self.debug.source_tags.set(value)
}

/// The current source tags, if set.
pub(crate) fn source_tags(&self) -> Option<&Vec<String>> {
    self.debug.source_tags.get()
}

/// Sets whether to log pings. Returns whether the value was accepted.
pub fn set_log_pings(&mut self, value: bool) -> bool {
    self.debug.log_pings.set(value)
}

/// Whether ping logging is enabled; defaults to `false` when unset.
pub fn log_pings(&self) -> bool {
    self.debug.log_pings.get().copied().unwrap_or(false)
}
/// The user-lifetime boolean metric backing the "dirty" flag.
fn get_dirty_bit_metric(&self) -> metrics::BooleanMetric {
    metrics::BooleanMetric::new(CommonMetricData {
        name: "dirtybit".into(),
        // No category needed; the name is unique within internal storage.
        category: "".into(),
        send_in_pings: vec![INTERNAL_STORAGE.into()],
        lifetime: Lifetime::User,
        ..Default::default()
    })
}

/// Sets the dirty flag.
pub fn set_dirty_flag(&self, new_value: bool) {
    self.get_dirty_bit_metric().set_sync(self, new_value);
}

/// Reads the stored dirty flag; a missing value counts as `false`.
pub fn is_dirty_flag_set(&self) -> bool {
    let dirty_bit_metric = self.get_dirty_bit_metric();
    match StorageManager.snapshot_metric(
        self.storage(),
        INTERNAL_STORAGE,
        &dirty_bit_metric.meta().identifier(self),
        dirty_bit_metric.meta().inner.lifetime,
    ) {
        Some(Metric::Boolean(b)) => b,
        _ => false,
    }
}
/// Handles the client becoming active: submits a baseline ping with
/// reason "active" and sets the dirty flag.
pub fn handle_client_active(&mut self) {
    if !self
        .internal_pings
        .baseline
        .submit_sync(self, Some("active"))
    {
        log::info!("baseline ping not submitted on active");
    }
    self.set_dirty_flag(true);
}

/// Handles the client becoming inactive: submits baseline and events
/// pings with reason "inactive" and clears the dirty flag.
pub fn handle_client_inactive(&mut self) {
    if !self
        .internal_pings
        .baseline
        .submit_sync(self, Some("inactive"))
    {
        log::info!("baseline ping not submitted on inactive");
    }
    if !self
        .internal_pings
        .events
        .submit_sync(self, Some("inactive"))
    {
        log::info!("events ping not submitted on inactive");
    }
    self.set_dirty_flag(false);
}
/// Test-only: clears all metric and event stores.
pub fn test_clear_all_stores(&self) {
    if let Some(data) = self.data_store.as_ref() {
        data.clear_all()
    }
    // Best-effort: the result is intentionally ignored.
    let _ = self.event_data_store.clear_all();
}

/// Instructs the Metrics Ping Scheduler to stop, if it is in use.
pub fn cancel_metrics_ping_scheduler(&self) {
    if self.schedule_metrics_pings {
        scheduler::cancel();
    }
}

/// Starts the Metrics Ping Scheduler, if it is configured to run.
pub fn start_metrics_ping_scheduler(&self) {
    if self.schedule_metrics_pings {
        scheduler::schedule(self);
    }
}
/// Updates the attribution core metrics.
/// Only fields present in `attribution` overwrite stored values.
pub fn update_attribution(&self, attribution: AttributionMetrics) {
    if let Some(source) = attribution.source {
        self.core_metrics.attribution_source.set_sync(self, source);
    }
    if let Some(medium) = attribution.medium {
        self.core_metrics.attribution_medium.set_sync(self, medium);
    }
    if let Some(campaign) = attribution.campaign {
        self.core_metrics
            .attribution_campaign
            .set_sync(self, campaign);
    }
    if let Some(term) = attribution.term {
        self.core_metrics.attribution_term.set_sync(self, term);
    }
    if let Some(content) = attribution.content {
        self.core_metrics
            .attribution_content
            .set_sync(self, content);
    }
}

/// Test-only: returns the current attribution core metrics.
pub fn test_get_attribution(&self) -> AttributionMetrics {
    AttributionMetrics {
        source: self
            .core_metrics
            .attribution_source
            .get_value(self, Some("glean_client_info")),
        medium: self
            .core_metrics
            .attribution_medium
            .get_value(self, Some("glean_client_info")),
        campaign: self
            .core_metrics
            .attribution_campaign
            .get_value(self, Some("glean_client_info")),
        term: self
            .core_metrics
            .attribution_term
            .get_value(self, Some("glean_client_info")),
        content: self
            .core_metrics
            .attribution_content
            .get_value(self, Some("glean_client_info")),
    }
}

/// Updates the distribution core metrics.
/// Only fields present in `distribution` overwrite stored values.
pub fn update_distribution(&self, distribution: DistributionMetrics) {
    if let Some(name) = distribution.name {
        self.core_metrics.distribution_name.set_sync(self, name);
    }
}

/// Test-only: returns the current distribution core metrics.
pub fn test_get_distribution(&self) -> DistributionMetrics {
    DistributionMetrics {
        name: self
            .core_metrics
            .distribution_name
            .get_value(self, Some("glean_client_info")),
    }
}
}