use super::config::MobileProfilerConfig;
use super::types::*;
use std::collections::{HashMap, VecDeque};
use std::sync::{Arc, Mutex, RwLock};
use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH};
use trustformers_core::error::{CoreError, Result};
use trustformers_core::TrustformersError;
#[cfg(any(target_os = "ios", target_os = "android"))]
extern crate libc;
/// Error variants specific to the metrics-collection subsystem.
///
/// NOTE(review): none of these variants are constructed in this file — the
/// methods below surface failures as `TrustformersError` instead. Confirm
/// this enum is consumed by sibling modules before assuming it is dead code.
#[derive(Debug, thiserror::Error)]
pub enum CollectionError {
    /// An OS-level resource needed for sampling could not be obtained.
    #[error("System resource unavailable: {0}")]
    ResourceUnavailable(String),
    /// The process lacks permission to read a metric source.
    #[error("Permission denied: {0}")]
    PermissionDenied(String),
    /// The current platform does not expose the requested feature.
    #[error("Platform feature not supported: {0}")]
    PlatformNotSupported(String),
    /// A collection operation exceeded its time budget.
    #[error("Collection operation timed out after {timeout_ms}ms")]
    Timeout { timeout_ms: u64 },
    /// The supplied profiler configuration was rejected.
    #[error("Invalid configuration: {0}")]
    InvalidConfiguration(String),
    /// Catch-all for unexpected internal failures.
    #[error("Internal collection error: {0}")]
    Internal(String),
}
/// Thread-safe collector that samples mobile system metrics (memory, CPU,
/// GPU, network, thermal, battery, platform extras) and tracks per-inference
/// statistics.
///
/// Every field is wrapped in `Arc` plus a lock so the collector can be shared
/// across threads; methods take each lock for the shortest feasible scope.
#[derive(Debug)]
pub struct MobileMetricsCollector {
    /// Live profiler configuration, hot-swappable via `update_config`.
    config: Arc<RwLock<MobileProfilerConfig>>,
    /// Most recent snapshot, replaced on every collection cycle.
    current_metrics: Arc<RwLock<MobileMetricsSnapshot>>,
    /// Bounded FIFO of past snapshots (capped at `sampling.max_samples`).
    metrics_history: Arc<Mutex<VecDeque<MobileMetricsSnapshot>>>,
    /// Start/stop flag plus rolling collection counters.
    collection_state: Arc<RwLock<CollectionState>>,
    /// Platform-specific backend selected once at construction time.
    platform_collector: Arc<dyn PlatformCollector + Send + Sync>,
    /// Active/completed inference sessions and cache statistics.
    inference_tracker: Arc<Mutex<InferenceTracker>>,
    /// Aggregated statistics, finalized when collection stops.
    statistics: Arc<Mutex<CollectionStatistics>>,
}
/// Mutable run-state for the collection loop.
#[derive(Debug, Clone)]
struct CollectionState {
    /// True while collection is active (also toggled by pause/resume).
    is_collecting: bool,
    /// When the current run started; `None` before the first start.
    collection_start: Option<Instant>,
    /// Time of the most recent sample.
    last_collection: Option<Instant>,
    /// Number of collection cycles performed so far.
    total_samples: u64,
    /// Cycles that reported at least one per-metric error.
    error_count: u64,
    /// Running mean of the wall-clock cost of one cycle.
    avg_collection_time_ms: f64,
}
/// Book-keeping for inference sessions plus model/cache statistics.
#[derive(Debug)]
struct InferenceTracker {
    /// In-flight sessions keyed by caller-supplied session id.
    active_inferences: HashMap<String, InferenceSession>,
    /// Ring of finished sessions, manually capped at 1000 entries.
    completed_inferences: VecDeque<CompletedInference>,
    /// Last recorded load time (ms) per model name.
    model_load_times: HashMap<String, f64>,
    /// Aggregate cache hit/miss counters and running-average latencies.
    cache_stats: CacheStats,
}
/// One in-flight inference, recorded by `start_inference_tracking`.
#[derive(Debug, Clone)]
struct InferenceSession {
    /// Caller-supplied session id.
    /// NOTE(review): duplicates the map key and is never read back — confirm
    /// whether external code relies on it before removing.
    id: String,
    /// Name of the model being executed.
    model_name: String,
    /// Wall-clock start used to compute the final duration.
    start_time: Instant,
    /// System state at session start (used for the memory delta on completion).
    initial_metrics: SystemSnapshot,
}
/// Summary of a finished inference session.
#[derive(Debug, Clone)]
struct CompletedInference {
    /// Session id the caller used when starting the inference.
    id: String,
    /// Model that was executed.
    model_name: String,
    /// Total wall-clock duration in milliseconds.
    duration_ms: f64,
    /// Whether the caller reported the inference as successful.
    success: bool,
    /// Heap-usage change (MB) between session start and end (may be negative).
    memory_delta_mb: f32,
    /// CPU usage (%) observed at session end.
    cpu_usage_percent: f32,
    /// GPU usage (%) observed at session end.
    gpu_usage_percent: f32,
    /// Completion time as Unix epoch milliseconds.
    timestamp: u64,
}
/// Aggregate cache statistics with incrementally maintained mean latencies.
#[derive(Debug, Clone)]
struct CacheStats {
    /// Total cache lookups (hits + misses).
    total_requests: u64,
    /// Lookups served from cache.
    cache_hits: u64,
    /// Lookups that missed the cache.
    cache_misses: u64,
    /// Running mean latency (ms) over all hits.
    avg_hit_latency_ms: f64,
    /// Running mean latency (ms) over all misses.
    avg_miss_latency_ms: f64,
}
/// Lightweight system snapshot captured at inference start.
///
/// NOTE(review): only `memory_usage_mb` is read back when a session ends; the
/// other fields are captured but currently unused in this file.
#[derive(Debug, Clone)]
struct SystemSnapshot {
    /// Heap usage (MB) at capture time.
    memory_usage_mb: f32,
    /// CPU usage (%) at capture time.
    cpu_usage_percent: f32,
    /// GPU usage (%) at capture time.
    gpu_usage_percent: f32,
    /// Monotonic capture time.
    timestamp: Instant,
}
/// Public, cloneable summary of a collection run.
#[derive(Debug, Clone)]
pub struct CollectionStatistics {
    /// Total number of collection cycles performed.
    pub total_samples: u64,
    /// Wall-clock duration of the collection run so far.
    pub collection_duration: Duration,
    /// Samples per second over the run (stays 0 for sub-second runs).
    pub average_sampling_rate: f64,
    /// Number of snapshots currently retained in history.
    pub history_size: usize,
    /// Estimated memory footprint (MB) of the retained history.
    pub current_memory_usage_mb: f32,
    /// Cycles that finished with at least one per-metric error.
    pub error_count: u64,
    /// Running mean wall-clock cost (ms) of one cycle.
    pub avg_collection_time_ms: f64,
    /// `1.0 - error_count / total_samples`; 1.0 before anything has run.
    pub success_rate: f64,
}
/// Backend abstraction over per-platform metric sources.
///
/// `std::fmt::Debug` is a supertrait so implementors can live inside the
/// `#[derive(Debug)]` `MobileMetricsCollector` — every implementor must
/// therefore derive or implement `Debug`.
trait PlatformCollector: std::fmt::Debug {
    fn collect_memory_metrics(&self) -> Result<MemoryMetrics>;
    fn collect_cpu_metrics(&self) -> Result<CpuMetrics>;
    fn collect_gpu_metrics(&self) -> Result<GpuMetrics>;
    fn collect_thermal_metrics(&self) -> Result<ThermalMetrics>;
    fn collect_battery_metrics(&self) -> Result<BatteryMetrics>;
    fn collect_platform_metrics(&self) -> Result<PlatformMetrics>;
    /// Human-readable platform name used in log messages.
    fn platform_name(&self) -> &str;
    /// Whether the backend can supply the given metric category.
    fn supports_metric(&self, metric_type: &str) -> bool;
}
/// iOS metrics backend (current implementation returns stub values).
///
/// `#[derive(Debug)]` is required: `PlatformCollector` has `std::fmt::Debug`
/// as a supertrait, so without it the `impl PlatformCollector for
/// IOSCollector` fails to compile on iOS targets.
#[cfg(target_os = "ios")]
#[derive(Debug)]
struct IOSCollector {
    /// Shared profiler configuration (not yet consulted by the stubs).
    config: Arc<RwLock<MobileProfilerConfig>>,
}
/// Android metrics backend (current implementation returns stub values).
///
/// `#[derive(Debug)]` is required: `PlatformCollector` has `std::fmt::Debug`
/// as a supertrait, so without it the `impl PlatformCollector for
/// AndroidCollector` fails to compile on Android targets.
#[cfg(target_os = "android")]
#[derive(Debug)]
struct AndroidCollector {
    /// Shared profiler configuration (not yet consulted by the stubs).
    config: Arc<RwLock<MobileProfilerConfig>>,
}
/// Fallback backend used on non-mobile targets (returns stub values).
#[derive(Debug)]
struct GenericCollector {
    /// Shared profiler configuration (not yet consulted by the stubs).
    config: Arc<RwLock<MobileProfilerConfig>>,
}
impl MobileMetricsCollector {
pub fn new(config: MobileProfilerConfig) -> Result<Self> {
Self::validate_config(&config)?;
let config_arc = Arc::new(RwLock::new(config.clone()));
let platform_collector = Self::create_platform_collector(config_arc.clone())?;
let collection_state = Arc::new(RwLock::new(CollectionState {
is_collecting: false,
collection_start: None,
last_collection: None,
total_samples: 0,
error_count: 0,
avg_collection_time_ms: 0.0,
}));
let inference_tracker = Arc::new(Mutex::new(InferenceTracker {
active_inferences: HashMap::new(),
completed_inferences: VecDeque::new(),
model_load_times: HashMap::new(),
cache_stats: CacheStats {
total_requests: 0,
cache_hits: 0,
cache_misses: 0,
avg_hit_latency_ms: 0.0,
avg_miss_latency_ms: 0.0,
},
}));
let statistics = Arc::new(Mutex::new(CollectionStatistics {
total_samples: 0,
collection_duration: Duration::new(0, 0),
average_sampling_rate: 0.0,
history_size: 0,
current_memory_usage_mb: 0.0,
error_count: 0,
avg_collection_time_ms: 0.0,
success_rate: 1.0,
}));
tracing::info!(
"Initialized MobileMetricsCollector for platform: {}",
platform_collector.platform_name()
);
Ok(Self {
config: config_arc,
current_metrics: Arc::new(RwLock::new(MobileMetricsSnapshot::default())),
metrics_history: Arc::new(Mutex::new(VecDeque::new())),
collection_state,
platform_collector,
inference_tracker,
statistics,
})
}
pub fn start_collection(&self) -> Result<()> {
{
let state = self
.collection_state
.read()
.map_err(|e| TrustformersError::runtime_error(format!("Lock error: {}", e)))?;
if state.is_collecting {
tracing::warn!("Collection already active, ignoring start request");
return Ok(());
}
}
let collection_start = Instant::now();
let initial_result = self.collect_metrics_internal();
match initial_result {
Ok(_) => {
let mut state = self
.collection_state
.write()
.map_err(|e| TrustformersError::runtime_error(format!("Lock error: {}", e)))?;
state.is_collecting = true;
state.collection_start = Some(collection_start);
state.last_collection = Some(Instant::now());
tracing::info!("Started mobile metrics collection");
Ok(())
},
Err(e) => {
tracing::error!("Failed to start collection: {}", e);
Err(e)
},
}
}
/// Stop an active collection run and finalize aggregate statistics.
///
/// Idempotent: a stop request while not collecting only logs a warning.
pub fn stop_collection(&self) -> Result<()> {
    let mut state = self
        .collection_state
        .write()
        .map_err(|e| TrustformersError::runtime_error(format!("Lock error: {}", e)))?;
    if !state.is_collecting {
        tracing::warn!("Collection not active, ignoring stop request");
        return Ok(());
    }
    state.is_collecting = false;
    // Best-effort finalization: a poisoned statistics lock is silently skipped.
    if let Ok(mut stats) = self.statistics.lock() {
        if let Some(start_time) = state.collection_start {
            stats.collection_duration = start_time.elapsed();
            // Whole-second guard avoids division by zero for sub-second runs.
            if stats.collection_duration.as_secs() > 0 {
                stats.average_sampling_rate =
                    state.total_samples as f64 / stats.collection_duration.as_secs() as f64;
            }
        }
        if state.total_samples > 0 {
            stats.success_rate = 1.0 - (state.error_count as f64 / state.total_samples as f64);
        }
    }
    tracing::info!(
        "Stopped mobile metrics collection. Collected {} samples with {} errors",
        state.total_samples,
        state.error_count
    );
    Ok(())
}
/// Temporarily suspend collection without finalizing statistics.
///
/// A pause while not collecting is a no-op that logs a warning.
pub fn pause_collection(&self) -> Result<()> {
    let mut state = self
        .collection_state
        .write()
        .map_err(|e| TrustformersError::runtime_error(format!("Lock error: {}", e)))?;
    if state.is_collecting {
        state.is_collecting = false;
        tracing::info!("Paused mobile metrics collection");
    } else {
        tracing::warn!("Collection not active, cannot pause");
    }
    Ok(())
}
/// Resume a previously paused collection run.
///
/// A resume while already collecting is a no-op that logs a warning.
pub fn resume_collection(&self) -> Result<()> {
    let mut state = self
        .collection_state
        .write()
        .map_err(|e| TrustformersError::runtime_error(format!("Lock error: {}", e)))?;
    if state.is_collecting {
        tracing::warn!("Collection already active, ignoring resume request");
    } else {
        state.is_collecting = true;
        state.last_collection = Some(Instant::now());
        tracing::info!("Resumed mobile metrics collection");
    }
    Ok(())
}
pub fn get_collection_stats(&self) -> Result<CollectionStatistics> {
let stats = self
.statistics
.lock()
.map_err(|e| TrustformersError::runtime_error(format!("Lock error: {}", e)))?;
Ok(stats.clone())
}
pub fn get_current_snapshot(&self) -> Result<MobileMetricsSnapshot> {
let snapshot = self
.current_metrics
.read()
.map_err(|e| TrustformersError::runtime_error(format!("Lock error: {}", e)))?;
Ok(snapshot.clone())
}
/// Return a copy of the whole retained snapshot history (oldest first).
///
/// A poisoned history lock yields an empty vector rather than an error.
pub fn get_all_snapshots(&self) -> Vec<MobileMetricsSnapshot> {
    match self.metrics_history.lock() {
        Ok(history) => history.iter().cloned().collect(),
        Err(_) => Vec::new(),
    }
}
/// Trigger one on-demand collection cycle (public wrapper around the
/// internal implementation).
pub fn collect_metrics(&self) -> Result<()> {
    self.collect_metrics_internal()
}
/// Begin tracking an inference session, recording a baseline system state.
///
/// The baseline snapshot is captured BEFORE the tracker mutex is taken so we
/// never hold two locks at once (`get_current_snapshot` acquires the
/// `current_metrics` lock internally).
///
/// # Errors
/// Fails when the snapshot or tracker lock is poisoned.
pub fn start_inference_tracking(&self, session_id: &str, model_name: &str) -> Result<()> {
    let current_snapshot = self.get_current_snapshot()?;
    let initial_metrics = SystemSnapshot {
        memory_usage_mb: current_snapshot.memory.heap_used_mb,
        cpu_usage_percent: current_snapshot.cpu.usage_percent,
        gpu_usage_percent: current_snapshot.gpu.usage_percent,
        timestamp: Instant::now(),
    };
    let session = InferenceSession {
        id: session_id.to_string(),
        model_name: model_name.to_string(),
        start_time: Instant::now(),
        initial_metrics,
    };
    let mut tracker = self
        .inference_tracker
        .lock()
        .map_err(|e| TrustformersError::runtime_error(format!("Lock error: {}", e)))?;
    tracker.active_inferences.insert(session_id.to_string(), session);
    tracing::debug!("Started inference tracking for session: {}", session_id);
    Ok(())
}
/// Finish tracking an inference session and append its summary to the
/// completed-inference ring (capped at 1000 entries).
///
/// Unknown session ids are logged and ignored. The end-of-session snapshot is
/// read BEFORE the tracker mutex is taken so no two locks are held at once.
/// (Also drops the original's unused `end_time` local and moves
/// `model_name` out of the owned session instead of cloning it.)
///
/// # Errors
/// Fails when the snapshot or tracker lock is poisoned.
pub fn end_inference_tracking(&self, session_id: &str, success: bool) -> Result<()> {
    let current_snapshot = self.get_current_snapshot()?;
    let mut tracker = self
        .inference_tracker
        .lock()
        .map_err(|e| TrustformersError::runtime_error(format!("Lock error: {}", e)))?;
    if let Some(session) = tracker.active_inferences.remove(session_id) {
        let duration_ms = session.start_time.elapsed().as_millis() as f64;
        let completed_inference = CompletedInference {
            id: session_id.to_string(),
            // `session` is owned here, so move the name instead of cloning.
            model_name: session.model_name,
            duration_ms,
            success,
            memory_delta_mb: current_snapshot.memory.heap_used_mb
                - session.initial_metrics.memory_usage_mb,
            cpu_usage_percent: current_snapshot.cpu.usage_percent,
            gpu_usage_percent: current_snapshot.gpu.usage_percent,
            timestamp: SystemTime::now()
                .duration_since(UNIX_EPOCH)
                .unwrap_or_default()
                .as_millis() as u64,
        };
        tracker.completed_inferences.push_back(completed_inference);
        // Keep only the most recent 1000 completed inferences.
        if tracker.completed_inferences.len() > 1000 {
            tracker.completed_inferences.pop_front();
        }
        tracing::debug!(
            "Completed inference tracking for session: {} ({}ms, success: {})",
            session_id,
            duration_ms,
            success
        );
    } else {
        tracing::warn!("No active inference session found for ID: {}", session_id);
    }
    Ok(())
}
/// Record (or overwrite) the load time for a model, in milliseconds.
pub fn record_model_load_time(&self, model_name: &str, load_time_ms: f64) -> Result<()> {
    let mut tracker = self
        .inference_tracker
        .lock()
        .map_err(|e| TrustformersError::runtime_error(format!("Lock error: {}", e)))?;
    tracker
        .model_load_times
        .insert(model_name.to_string(), load_time_ms);
    tracing::debug!(
        "Recorded model load time: {} = {}ms",
        model_name,
        load_time_ms
    );
    Ok(())
}
/// Record a cache hit and fold its latency into the running hit average.
pub fn record_cache_hit(&self, latency_ms: f64) -> Result<()> {
    let mut tracker = self
        .inference_tracker
        .lock()
        .map_err(|e| TrustformersError::runtime_error(format!("Lock error: {}", e)))?;
    let stats = &mut tracker.cache_stats;
    stats.total_requests += 1;
    stats.cache_hits += 1;
    // Incremental mean over all hits: ((n-1)*old + sample) / n.
    let hits = stats.cache_hits as f64;
    stats.avg_hit_latency_ms = (stats.avg_hit_latency_ms * (hits - 1.0) + latency_ms) / hits;
    Ok(())
}
/// Record a cache miss and fold its latency into the running miss average.
pub fn record_cache_miss(&self, latency_ms: f64) -> Result<()> {
    let mut tracker = self
        .inference_tracker
        .lock()
        .map_err(|e| TrustformersError::runtime_error(format!("Lock error: {}", e)))?;
    let stats = &mut tracker.cache_stats;
    stats.total_requests += 1;
    stats.cache_misses += 1;
    // Incremental mean over all misses: ((n-1)*old + sample) / n.
    let misses = stats.cache_misses as f64;
    stats.avg_miss_latency_ms =
        (stats.avg_miss_latency_ms * (misses - 1.0) + latency_ms) / misses;
    Ok(())
}
/// Build an up-to-date statistics snapshot without stopping collection.
///
/// Lock failures are tolerated: each section falls back to whatever data is
/// already present (full defaults when the statistics lock is poisoned).
pub fn get_collection_statistics(&self) -> CollectionStatistics {
    let mut stats = self.statistics.lock().map(|s| s.clone()).unwrap_or_default();
    if let Ok(state) = self.collection_state.read() {
        stats.total_samples = state.total_samples;
        stats.error_count = state.error_count;
        stats.avg_collection_time_ms = state.avg_collection_time_ms;
        if let Some(start_time) = state.collection_start {
            stats.collection_duration = start_time.elapsed();
            // Whole-second guard avoids division by zero for sub-second runs.
            if stats.collection_duration.as_secs() > 0 {
                stats.average_sampling_rate =
                    state.total_samples as f64 / stats.collection_duration.as_secs() as f64;
            }
        }
        if state.total_samples > 0 {
            stats.success_rate = 1.0 - (state.error_count as f64 / state.total_samples as f64);
        }
    }
    if let Ok(history) = self.metrics_history.lock() {
        stats.history_size = history.len();
        stats.current_memory_usage_mb = self.estimate_memory_usage(&history);
    }
    stats
}
pub fn update_config(&self, new_config: MobileProfilerConfig) -> Result<()> {
Self::validate_config(&new_config)?;
let mut config = self
.config
.write()
.map_err(|e| TrustformersError::runtime_error(format!("Lock error: {}", e)))?;
*config = new_config;
tracing::info!("Updated collector configuration");
Ok(())
}
pub fn get_config(&self) -> MobileProfilerConfig {
self.config.read().map(|c| c.clone()).unwrap_or_default()
}
/// Run one full collection cycle: gather every enabled metric category,
/// publish the combined snapshot, append it to bounded history, and update
/// the run counters.
///
/// Individual category failures do NOT abort the cycle — each failing
/// category falls back to its `Default` value and the errors are tallied
/// (one error increment per cycle) at the end.
///
/// # Errors
/// Fails when collection is disabled, the config lock is poisoned, or the
/// system clock is before the Unix epoch.
fn collect_metrics_internal(&self) -> Result<()> {
    let collection_start = Instant::now();
    // NOTE: the config read lock is held for the whole cycle so all
    // categories see one consistent configuration.
    let config = self
        .config
        .read()
        .map_err(|e| TrustformersError::runtime_error(format!("Lock error: {}", e)))?;
    if !config.enabled {
        return Err(TrustformersError::runtime_error("Collection is disabled".into()).into());
    }
    // Per-category errors accumulate here instead of propagating.
    let mut collection_errors = Vec::new();
    let timestamp = SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .map_err(|e| TrustformersError::other(format!("Time error: {}", e)))?
        .as_millis() as u64;
    // Each category below: collect when enabled, default on failure/disabled.
    let memory = if config.memory_profiling.enabled {
        self.platform_collector.collect_memory_metrics().unwrap_or_else(|e| {
            collection_errors.push(e);
            MemoryMetrics::default()
        })
    } else {
        MemoryMetrics::default()
    };
    let cpu = if config.cpu_profiling.enabled {
        self.platform_collector.collect_cpu_metrics().unwrap_or_else(|e| {
            collection_errors.push(e);
            CpuMetrics::default()
        })
    } else {
        CpuMetrics::default()
    };
    let gpu = if config.gpu_profiling.enabled {
        self.platform_collector.collect_gpu_metrics().unwrap_or_else(|e| {
            collection_errors.push(e);
            GpuMetrics::default()
        })
    } else {
        GpuMetrics::default()
    };
    let network = if config.network_profiling.enabled {
        self.collect_network_metrics().unwrap_or_else(|e| {
            collection_errors.push(e);
            NetworkMetrics::default()
        })
    } else {
        NetworkMetrics::default()
    };
    // Inference, thermal, battery, and platform metrics have no enable flag;
    // they are collected every cycle.
    let inference = self.collect_inference_metrics().unwrap_or_else(|e| {
        collection_errors.push(e);
        InferenceMetrics::default()
    });
    let thermal = self.platform_collector.collect_thermal_metrics().unwrap_or_else(|e| {
        collection_errors.push(e);
        ThermalMetrics::default()
    });
    let battery = self.platform_collector.collect_battery_metrics().unwrap_or_else(|e| {
        collection_errors.push(e);
        BatteryMetrics::default()
    });
    let platform = self.platform_collector.collect_platform_metrics().unwrap_or_else(|e| {
        collection_errors.push(e);
        PlatformMetrics::default()
    });
    let snapshot = MobileMetricsSnapshot {
        timestamp,
        memory,
        cpu,
        gpu,
        network,
        inference,
        thermal,
        battery,
        platform,
    };
    // Publish: replace the current snapshot, then append to bounded history.
    // Poisoned locks are skipped (best-effort publishing).
    if let Ok(mut current) = self.current_metrics.write() {
        *current = snapshot.clone();
    }
    if let Ok(mut history) = self.metrics_history.lock() {
        history.push_back(snapshot);
        if history.len() > config.sampling.max_samples {
            history.pop_front();
        }
    }
    let collection_time_ms = collection_start.elapsed().as_millis() as f64;
    if let Ok(mut state) = self.collection_state.write() {
        state.total_samples += 1;
        // A cycle counts as one error regardless of how many categories failed.
        if !collection_errors.is_empty() {
            state.error_count += 1;
        }
        // Incremental mean of per-cycle wall-clock cost.
        let total_samples = state.total_samples as f64;
        state.avg_collection_time_ms = (state.avg_collection_time_ms * (total_samples - 1.0)
            + collection_time_ms)
            / total_samples;
        state.last_collection = Some(Instant::now());
    }
    if !collection_errors.is_empty() {
        tracing::warn!(
            "Collection completed with {} errors: {:?}",
            collection_errors.len(),
            collection_errors
        );
    }
    tracing::trace!(
        "Metrics collection completed in {:.2}ms",
        collection_time_ms
    );
    Ok(())
}
/// Collect network metrics.
///
/// NOTE(review): all values below are hard-coded placeholders, not live
/// readings — confirm whether a platform-backed implementation is planned
/// before relying on these numbers.
fn collect_network_metrics(&self) -> Result<NetworkMetrics> {
    Ok(NetworkMetrics {
        bytes_sent: 1024000,
        bytes_received: 2048000,
        packets_sent: 2000,
        packets_received: 3000,
        connection_count: 5,
        latency_ms: 45.0,
        bandwidth_mbps: 25.0,
        error_rate: 0.02,
    })
}
/// Aggregate the completed-inference history and cache stats into an
/// `InferenceMetrics` snapshot.
///
/// All figures are derived from the bounded (last 1000) completed-inference
/// ring, so they describe a sliding window rather than the process lifetime.
fn collect_inference_metrics(&self) -> Result<InferenceMetrics> {
    let tracker = self
        .inference_tracker
        .lock()
        .map_err(|e| TrustformersError::runtime_error(format!("Lock error: {}", e)))?;
    let total_inferences = tracker.completed_inferences.len() as u64;
    let successful_inferences =
        tracker.completed_inferences.iter().filter(|inf| inf.success).count() as u64;
    let failed_inferences = total_inferences - successful_inferences;
    let avg_latency_ms = if total_inferences > 0 {
        tracker.completed_inferences.iter().map(|inf| inf.duration_ms).sum::<f64>()
            / total_inferences as f64
    } else {
        0.0
    };
    // Folding from +inf / 0.0 yields the window min/max (empty window handled
    // below for the min).
    let min_latency_ms = tracker
        .completed_inferences
        .iter()
        .map(|inf| inf.duration_ms)
        .fold(f64::INFINITY, f64::min);
    let max_latency_ms = tracker
        .completed_inferences
        .iter()
        .map(|inf| inf.duration_ms)
        .fold(0.0, f64::max);
    // NOTE(review): throughput assumes sequential execution (1 / avg latency);
    // concurrent inference would make this an underestimate — confirm intent.
    let throughput_per_sec = if total_inferences > 0 && avg_latency_ms > 0.0 {
        1000.0 / avg_latency_ms
    } else {
        0.0
    };
    let cache_hit_rate = if tracker.cache_stats.total_requests > 0 {
        tracker.cache_stats.cache_hits as f64 / tracker.cache_stats.total_requests as f64
    } else {
        0.0
    };
    // Reported load time is the SLOWEST recorded model load, not an average.
    let model_load_time_ms = tracker.model_load_times.values().copied().fold(0.0, f64::max);
    Ok(InferenceMetrics {
        total_inferences,
        successful_inferences,
        failed_inferences,
        avg_latency_ms,
        // An empty window would report +inf; normalize to 0.0.
        min_latency_ms: if min_latency_ms.is_infinite() { 0.0 } else { min_latency_ms },
        max_latency_ms,
        throughput_per_sec,
        cache_hit_rate,
        model_load_time_ms,
    })
}
/// Reject configurations the collector cannot run with.
///
/// # Errors
/// Returns `invalid_argument` when the sampling interval or sample cap is
/// zero, or when the frequency thresholds are not strictly ordered.
fn validate_config(config: &MobileProfilerConfig) -> Result<()> {
    let sampling = &config.sampling;
    if sampling.interval_ms == 0 {
        return Err(
            TrustformersError::invalid_argument("Sampling interval must be > 0".into()).into(),
        );
    }
    if sampling.max_samples == 0 {
        return Err(
            TrustformersError::invalid_argument("Max samples must be > 0".into()).into(),
        );
    }
    // The high-frequency threshold must sit strictly below the low-frequency
    // one for adaptive sampling to make sense.
    if sampling.high_freq_threshold_ms >= sampling.low_freq_threshold_ms {
        return Err(TrustformersError::invalid_argument(
            "High frequency threshold must be less than low frequency threshold".into(),
        )
        .into());
    }
    Ok(())
}
/// Select the metrics backend for the compile-time target platform.
///
/// Exactly one of the `cfg` blocks below survives compilation, so each
/// block is the function's trailing expression on its target.
fn create_platform_collector(
    config: Arc<RwLock<MobileProfilerConfig>>,
) -> Result<Arc<dyn PlatformCollector + Send + Sync>> {
    #[cfg(target_os = "ios")]
    {
        Ok(Arc::new(IOSCollector { config }))
    }
    #[cfg(target_os = "android")]
    {
        Ok(Arc::new(AndroidCollector { config }))
    }
    #[cfg(not(any(target_os = "ios", target_os = "android")))]
    {
        Ok(Arc::new(GenericCollector { config }))
    }
}
/// Estimate how much memory (MB) the retained snapshot history occupies.
///
/// Shallow estimate: `size_of` ignores heap data owned by each snapshot,
/// so this is a lower bound.
fn estimate_memory_usage(&self, history: &VecDeque<MobileMetricsSnapshot>) -> f32 {
    let total_bytes = history.len() * std::mem::size_of::<MobileMetricsSnapshot>();
    total_bytes as f32 / (1024.0 * 1024.0)
}
}
#[cfg(target_os = "ios")]
impl PlatformCollector for IOSCollector {
    /// iOS memory metrics.
    ///
    /// NOTE(review): all values in this impl are hard-coded stubs, not live
    /// platform readings. (The original also imported `std::mem` here without
    /// using it; that dead import is removed.)
    fn collect_memory_metrics(&self) -> Result<MemoryMetrics> {
        tracing::trace!("Collecting iOS memory metrics");
        Ok(MemoryMetrics {
            heap_used_mb: 128.0,
            heap_free_mb: 256.0,
            heap_total_mb: 384.0,
            native_used_mb: 64.0,
            graphics_used_mb: 32.0,
            code_used_mb: 16.0,
            stack_used_mb: 8.0,
            other_used_mb: 24.0,
            available_mb: 1024.0,
        })
    }
    /// iOS CPU metrics (stub values).
    fn collect_cpu_metrics(&self) -> Result<CpuMetrics> {
        tracing::trace!("Collecting iOS CPU metrics");
        Ok(CpuMetrics {
            usage_percent: 30.0,
            user_percent: 20.0,
            system_percent: 10.0,
            idle_percent: 70.0,
            frequency_mhz: 3200,
            temperature_c: 38.0,
            throttling_level: 0.1,
        })
    }
    /// iOS GPU metrics (stub values).
    fn collect_gpu_metrics(&self) -> Result<GpuMetrics> {
        tracing::trace!("Collecting iOS GPU metrics");
        Ok(GpuMetrics {
            usage_percent: 55.0,
            memory_used_mb: 384.0,
            memory_total_mb: 1536.0,
            frequency_mhz: 1396,
            temperature_c: 45.0,
            power_mw: 4200.0,
        })
    }
    /// iOS thermal metrics (stub values).
    fn collect_thermal_metrics(&self) -> Result<ThermalMetrics> {
        tracing::trace!("Collecting iOS thermal metrics");
        Ok(ThermalMetrics {
            temperature_c: 42.0,
            thermal_state: crate::device_info::ThermalState::Fair,
            throttling_level: 0.1,
            temperature_trend: TemperatureTrend::Rising,
        })
    }
    /// iOS battery metrics (stub values).
    fn collect_battery_metrics(&self) -> Result<BatteryMetrics> {
        tracing::trace!("Collecting iOS battery metrics");
        Ok(BatteryMetrics {
            level_percent: 68,
            is_charging: false,
            power_consumption_mw: 3200.0,
            estimated_life_minutes: 145,
        })
    }
    /// iOS platform extras: Metal / CoreML stats (default-initialized stubs).
    fn collect_platform_metrics(&self) -> Result<PlatformMetrics> {
        tracing::trace!("Collecting iOS platform metrics");
        Ok(PlatformMetrics {
            #[cfg(target_os = "ios")]
            ios: Some(IOSMetrics {
                metal_stats: MetalPerformanceStats::default(),
                coreml_stats: CoreMLPerformanceStats::default(),
                memory_pressure: IOSMemoryPressure::default(),
            }),
            #[cfg(target_os = "android")]
            android: None,
        })
    }
    fn platform_name(&self) -> &str {
        "iOS"
    }
    /// iOS supports the common categories plus Metal and CoreML stats.
    fn supports_metric(&self, metric_type: &str) -> bool {
        matches!(
            metric_type,
            "memory" | "cpu" | "gpu" | "thermal" | "battery" | "metal" | "coreml"
        )
    }
}
#[cfg(target_os = "android")]
impl PlatformCollector for AndroidCollector {
    /// Android memory metrics.
    ///
    /// NOTE(review): all values in this impl are hard-coded stubs, not live
    /// platform readings.
    fn collect_memory_metrics(&self) -> Result<MemoryMetrics> {
        tracing::trace!("Collecting Android memory metrics");
        Ok(MemoryMetrics {
            heap_used_mb: 96.0,
            heap_free_mb: 128.0,
            heap_total_mb: 224.0,
            native_used_mb: 48.0,
            graphics_used_mb: 64.0,
            code_used_mb: 12.0,
            stack_used_mb: 4.0,
            other_used_mb: 16.0,
            available_mb: 512.0,
        })
    }
    /// Android CPU metrics (stub values).
    fn collect_cpu_metrics(&self) -> Result<CpuMetrics> {
        tracing::trace!("Collecting Android CPU metrics");
        Ok(CpuMetrics {
            usage_percent: 35.0,
            user_percent: 25.0,
            system_percent: 10.0,
            idle_percent: 65.0,
            frequency_mhz: 2800,
            temperature_c: 40.0,
            throttling_level: 0.15,
        })
    }
    /// Android GPU metrics (stub values).
    fn collect_gpu_metrics(&self) -> Result<GpuMetrics> {
        tracing::trace!("Collecting Android GPU metrics");
        Ok(GpuMetrics {
            usage_percent: 40.0,
            memory_used_mb: 320.0,
            memory_total_mb: 1024.0,
            frequency_mhz: 950,
            temperature_c: 38.0,
            power_mw: 2800.0,
        })
    }
    /// Android thermal metrics (stub values).
    fn collect_thermal_metrics(&self) -> Result<ThermalMetrics> {
        tracing::trace!("Collecting Android thermal metrics");
        Ok(ThermalMetrics {
            temperature_c: 45.0,
            thermal_state: crate::device_info::ThermalState::Fair,
            throttling_level: 0.2,
            temperature_trend: TemperatureTrend::Rising,
        })
    }
    /// Android battery metrics (stub values).
    fn collect_battery_metrics(&self) -> Result<BatteryMetrics> {
        tracing::trace!("Collecting Android battery metrics");
        Ok(BatteryMetrics {
            level_percent: 72,
            is_charging: true,
            power_consumption_mw: 2800.0,
            estimated_life_minutes: 220,
        })
    }
    /// Android platform extras: NNAPI / GPU-delegate / Doze stats
    /// (default-initialized stubs).
    fn collect_platform_metrics(&self) -> Result<PlatformMetrics> {
        tracing::trace!("Collecting Android platform metrics");
        Ok(PlatformMetrics {
            #[cfg(target_os = "ios")]
            ios: None,
            #[cfg(target_os = "android")]
            android: Some(AndroidMetrics {
                nnapi_stats: NNAPIPerformanceStats::default(),
                gpu_delegate_stats: GPUDelegateStats::default(),
                memory_stats: AndroidMemoryStats::default(),
                doze_status: DozeStatus::default(),
            }),
        })
    }
    fn platform_name(&self) -> &str {
        "Android"
    }
    /// Android supports the common categories plus NNAPI and Doze stats.
    fn supports_metric(&self, metric_type: &str) -> bool {
        matches!(
            metric_type,
            "memory" | "cpu" | "gpu" | "thermal" | "battery" | "nnapi" | "doze"
        )
    }
}
impl PlatformCollector for GenericCollector {
    /// Generic (non-mobile) memory metrics.
    ///
    /// NOTE(review): all values in this impl are hard-coded stubs, not live
    /// platform readings.
    fn collect_memory_metrics(&self) -> Result<MemoryMetrics> {
        tracing::trace!("Collecting generic memory metrics");
        Ok(MemoryMetrics {
            heap_used_mb: 64.0,
            heap_free_mb: 128.0,
            heap_total_mb: 192.0,
            native_used_mb: 32.0,
            graphics_used_mb: 16.0,
            code_used_mb: 8.0,
            stack_used_mb: 4.0,
            other_used_mb: 12.0,
            available_mb: 256.0,
        })
    }
    /// Generic CPU metrics (stub values).
    fn collect_cpu_metrics(&self) -> Result<CpuMetrics> {
        tracing::trace!("Collecting generic CPU metrics");
        Ok(CpuMetrics {
            usage_percent: 25.0,
            user_percent: 15.0,
            system_percent: 10.0,
            idle_percent: 75.0,
            frequency_mhz: 2400,
            temperature_c: 35.0,
            throttling_level: 0.0,
        })
    }
    /// Generic GPU metrics (stub values).
    fn collect_gpu_metrics(&self) -> Result<GpuMetrics> {
        tracing::trace!("Collecting generic GPU metrics");
        Ok(GpuMetrics {
            usage_percent: 20.0,
            memory_used_mb: 128.0,
            memory_total_mb: 512.0,
            frequency_mhz: 800,
            temperature_c: 40.0,
            power_mw: 2000.0,
        })
    }
    /// Generic thermal metrics (stub values).
    fn collect_thermal_metrics(&self) -> Result<ThermalMetrics> {
        tracing::trace!("Collecting generic thermal metrics");
        Ok(ThermalMetrics {
            temperature_c: 35.0,
            thermal_state: crate::device_info::ThermalState::Nominal,
            throttling_level: 0.0,
            temperature_trend: TemperatureTrend::Stable,
        })
    }
    /// Generic battery metrics (stub values).
    fn collect_battery_metrics(&self) -> Result<BatteryMetrics> {
        tracing::trace!("Collecting generic battery metrics");
        Ok(BatteryMetrics {
            level_percent: 85,
            is_charging: false,
            power_consumption_mw: 1500.0,
            estimated_life_minutes: 300,
        })
    }
    /// No platform-specific extras on generic targets.
    fn collect_platform_metrics(&self) -> Result<PlatformMetrics> {
        tracing::trace!("Collecting generic platform metrics");
        Ok(PlatformMetrics {
            #[cfg(target_os = "ios")]
            ios: None,
            #[cfg(target_os = "android")]
            android: None,
        })
    }
    fn platform_name(&self) -> &str {
        "Generic"
    }
    /// Only the common metric categories are available on generic targets.
    fn supports_metric(&self, metric_type: &str) -> bool {
        matches!(
            metric_type,
            "memory" | "cpu" | "gpu" | "thermal" | "battery"
        )
    }
}
impl Default for CollectionStatistics {
fn default() -> Self {
Self {
total_samples: 0,
collection_duration: Duration::new(0, 0),
average_sampling_rate: 0.0,
history_size: 0,
current_memory_usage_mb: 0.0,
error_count: 0,
avg_collection_time_ms: 0.0,
success_rate: 1.0,
}
}
}
impl Default for CollectionState {
fn default() -> Self {
Self {
is_collecting: false,
collection_start: None,
last_collection: None,
total_samples: 0,
error_count: 0,
avg_collection_time_ms: 0.0,
}
}
}
#[cfg(test)]
mod tests {
    use super::*;
    use std::time::Duration;

    /// Build a config with every profiling subsystem disabled so the
    /// collector stays effectively idle and the tests run fast.
    fn fast_test_config() -> MobileProfilerConfig {
        let mut config = MobileProfilerConfig::default();
        config.memory_profiling.enabled = false;
        config.cpu_profiling.enabled = false;
        config.gpu_profiling.enabled = false;
        config.network_profiling.enabled = false;
        config.real_time_monitoring.enabled = false;
        // Long interval + tiny history cap keep background work negligible.
        config.sampling.interval_ms = 10000;
        config.sampling.max_samples = 10;
        config
    }

    /// Construction succeeds with a valid config.
    #[test]
    fn test_collector_creation() {
        let config = fast_test_config();
        let collector = MobileMetricsCollector::new(config);
        assert!(collector.is_ok());
    }

    /// A zero sampling interval is rejected at construction time.
    #[test]
    fn test_collector_configuration_validation() {
        let mut config = MobileProfilerConfig::default();
        config.sampling.interval_ms = 0;
        let result = MobileMetricsCollector::new(config);
        assert!(result.is_err());
    }

    /// Start → sample → snapshot → stop round-trip.
    /// NOTE(review): marked `#[ignore]` in the original — presumably slow or
    /// flaky in CI; confirm before re-enabling.
    #[test]
    #[ignore]
    fn test_collection_lifecycle() {
        let config = fast_test_config();
        let collector = MobileMetricsCollector::new(config).expect("Operation failed");
        assert!(collector.start_collection().is_ok());
        std::thread::sleep(Duration::from_millis(1));
        assert!(collector.collect_metrics().is_ok());
        let snapshot = collector.get_current_snapshot();
        assert!(snapshot.is_ok());
        assert!(collector.stop_collection().is_ok());
        std::thread::sleep(Duration::from_millis(1));
    }

    /// A completed inference session shows up in the next snapshot.
    #[test]
    fn test_inference_tracking() {
        let config = fast_test_config();
        let collector = MobileMetricsCollector::new(config).expect("Operation failed");
        assert!(collector.start_inference_tracking("test_session", "test_model").is_ok());
        std::thread::sleep(Duration::from_millis(1));
        assert!(collector.end_inference_tracking("test_session", true).is_ok());
        assert!(collector.collect_metrics().is_ok());
        let snapshot = collector.get_current_snapshot().expect("Operation failed");
        assert!(snapshot.inference.total_inferences > 0);
    }

    /// 2 hits out of 3 requests yields an exact 2/3 hit rate.
    #[test]
    fn test_cache_tracking() {
        let config = fast_test_config();
        let collector = MobileMetricsCollector::new(config).expect("Operation failed");
        assert!(collector.record_cache_hit(2.5).is_ok());
        assert!(collector.record_cache_miss(15.0).is_ok());
        assert!(collector.record_cache_hit(3.0).is_ok());
        assert!(collector.collect_metrics().is_ok());
        let snapshot = collector.get_current_snapshot().expect("Operation failed");
        assert_eq!(snapshot.inference.cache_hit_rate, 2.0 / 3.0);
    }

    /// The recorded (single, hence maximum) model load time is reported.
    #[test]
    fn test_model_load_time_tracking() {
        let config = fast_test_config();
        let collector = MobileMetricsCollector::new(config).expect("Operation failed");
        assert!(collector.record_model_load_time("test_model", 1500.0).is_ok());
        assert!(collector.collect_metrics().is_ok());
        let snapshot = collector.get_current_snapshot().expect("Operation failed");
        assert_eq!(snapshot.inference.model_load_time_ms, 1500.0);
    }

    /// 1 initial sample (taken by start) + 5 manual cycles = 6 samples.
    #[test]
    fn test_statistics_collection() {
        let config = fast_test_config();
        let collector = MobileMetricsCollector::new(config).expect("Operation failed");
        assert!(collector.start_collection().is_ok());
        std::thread::sleep(Duration::from_millis(1));
        for _ in 0..5 {
            assert!(collector.collect_metrics().is_ok());
        }
        assert!(collector.stop_collection().is_ok());
        let stats = collector.get_collection_statistics();
        assert_eq!(stats.total_samples, 6);
        assert!(stats.success_rate > 0.0);
        assert!(stats.collection_duration.as_nanos() > 0);
    }

    /// A validated config swap is observable through `get_config`.
    #[test]
    fn test_configuration_update() {
        let config = fast_test_config();
        let collector = MobileMetricsCollector::new(config).expect("Operation failed");
        let mut new_config = collector.get_config();
        new_config.sampling.interval_ms = 200;
        assert!(collector.update_config(new_config).is_ok());
        let updated_config = collector.get_config();
        assert_eq!(updated_config.sampling.interval_ms, 200);
    }

    /// History is capped at `max_samples`, dropping the oldest entries.
    #[test]
    fn test_history_management() {
        let mut config = fast_test_config();
        config.sampling.max_samples = 3;
        let collector = MobileMetricsCollector::new(config).expect("Operation failed");
        for _ in 0..5 {
            assert!(collector.collect_metrics().is_ok());
        }
        let history = collector.get_all_snapshots();
        assert_eq!(history.len(), 3);
    }

    /// A non-empty history produces a positive memory estimate.
    #[test]
    fn test_memory_usage_estimation() {
        let config = fast_test_config();
        let collector = MobileMetricsCollector::new(config).expect("Operation failed");
        for _ in 0..10 {
            assert!(collector.collect_metrics().is_ok());
        }
        let stats = collector.get_collection_statistics();
        assert!(stats.current_memory_usage_mb > 0.0);
    }

    /// Collection succeeds even with every subsystem disabled; the snapshot
    /// still carries a valid timestamp.
    #[test]
    fn test_error_resilience() {
        let config = fast_test_config();
        let collector = MobileMetricsCollector::new(config).expect("Operation failed");
        assert!(collector.collect_metrics().is_ok());
        let snapshot = collector.get_current_snapshot().expect("Operation failed");
        assert!(snapshot.timestamp > 0);
    }
}