1use crate::device_info::{MobileDeviceInfo, PerformanceTier};
8use serde::{Deserialize, Serialize};
9use serde_json::json;
10use std::collections::{HashMap, VecDeque};
11use std::time::Instant;
12use trustformers_core::error::{CoreError, Result};
13use trustformers_core::TrustformersError;
14
/// Top-level mobile performance profiler that coordinates platform metric
/// collection, bottleneck detection, trend/regression analysis, and alerting.
pub struct MobilePerformanceProfiler {
    /// Profiler-wide configuration.
    config: ProfilerConfig,
    /// Platform-specific backend (iOS, Android, or generic fallback).
    platform_profiler: Box<dyn PlatformProfiler + Send + Sync>,
    /// Collects timestamped metric snapshots.
    metrics_collector: MetricsCollector,
    /// Detects CPU/memory/etc. bottlenecks from recent snapshots.
    bottleneck_detector: BottleneckDetector,
    /// Generates optimization suggestions and pattern analysis.
    performance_analyzer: PerformanceAnalyzer,
    /// Raises threshold-based alerts.
    alert_system: AlertSystem,
    /// Currently running session, if any (at most one at a time).
    profiling_session: Option<ProfilingSession>,
    /// Rolling history of profile snapshots, capped at `config.max_history_size`.
    historical_data: VecDeque<ProfileSnapshot>,
}
26
/// Configuration for [`MobilePerformanceProfiler`].
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ProfilerConfig {
    /// Enables continuous (real-time) profiling.
    pub enable_realtime_profiling: bool,
    /// Interval between profiling samples, in milliseconds.
    pub profiling_interval_ms: u64,
    /// Enables the platform-native profiler backend.
    pub enable_platform_integration: bool,
    /// Maximum number of snapshots retained in history.
    pub max_history_size: usize,
    /// Metric collection settings.
    pub metrics_config: MetricsConfig,
    /// Bottleneck detection settings.
    pub bottleneck_config: BottleneckConfig,
    /// Alert trigger thresholds.
    pub alert_thresholds: AlertThresholds,
    /// Enables data export.
    pub enable_export: bool,
    /// Default export format.
    pub export_format: ExportFormat,
}
49
/// Abstraction over platform-native profiling facilities (e.g. Instruments
/// on iOS, systrace/Perfetto on Android).
pub trait PlatformProfiler {
    /// Begins platform-level metric collection.
    fn start_profiling(&mut self) -> Result<()>;

    /// Ends platform-level metric collection.
    fn stop_profiling(&mut self) -> Result<()>;

    /// Samples the current platform metrics.
    fn collect_metrics(&self) -> Result<PlatformMetrics>;

    /// Serializes collected data in the requested format.
    fn export_data(&self, format: ExportFormat) -> Result<Vec<u8>>;

    /// Lists the capabilities this backend supports.
    fn get_capabilities(&self) -> Vec<ProfilerCapability>;
}
67
/// One sample of platform-level metrics.
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct PlatformMetrics {
    /// CPU utilization and timing breakdown.
    pub cpu_metrics: CpuMetrics,
    /// GPU metrics, when a GPU is present and instrumented.
    pub gpu_metrics: Option<GpuMetrics>,
    /// Memory usage and pressure.
    pub memory_metrics: MemoryMetrics,
    /// Network throughput and connectivity.
    pub network_metrics: NetworkMetrics,
    /// Free-form platform-specific counters keyed by name.
    pub platform_specific: HashMap<String, f64>,
}
82
/// CPU utilization metrics for one sample.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CpuMetrics {
    /// Overall CPU utilization, 0–100.
    pub utilization_percent: f32,
    /// Utilization per logical core, 0–100 each.
    pub per_core_utilization: Vec<f32>,
    /// Current clock frequency per core, in MHz.
    pub frequency_mhz: Vec<u32>,
    /// Context switches per second.
    pub context_switches_per_sec: u32,
    /// 1/5/15-minute load averages.
    pub load_average: [f32; 3],
    /// Time spent in user mode, percent.
    pub user_time_percent: f32,
    /// Time spent in kernel mode, percent.
    pub kernel_time_percent: f32,
    /// Idle time, percent.
    pub idle_time_percent: f32,
}
103
/// GPU utilization metrics for one sample.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct GpuMetrics {
    /// GPU compute utilization, 0–100.
    pub utilization_percent: f32,
    /// GPU memory utilization, 0–100.
    pub memory_utilization_percent: f32,
    /// GPU clock frequency in MHz.
    pub frequency_mhz: u32,
    /// GPU temperature in degrees Celsius.
    pub temperature_celsius: f32,
    /// GPU power draw in milliwatts.
    pub power_consumption_mw: f32,
    /// Number of active shader units.
    pub active_shaders: u32,
}
120
/// Memory usage metrics for one sample.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct MemoryMetrics {
    /// Total memory in use, in MB.
    pub total_usage_mb: usize,
    /// Memory still available, in MB.
    pub available_mb: usize,
    /// OS-reported memory pressure level.
    pub pressure_level: MemoryPressureLevel,
    /// Page faults per second.
    pub page_faults_per_sec: u32,
    /// Heap allocations per second.
    pub allocations_per_sec: u32,
    /// Heap deallocations per second.
    pub deallocations_per_sec: u32,
    /// Garbage-collector metrics, for managed runtimes only.
    pub gc_metrics: Option<GcMetrics>,
}
139
/// Memory pressure severity, ordered from least to most severe.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize)]
pub enum MemoryPressureLevel {
    Low,
    Medium,
    High,
    Critical,
}
148
/// Garbage-collection statistics (only relevant on managed runtimes).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct GcMetrics {
    /// Cumulative time spent in GC, in milliseconds.
    pub total_gc_time_ms: u64,
    /// GC cycles per minute.
    pub gc_frequency_per_min: f32,
    /// Average stop-the-world pause, in milliseconds.
    pub avg_pause_time_ms: f32,
    /// Memory reclaimed by GC, in MB.
    pub memory_freed_mb: usize,
}
161
/// Network throughput and connectivity metrics for one sample.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct NetworkMetrics {
    /// Upload throughput, bytes per second.
    pub bytes_sent_per_sec: u64,
    /// Download throughput, bytes per second.
    pub bytes_received_per_sec: u64,
    /// Round-trip latency in milliseconds.
    pub latency_ms: f32,
    /// Number of open connections.
    pub connection_count: u32,
    /// Network errors per second.
    pub errors_per_sec: u32,
    /// Active connection type.
    pub connection_type: NetworkConnectionType,
    /// Signal strength in dBm, when available (wireless links).
    pub signal_strength_dbm: Option<i32>,
}
180
/// Kind of network link currently in use.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub enum NetworkConnectionType {
    WiFi,
    Cellular4G,
    Cellular5G,
    Ethernet,
    Offline,
    Unknown,
}
191
/// Selects which metric categories are collected and how often.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct MetricsConfig {
    /// Collect CPU metrics.
    pub collect_cpu: bool,
    /// Collect GPU metrics.
    pub collect_gpu: bool,
    /// Collect memory metrics.
    pub collect_memory: bool,
    /// Collect network metrics.
    pub collect_network: bool,
    /// Collect inference-specific metrics.
    pub collect_inference: bool,
    /// Sampling frequency in Hz.
    pub sampling_rate_hz: u32,
    /// Enables finer-grained (more expensive) collection.
    pub detailed_collection: bool,
}
210
/// Internal collector that produces timestamped [`MetricsSnapshot`]s.
struct MetricsCollector {
    /// Collection settings.
    config: MetricsConfig,
    /// Device the metrics are collected for.
    device_info: MobileDeviceInfo,
    /// All snapshots collected so far.
    metrics_history: VecDeque<MetricsSnapshot>,
    /// Set while a collection window is active; `None` when stopped.
    collection_start_time: Option<Instant>,
}
218
/// One timestamped bundle of platform, inference, thermal, and battery metrics.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct MetricsSnapshot {
    // `Instant` is not serializable; skipped on (de)serialization and
    // re-initialized to "now" on deserialize.
    #[serde(skip, default = "Instant::now")]
    pub timestamp: Instant,
    /// Platform-level metrics (CPU/GPU/memory/network).
    pub platform_metrics: PlatformMetrics,
    /// Inference pipeline metrics.
    pub inference_metrics: InferenceMetrics,
    /// Thermal metrics, when available.
    pub thermal_metrics: Option<ThermalMetrics>,
    /// Battery/power metrics, when available.
    pub battery_metrics: Option<BatteryPowerMetrics>,
}
229
/// Metrics describing inference workload behavior.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct InferenceMetrics {
    /// End-to-end inference latency, milliseconds.
    pub latency_ms: f32,
    /// Throughput in inferences per second.
    pub throughput_ips: f32,
    /// Number of requests waiting in the queue.
    pub queue_depth: usize,
    /// Time taken to load the model, milliseconds.
    pub model_load_time_ms: f32,
    /// Model memory footprint, MB.
    pub memory_footprint_mb: usize,
    /// Accuracy score, when measured.
    pub accuracy_score: Option<f32>,
    /// Per-backend utilization breakdown.
    pub backend_utilization: BackendUtilization,
}
248
/// Utilization of each compute backend, 0–100 percent each.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BackendUtilization {
    /// CPU backend utilization.
    pub cpu_percent: f32,
    /// GPU backend utilization, if used.
    pub gpu_percent: Option<f32>,
    /// NPU backend utilization, if used.
    pub npu_percent: Option<f32>,
    /// Custom-accelerator utilization, if used.
    pub custom_percent: Option<f32>,
}
261
/// Thermal state of the device.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ThermalMetrics {
    /// Device temperature, degrees Celsius.
    pub temperature_celsius: f32,
    /// Current throttling level (0 = none).
    pub throttling_level: u8,
    /// Thermal pressure indicator.
    pub thermal_pressure: f32,
    /// Effectiveness of active/passive cooling.
    pub cooling_effectiveness: f32,
}
274
/// Battery and power-draw metrics.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BatteryPowerMetrics {
    /// Instantaneous power draw, milliwatts.
    pub power_consumption_mw: f32,
    /// Battery drain rate, percent per hour.
    pub drain_rate_percent_per_hour: f32,
    /// Power efficiency indicator.
    pub power_efficiency: f32,
    /// Estimated remaining battery life, minutes.
    pub estimated_life_minutes: u32,
}
287
/// Configuration for bottleneck detection.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BottleneckConfig {
    /// Detect CPU-bound bottlenecks.
    pub detect_cpu_bottlenecks: bool,
    /// Detect memory-pressure bottlenecks.
    pub detect_memory_bottlenecks: bool,
    /// Detect I/O bottlenecks.
    pub detect_io_bottlenecks: bool,
    /// Detect thermal-throttling bottlenecks.
    pub detect_thermal_bottlenecks: bool,
    /// CPU utilization above this percent counts as a bottleneck.
    pub cpu_threshold_percent: f32,
    /// Memory utilization above this percent counts as a bottleneck.
    pub memory_threshold_percent: f32,
    /// Number of recent samples retained for analysis.
    pub analysis_window_samples: usize,
}
306
/// Internal detector that flags performance bottlenecks from recent snapshots.
struct BottleneckDetector {
    /// Detection thresholds and toggles.
    config: BottleneckConfig,
    /// Sliding window of recent snapshots (capped at `analysis_window_samples`).
    analysis_buffer: VecDeque<MetricsSnapshot>,
    /// Bottlenecks found by the most recent analysis pass.
    detected_bottlenecks: Vec<PerformanceBottleneck>,
}
313
/// A detected performance bottleneck with suggested remediations.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PerformanceBottleneck {
    /// Resource that is bottlenecked.
    pub bottleneck_type: BottleneckType,
    /// Human-readable description.
    pub description: String,
    /// How severe the bottleneck is.
    pub severity: BottleneckSeverity,
    /// How long the bottleneck has persisted, milliseconds.
    pub duration_ms: u64,
    /// Estimated performance impact, percent.
    pub performance_impact_percent: f32,
    /// Suggested optimizations to relieve the bottleneck.
    pub optimizations: Vec<OptimizationSuggestion>,
    /// Detection confidence, 0.0–1.0.
    pub confidence: f32,
}
332
/// Category of resource a bottleneck applies to.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum BottleneckType {
    CPU,
    Memory,
    GPU,
    IO,
    Network,
    Thermal,
    Battery,
    Backend,
}
345
/// Bottleneck severity, ordered from least to most severe.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize)]
pub enum BottleneckSeverity {
    Low,
    Medium,
    High,
    Critical,
}
354
/// An actionable suggestion for improving performance.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct OptimizationSuggestion {
    /// Area of optimization.
    pub category: OptimizationCategory,
    /// Human-readable description of the suggested change.
    pub description: String,
    /// Estimated improvement if applied, percent.
    pub expected_improvement_percent: f32,
    /// Implementation difficulty.
    pub difficulty: OptimizationDifficulty,
    /// Relative priority.
    pub priority: OptimizationPriority,
}
369
/// Area a performance optimization targets.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum OptimizationCategory {
    ModelCompression,
    MemoryOptimization,
    ComputeOptimization,
    ThermalManagement,
    PowerOptimization,
    NetworkOptimization,
    CacheOptimization,
}
381
/// Effort required to apply an optimization.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum OptimizationDifficulty {
    Easy,
    Medium,
    Hard,
    Expert,
}
390
/// Priority of an optimization, ordered from lowest to highest.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize)]
pub enum OptimizationPriority {
    Low,
    Medium,
    High,
    Critical,
}
399
/// Internal analyzer aggregating pattern, regression, and trend analysis.
struct PerformanceAnalyzer {
    /// Recognized recurring performance patterns.
    performance_patterns: Vec<PerformancePattern>,
    /// Detects regressions against a baseline snapshot.
    regression_detector: RegressionDetector,
    /// Tracks metric trends over time.
    trend_analyzer: TrendAnalyzer,
}
406
/// A recurring performance pattern observed over time.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PerformancePattern {
    /// Kind of pattern observed.
    pub pattern_type: PatternType,
    /// Human-readable description.
    pub description: String,
    /// How often the pattern occurs.
    pub frequency: f32,
    /// Estimated impact of the pattern.
    pub impact: f32,
    /// Recommended actions to address it.
    pub suggested_actions: Vec<String>,
}
421
/// Kinds of recurring performance patterns the analyzer recognizes.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum PatternType {
    MemoryLeak,
    CpuSpike,
    ThermalThrottling,
    BatteryDrain,
    NetworkCongestion,
    LoadBalanceIssue,
    CacheInefficiency,
}
433
/// Internal detector comparing current metrics against a stored baseline.
struct RegressionDetector {
    /// Baseline snapshot to compare against; `None` until one is recorded.
    baseline_metrics: Option<MetricsSnapshot>,
    /// Relative change (percent) required to flag a regression.
    regression_threshold_percent: f32,
    /// Regressions found so far.
    detected_regressions: Vec<PerformanceRegression>,
}
440
/// A detected regression of one metric relative to its baseline.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PerformanceRegression {
    /// Name of the regressed metric.
    pub metric_name: String,
    /// Value recorded in the baseline.
    pub baseline_value: f32,
    /// Current value.
    pub current_value: f32,
    /// Relative degradation, percent.
    pub regression_percent: f32,
    /// How severe the regression is.
    pub severity: RegressionSeverity,
}
455
/// Regression severity, ordered from least to most severe.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize)]
pub enum RegressionSeverity {
    Minor,
    Moderate,
    Major,
    Critical,
}
464
/// Internal analyzer that tracks per-metric trends over a sliding window.
struct TrendAnalyzer {
    /// Number of samples considered per trend.
    trend_window_size: usize,
    /// Trends keyed by metric name.
    performance_trends: HashMap<String, PerformanceTrend>,
}
470
/// Observed trend for one metric.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PerformanceTrend {
    /// Direction the metric is moving.
    pub direction: TrendDirection,
    /// Size of the change.
    pub magnitude: f32,
    /// Confidence in the trend, 0.0–1.0.
    pub confidence: f32,
    /// Predicted next value, when one can be extrapolated.
    pub prediction: Option<f32>,
}
483
/// Direction of a metric trend.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum TrendDirection {
    Improving,
    Stable,
    Degrading,
    Volatile,
}
492
/// Internal alerting subsystem that compares metrics against thresholds.
struct AlertSystem {
    /// Trigger thresholds.
    thresholds: AlertThresholds,
    /// Alerts currently in effect.
    active_alerts: Vec<PerformanceAlert>,
    /// All alerts raised during the lifetime of the profiler.
    alert_history: VecDeque<PerformanceAlert>,
}
499
/// Thresholds above/below which alerts are raised.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AlertThresholds {
    /// CPU utilization alert threshold, percent.
    pub cpu_threshold_percent: f32,
    /// Memory utilization alert threshold, percent.
    pub memory_threshold_percent: f32,
    /// Latency alert threshold, milliseconds.
    pub latency_threshold_ms: f32,
    /// Temperature alert threshold, degrees Celsius.
    pub temperature_threshold_celsius: f32,
    /// Battery-level alert threshold (alerts below this), percent.
    pub battery_threshold_percent: u8,
    /// Power-draw alert threshold, milliwatts.
    pub power_threshold_mw: f32,
}
516
/// A raised performance alert.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PerformanceAlert {
    /// What triggered the alert.
    pub alert_type: AlertType,
    /// Severity of the alert.
    pub severity: AlertSeverity,
    /// Human-readable message.
    pub message: String,
    /// Observed value that triggered the alert.
    pub trigger_value: f32,
    /// Threshold that was crossed.
    pub threshold_value: f32,
    // `Instant` is not serializable; skipped on (de)serialization and
    // re-initialized to "now" on deserialize.
    #[serde(skip, default = "Instant::now")]
    pub timestamp: Instant,
    /// How long the condition has held, milliseconds.
    pub duration_ms: u64,
    /// Recommended remediation steps.
    pub suggested_actions: Vec<String>,
}
538
/// Condition an alert reports.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum AlertType {
    HighCpuUsage,
    HighMemoryUsage,
    HighLatency,
    HighTemperature,
    LowBattery,
    HighPowerConsumption,
    PerformanceRegression,
    SystemOverload,
}
551
/// Alert severity, ordered from least to most severe.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize)]
pub enum AlertSeverity {
    Info,
    Warning,
    Error,
    Critical,
}
560
/// State of one active profiling session.
#[derive(Debug, Clone)]
pub struct ProfilingSession {
    /// Caller-supplied session identifier.
    session_id: String,
    /// When the session started.
    start_time: Instant,
    /// When the session ended; `None` while still running.
    end_time: Option<Instant>,
    /// Configuration captured at session start.
    config: ProfilerConfig,
    /// Number of snapshots collected so far.
    collected_snapshots: usize,
    /// Running session statistics.
    session_stats: SessionStats,
}
571
/// Summary statistics for a completed profiling session.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SessionStats {
    /// Total session duration, milliseconds.
    pub duration_ms: u64,
    /// Snapshots collected during the session.
    pub snapshots_collected: usize,
    /// Average sampling rate achieved, Hz.
    pub avg_sampling_rate_hz: f32,
    /// Approximate size of collected data, bytes.
    pub data_size_bytes: usize,
    /// Number of bottlenecks detected.
    pub bottlenecks_detected: usize,
    /// Number of alerts triggered.
    pub alerts_triggered: usize,
}
588
/// Full analyzed snapshot: metrics plus derived scores, bottlenecks, alerts.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ProfileSnapshot {
    // `Instant` is not serializable; skipped on (de)serialization and
    // re-initialized to "now" on deserialize.
    #[serde(skip, default = "Instant::now")]
    pub timestamp: Instant,
    /// Composite performance score, 0–100 (higher is better).
    pub performance_score: f32,
    /// Bottlenecks detected for this snapshot.
    pub bottlenecks: Vec<PerformanceBottleneck>,
    /// Alerts raised for this snapshot.
    pub alerts: Vec<PerformanceAlert>,
    /// Underlying raw metrics.
    pub metrics: MetricsSnapshot,
    /// Aggregated optimization suggestions.
    pub optimization_suggestions: Vec<OptimizationSuggestion>,
}
600
/// Capability a platform profiler backend may support.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum ProfilerCapability {
    CpuProfiling,
    GpuProfiling,
    MemoryProfiling,
    NetworkProfiling,
    ThermalProfiling,
    BatteryProfiling,
    InstrumentsIntegration,
    SystraceIntegration,
    PerfettoIntegration,
    CustomProfiling,
}
615
/// Formats profiling data can be exported in.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum ExportFormat {
    JSON,
    CSV,
    Protobuf,
    Trace,
    Chrome,
    Instruments,
    Perfetto,
}
627
628impl Default for ProfilerConfig {
629 fn default() -> Self {
630 Self {
631 enable_realtime_profiling: true,
632 profiling_interval_ms: 1000, enable_platform_integration: true,
634 max_history_size: 1000,
635 metrics_config: MetricsConfig::default(),
636 bottleneck_config: BottleneckConfig::default(),
637 alert_thresholds: AlertThresholds::default(),
638 enable_export: true,
639 export_format: ExportFormat::JSON,
640 }
641 }
642}
643
644impl Default for MetricsConfig {
645 fn default() -> Self {
646 Self {
647 collect_cpu: true,
648 collect_gpu: true,
649 collect_memory: true,
650 collect_network: true,
651 collect_inference: true,
652 sampling_rate_hz: 10, detailed_collection: false,
654 }
655 }
656}
657
658impl Default for BottleneckConfig {
659 fn default() -> Self {
660 Self {
661 detect_cpu_bottlenecks: true,
662 detect_memory_bottlenecks: true,
663 detect_io_bottlenecks: true,
664 detect_thermal_bottlenecks: true,
665 cpu_threshold_percent: 80.0,
666 memory_threshold_percent: 85.0,
667 analysis_window_samples: 100,
668 }
669 }
670}
671
672impl Default for AlertThresholds {
673 fn default() -> Self {
674 Self {
675 cpu_threshold_percent: 90.0,
676 memory_threshold_percent: 90.0,
677 latency_threshold_ms: 500.0,
678 temperature_threshold_celsius: 85.0,
679 battery_threshold_percent: 20,
680 power_threshold_mw: 5000.0, }
682 }
683}
684
685impl MobilePerformanceProfiler {
686 pub fn new(config: ProfilerConfig, device_info: &MobileDeviceInfo) -> Result<Self> {
688 let platform_profiler = Self::create_platform_profiler(device_info)?;
689 let metrics_collector =
690 MetricsCollector::new(config.metrics_config.clone(), device_info.clone());
691 let bottleneck_detector = BottleneckDetector::new(config.bottleneck_config.clone());
692 let performance_analyzer = PerformanceAnalyzer::new();
693 let alert_system = AlertSystem::new(config.alert_thresholds.clone());
694
695 Ok(Self {
696 config: config.clone(),
697 platform_profiler,
698 metrics_collector,
699 bottleneck_detector,
700 performance_analyzer,
701 alert_system,
702 profiling_session: None,
703 historical_data: VecDeque::with_capacity(config.max_history_size),
704 })
705 }
706
707 fn create_platform_profiler(
709 device_info: &MobileDeviceInfo,
710 ) -> Result<Box<dyn PlatformProfiler + Send + Sync>> {
711 match device_info.basic_info.platform {
712 crate::MobilePlatform::Ios => Ok(Box::new(IOSProfiler::new()?)),
713 crate::MobilePlatform::Android => Ok(Box::new(AndroidProfiler::new()?)),
714 crate::MobilePlatform::Generic => Ok(Box::new(GenericProfiler::new()?)),
715 }
716 }
717
718 pub fn start_session(&mut self, session_id: String) -> Result<()> {
720 if self.profiling_session.is_some() {
721 return Err(TrustformersError::config_error(
722 "Profiling session already active",
723 "start_session",
724 )
725 .into());
726 }
727
728 self.profiling_session = Some(ProfilingSession {
729 session_id: session_id.clone(),
730 start_time: Instant::now(),
731 end_time: None,
732 config: self.config.clone(),
733 collected_snapshots: 0,
734 session_stats: SessionStats {
735 duration_ms: 0,
736 snapshots_collected: 0,
737 avg_sampling_rate_hz: 0.0,
738 data_size_bytes: 0,
739 bottlenecks_detected: 0,
740 alerts_triggered: 0,
741 },
742 });
743
744 if self.config.enable_platform_integration {
745 self.platform_profiler.start_profiling()?;
746 }
747
748 self.metrics_collector.start()?;
749
750 Ok(())
751 }
752
753 pub fn stop_session(&mut self) -> Result<SessionStats> {
755 let session = self.profiling_session.take().ok_or_else(|| {
756 TrustformersError::config_error("No active profiling session", "stop_session")
757 })?;
758
759 if self.config.enable_platform_integration {
760 self.platform_profiler.stop_profiling()?;
761 }
762
763 self.metrics_collector.stop()?;
764
765 let duration = session.start_time.elapsed();
766 let stats = SessionStats {
767 duration_ms: duration.as_millis() as u64,
768 snapshots_collected: session.collected_snapshots,
769 avg_sampling_rate_hz: session.collected_snapshots as f32 / duration.as_secs() as f32,
770 data_size_bytes: self.estimate_data_size(),
771 bottlenecks_detected: self.bottleneck_detector.detected_bottlenecks.len(),
772 alerts_triggered: self.alert_system.alert_history.len(),
773 };
774
775 Ok(stats)
776 }
777
778 pub fn collect_snapshot(&mut self) -> Result<ProfileSnapshot> {
780 let platform_metrics = if self.config.enable_platform_integration {
781 self.platform_profiler.collect_metrics()?
782 } else {
783 PlatformMetrics::default()
784 };
785
786 let metrics_snapshot = self.metrics_collector.collect_snapshot(platform_metrics)?;
787
788 let bottlenecks = self.bottleneck_detector.analyze(&metrics_snapshot)?;
790
791 let alerts = self.alert_system.check_thresholds(&metrics_snapshot)?;
793
794 let optimizations = self
796 .performance_analyzer
797 .generate_suggestions(&metrics_snapshot, &bottlenecks)?;
798
799 let performance_score = self.calculate_performance_score(&metrics_snapshot, &bottlenecks);
801
802 let snapshot = ProfileSnapshot {
803 timestamp: Instant::now(),
804 performance_score,
805 bottlenecks,
806 alerts,
807 metrics: metrics_snapshot,
808 optimization_suggestions: optimizations,
809 };
810
811 if let Some(ref mut session) = self.profiling_session {
813 session.collected_snapshots += 1;
814 }
815
816 self.historical_data.push_back(snapshot.clone());
818 if self.historical_data.len() > self.config.max_history_size {
819 self.historical_data.pop_front();
820 }
821
822 Ok(snapshot)
823 }
824
825 fn calculate_performance_score(
827 &self,
828 metrics: &MetricsSnapshot,
829 bottlenecks: &[PerformanceBottleneck],
830 ) -> f32 {
831 let mut score = 100.0;
832
833 if metrics.platform_metrics.cpu_metrics.utilization_percent > 80.0 {
835 score -= (metrics.platform_metrics.cpu_metrics.utilization_percent - 80.0) * 0.5;
836 }
837
838 match metrics.platform_metrics.memory_metrics.pressure_level {
840 MemoryPressureLevel::Medium => score -= 10.0,
841 MemoryPressureLevel::High => score -= 25.0,
842 MemoryPressureLevel::Critical => score -= 50.0,
843 _ => {},
844 }
845
846 if metrics.inference_metrics.latency_ms > 100.0 {
848 score -= (metrics.inference_metrics.latency_ms - 100.0) * 0.1;
849 }
850
851 for bottleneck in bottlenecks {
853 let penalty = match bottleneck.severity {
854 BottleneckSeverity::Low => 5.0,
855 BottleneckSeverity::Medium => 10.0,
856 BottleneckSeverity::High => 20.0,
857 BottleneckSeverity::Critical => 40.0,
858 };
859 score -= penalty;
860 }
861
862 score.max(0.0).min(100.0)
863 }
864
865 fn estimate_data_size(&self) -> usize {
867 self.historical_data.len() * 2048 }
870
871 pub fn export_data(&self, format: ExportFormat) -> Result<Vec<u8>> {
873 match format {
874 ExportFormat::JSON => {
875 let data = serde_json::to_vec(&self.historical_data)
876 .map_err(|e| TrustformersError::serialization_error(e.to_string()))?;
877 Ok(data)
878 },
879 _ => {
880 self.platform_profiler.export_data(format)
882 },
883 }
884 }
885
886 pub fn get_capabilities(&self) -> Vec<ProfilerCapability> {
888 self.platform_profiler.get_capabilities()
889 }
890
891 pub fn get_statistics(&self) -> Result<ProfilingStatistics> {
893 let current_session = self.profiling_session.as_ref();
894
895 Ok(ProfilingStatistics {
896 total_snapshots: self.historical_data.len(),
897 average_performance_score: self.calculate_average_performance_score(),
898 active_bottlenecks: self.bottleneck_detector.detected_bottlenecks.len(),
899 active_alerts: self.alert_system.active_alerts.len(),
900 session_duration_ms: current_session
901 .map(|s| s.start_time.elapsed().as_millis() as u64)
902 .unwrap_or(0),
903 data_collection_rate_hz: self.calculate_data_collection_rate(),
904 })
905 }
906
907 fn calculate_average_performance_score(&self) -> f32 {
908 if self.historical_data.is_empty() {
909 return 0.0;
910 }
911
912 let sum: f32 = self.historical_data.iter().map(|s| s.performance_score).sum();
913 sum / self.historical_data.len() as f32
914 }
915
916 fn calculate_data_collection_rate(&self) -> f32 {
917 if let Some(session) = &self.profiling_session {
918 let duration_secs = session.start_time.elapsed().as_secs_f32();
919 if duration_secs > 0.0 {
920 return session.collected_snapshots as f32 / duration_secs;
921 }
922 }
923 0.0
924 }
925}
926
/// Aggregate profiler statistics returned by `get_statistics`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ProfilingStatistics {
    /// Number of snapshots currently in history.
    pub total_snapshots: usize,
    /// Mean performance score across stored snapshots.
    pub average_performance_score: f32,
    /// Bottlenecks flagged by the latest analysis.
    pub active_bottlenecks: usize,
    /// Alerts currently active.
    pub active_alerts: usize,
    /// Elapsed time of the active session, milliseconds (0 if none).
    pub session_duration_ms: u64,
    /// Snapshot collection rate for the active session, Hz.
    pub data_collection_rate_hz: f32,
}
943
/// iOS platform profiler backend (Instruments-oriented).
pub struct IOSProfiler {
    /// Whether Instruments integration is available on this build target.
    instruments_integration: bool,
    /// Capabilities this backend advertises.
    capabilities: Vec<ProfilerCapability>,
}
949
/// Android platform profiler backend (systrace/Perfetto-oriented).
pub struct AndroidProfiler {
    /// Whether systrace integration is available on this build target.
    systrace_integration: bool,
    /// Whether Perfetto integration is available on this build target.
    perfetto_integration: bool,
    /// Capabilities this backend advertises.
    capabilities: Vec<ProfilerCapability>,
}
955
/// Fallback profiler backend for platforms without native integration.
pub struct GenericProfiler {
    /// Capabilities this backend advertises.
    capabilities: Vec<ProfilerCapability>,
}
959
960impl IOSProfiler {
962 pub fn new() -> Result<Self> {
963 Ok(Self {
964 instruments_integration: Self::check_instruments_availability(),
965 capabilities: vec![
966 ProfilerCapability::CpuProfiling,
967 ProfilerCapability::MemoryProfiling,
968 ProfilerCapability::GpuProfiling,
969 ProfilerCapability::ThermalProfiling,
970 ProfilerCapability::BatteryProfiling,
971 ProfilerCapability::InstrumentsIntegration,
972 ],
973 })
974 }
975
976 fn check_instruments_availability() -> bool {
977 #[cfg(target_os = "ios")]
979 {
980 true }
983 #[cfg(not(target_os = "ios"))]
984 {
985 false
986 }
987 }
988}
989
990impl PlatformProfiler for IOSProfiler {
991 fn start_profiling(&mut self) -> Result<()> {
992 Ok(())
994 }
995
996 fn stop_profiling(&mut self) -> Result<()> {
997 Ok(())
999 }
1000
1001 fn collect_metrics(&self) -> Result<PlatformMetrics> {
1002 Ok(PlatformMetrics::default())
1004 }
1005
1006 fn export_data(&self, format: ExportFormat) -> Result<Vec<u8>> {
1007 match format {
1008 ExportFormat::Instruments => {
1009 Ok(vec![])
1011 },
1012 _ => Err(TrustformersError::config_error(
1013 "Export format not supported on iOS",
1014 "export_ios_data",
1015 )
1016 .into()),
1017 }
1018 }
1019
1020 fn get_capabilities(&self) -> Vec<ProfilerCapability> {
1021 self.capabilities.clone()
1022 }
1023}
1024
1025impl AndroidProfiler {
1026 pub fn new() -> Result<Self> {
1027 Ok(Self {
1028 systrace_integration: Self::check_systrace_availability(),
1029 perfetto_integration: Self::check_perfetto_availability(),
1030 capabilities: vec![
1031 ProfilerCapability::CpuProfiling,
1032 ProfilerCapability::MemoryProfiling,
1033 ProfilerCapability::GpuProfiling,
1034 ProfilerCapability::NetworkProfiling,
1035 ProfilerCapability::SystraceIntegration,
1036 ProfilerCapability::PerfettoIntegration,
1037 ],
1038 })
1039 }
1040
1041 fn check_systrace_availability() -> bool {
1042 #[cfg(target_os = "android")]
1044 {
1045 true }
1047 #[cfg(not(target_os = "android"))]
1048 {
1049 false
1050 }
1051 }
1052
1053 fn check_perfetto_availability() -> bool {
1054 #[cfg(target_os = "android")]
1056 {
1057 true }
1059 #[cfg(not(target_os = "android"))]
1060 {
1061 false
1062 }
1063 }
1064}
1065
1066impl PlatformProfiler for AndroidProfiler {
1067 fn start_profiling(&mut self) -> Result<()> {
1068 Ok(())
1070 }
1071
1072 fn stop_profiling(&mut self) -> Result<()> {
1073 Ok(())
1075 }
1076
1077 fn collect_metrics(&self) -> Result<PlatformMetrics> {
1078 Ok(PlatformMetrics::default())
1080 }
1081
1082 fn export_data(&self, format: ExportFormat) -> Result<Vec<u8>> {
1083 match format {
1084 ExportFormat::Trace | ExportFormat::Perfetto => {
1085 Ok(vec![])
1087 },
1088 _ => Err(TrustformersError::config_error(
1089 "Export format not supported on Android",
1090 "export_android_data",
1091 )
1092 .into()),
1093 }
1094 }
1095
1096 fn get_capabilities(&self) -> Vec<ProfilerCapability> {
1097 self.capabilities.clone()
1098 }
1099}
1100
1101impl GenericProfiler {
1102 pub fn new() -> Result<Self> {
1103 Ok(Self {
1104 capabilities: vec![
1105 ProfilerCapability::CpuProfiling,
1106 ProfilerCapability::MemoryProfiling,
1107 ProfilerCapability::NetworkProfiling,
1108 ProfilerCapability::CustomProfiling,
1109 ],
1110 })
1111 }
1112}
1113
1114impl PlatformProfiler for GenericProfiler {
1115 fn start_profiling(&mut self) -> Result<()> {
1116 Ok(())
1118 }
1119
1120 fn stop_profiling(&mut self) -> Result<()> {
1121 Ok(())
1123 }
1124
1125 fn collect_metrics(&self) -> Result<PlatformMetrics> {
1126 Ok(PlatformMetrics::default())
1128 }
1129
1130 fn export_data(&self, format: ExportFormat) -> Result<Vec<u8>> {
1131 match format {
1132 ExportFormat::JSON | ExportFormat::CSV => {
1133 Ok(vec![])
1135 },
1136 _ => Err(TrustformersError::config_error(
1137 "Export format not supported",
1138 "export_profiling_data",
1139 )
1140 .into()),
1141 }
1142 }
1143
1144 fn get_capabilities(&self) -> Vec<ProfilerCapability> {
1145 self.capabilities.clone()
1146 }
1147}
1148
1149impl Default for CpuMetrics {
1150 fn default() -> Self {
1151 Self {
1152 utilization_percent: 0.0,
1153 per_core_utilization: vec![],
1154 frequency_mhz: vec![],
1155 context_switches_per_sec: 0,
1156 load_average: [0.0, 0.0, 0.0],
1157 user_time_percent: 0.0,
1158 kernel_time_percent: 0.0,
1159 idle_time_percent: 100.0,
1160 }
1161 }
1162}
1163
1164impl Default for MemoryMetrics {
1165 fn default() -> Self {
1166 Self {
1167 total_usage_mb: 0,
1168 available_mb: 0,
1169 pressure_level: MemoryPressureLevel::Low,
1170 page_faults_per_sec: 0,
1171 allocations_per_sec: 0,
1172 deallocations_per_sec: 0,
1173 gc_metrics: None,
1174 }
1175 }
1176}
1177
1178impl Default for NetworkMetrics {
1179 fn default() -> Self {
1180 Self {
1181 bytes_sent_per_sec: 0,
1182 bytes_received_per_sec: 0,
1183 latency_ms: 0.0,
1184 connection_count: 0,
1185 errors_per_sec: 0,
1186 connection_type: NetworkConnectionType::Unknown,
1187 signal_strength_dbm: None,
1188 }
1189 }
1190}
1191
1192impl MetricsCollector {
1194 fn new(config: MetricsConfig, device_info: MobileDeviceInfo) -> Self {
1195 Self {
1196 config,
1197 device_info,
1198 metrics_history: VecDeque::new(),
1199 collection_start_time: None,
1200 }
1201 }
1202
1203 fn start(&mut self) -> Result<()> {
1204 self.collection_start_time = Some(Instant::now());
1205 Ok(())
1206 }
1207
1208 fn stop(&mut self) -> Result<()> {
1209 self.collection_start_time = None;
1210 Ok(())
1211 }
1212
1213 fn collect_snapshot(&mut self, platform_metrics: PlatformMetrics) -> Result<MetricsSnapshot> {
1214 let snapshot = MetricsSnapshot {
1215 timestamp: Instant::now(),
1216 platform_metrics,
1217 inference_metrics: InferenceMetrics::default(),
1218 thermal_metrics: None,
1219 battery_metrics: None,
1220 };
1221
1222 self.metrics_history.push_back(snapshot.clone());
1223 Ok(snapshot)
1224 }
1225}
1226
1227impl Default for InferenceMetrics {
1228 fn default() -> Self {
1229 Self {
1230 latency_ms: 0.0,
1231 throughput_ips: 0.0,
1232 queue_depth: 0,
1233 model_load_time_ms: 0.0,
1234 memory_footprint_mb: 0,
1235 accuracy_score: None,
1236 backend_utilization: BackendUtilization::default(),
1237 }
1238 }
1239}
1240
1241impl Default for BackendUtilization {
1242 fn default() -> Self {
1243 Self {
1244 cpu_percent: 0.0,
1245 gpu_percent: None,
1246 npu_percent: None,
1247 custom_percent: None,
1248 }
1249 }
1250}
1251
impl BottleneckDetector {
    /// Creates a detector with an empty analysis window.
    fn new(config: BottleneckConfig) -> Self {
        Self {
            config,
            analysis_buffer: VecDeque::new(),
            detected_bottlenecks: vec![],
        }
    }

    /// Analyzes one snapshot against configured thresholds and returns any
    /// detected bottlenecks. The snapshot is also appended to a sliding
    /// window capped at `analysis_window_samples`.
    ///
    /// Currently detects two kinds: CPU utilization above
    /// `cpu_threshold_percent`, and High/Critical memory pressure.
    fn analyze(&mut self, metrics: &MetricsSnapshot) -> Result<Vec<PerformanceBottleneck>> {
        self.analysis_buffer.push_back(metrics.clone());

        // Keep the analysis window bounded.
        if self.analysis_buffer.len() > self.config.analysis_window_samples {
            self.analysis_buffer.pop_front();
        }

        let mut bottlenecks = vec![];

        // CPU bottleneck: single-sample check against the configured threshold.
        if self.config.detect_cpu_bottlenecks
            && metrics.platform_metrics.cpu_metrics.utilization_percent
                > self.config.cpu_threshold_percent
        {
            bottlenecks.push(PerformanceBottleneck {
                bottleneck_type: BottleneckType::CPU,
                description: format!(
                    "High CPU utilization detected: {:.1}%",
                    metrics.platform_metrics.cpu_metrics.utilization_percent
                ),
                severity: self.calculate_bottleneck_severity(
                    metrics.platform_metrics.cpu_metrics.utilization_percent,
                    self.config.cpu_threshold_percent,
                ),
                // Duration tracking not implemented; reported as 0.
                duration_ms: 0,
                // Impact approximated as the overshoot above the threshold.
                performance_impact_percent: metrics
                    .platform_metrics
                    .cpu_metrics
                    .utilization_percent
                    - self.config.cpu_threshold_percent,
                optimizations: vec![OptimizationSuggestion {
                    category: OptimizationCategory::ComputeOptimization,
                    description: "Consider reducing inference frequency or using model compression"
                        .to_string(),
                    expected_improvement_percent: 20.0,
                    difficulty: OptimizationDifficulty::Medium,
                    priority: OptimizationPriority::High,
                }],
                confidence: 0.9,
            });
        }

        // Memory bottleneck: triggered by High or Critical pressure.
        if self.config.detect_memory_bottlenecks
            && matches!(
                metrics.platform_metrics.memory_metrics.pressure_level,
                MemoryPressureLevel::High | MemoryPressureLevel::Critical
            )
        {
            bottlenecks.push(PerformanceBottleneck {
                bottleneck_type: BottleneckType::Memory,
                description: format!(
                    "High memory pressure detected: {:?}",
                    metrics.platform_metrics.memory_metrics.pressure_level
                ),
                severity: match metrics.platform_metrics.memory_metrics.pressure_level {
                    MemoryPressureLevel::High => BottleneckSeverity::High,
                    MemoryPressureLevel::Critical => BottleneckSeverity::Critical,
                    // Unreachable given the matches! guard above; kept for
                    // match exhaustiveness.
                    _ => BottleneckSeverity::Medium,
                },
                duration_ms: 0,
                performance_impact_percent: 30.0,
                optimizations: vec![OptimizationSuggestion {
                    category: OptimizationCategory::MemoryOptimization,
                    description: "Enable aggressive memory optimization and model quantization"
                        .to_string(),
                    expected_improvement_percent: 40.0,
                    difficulty: OptimizationDifficulty::Easy,
                    priority: OptimizationPriority::Critical,
                }],
                confidence: 0.95,
            });
        }

        self.detected_bottlenecks = bottlenecks.clone();
        Ok(bottlenecks)
    }

    /// Maps the ratio of observed value to threshold onto a severity:
    /// >1.5x Critical, >1.25x High, >1.1x Medium, otherwise Low.
    fn calculate_bottleneck_severity(
        &self,
        current_value: f32,
        threshold: f32,
    ) -> BottleneckSeverity {
        let ratio = current_value / threshold;
        if ratio > 1.5 {
            BottleneckSeverity::Critical
        } else if ratio > 1.25 {
            BottleneckSeverity::High
        } else if ratio > 1.1 {
            BottleneckSeverity::Medium
        } else {
            BottleneckSeverity::Low
        }
    }
}
1357
1358impl PerformanceAnalyzer {
1359 fn new() -> Self {
1360 Self {
1361 performance_patterns: vec![],
1362 regression_detector: RegressionDetector::new(),
1363 trend_analyzer: TrendAnalyzer::new(),
1364 }
1365 }
1366
1367 fn generate_suggestions(
1368 &mut self,
1369 metrics: &MetricsSnapshot,
1370 bottlenecks: &[PerformanceBottleneck],
1371 ) -> Result<Vec<OptimizationSuggestion>> {
1372 let mut suggestions = vec![];
1373
1374 for bottleneck in bottlenecks {
1376 suggestions.extend(bottleneck.optimizations.clone());
1377 }
1378
1379 if metrics.inference_metrics.latency_ms > 200.0 {
1381 suggestions.push(OptimizationSuggestion {
1382 category: OptimizationCategory::ComputeOptimization,
1383 description: "High inference latency detected. Consider model optimization or hardware acceleration".to_string(),
1384 expected_improvement_percent: 50.0,
1385 difficulty: OptimizationDifficulty::Medium,
1386 priority: OptimizationPriority::High,
1387 });
1388 }
1389
1390 Ok(suggestions)
1391 }
1392}
1393
1394impl RegressionDetector {
1395 fn new() -> Self {
1396 Self {
1397 baseline_metrics: None,
1398 regression_threshold_percent: 10.0,
1399 detected_regressions: vec![],
1400 }
1401 }
1402}
1403
1404impl TrendAnalyzer {
1405 fn new() -> Self {
1406 Self {
1407 trend_window_size: 50,
1408 performance_trends: HashMap::new(),
1409 }
1410 }
1411}
1412
1413impl AlertSystem {
1414 fn new(thresholds: AlertThresholds) -> Self {
1415 Self {
1416 thresholds,
1417 active_alerts: vec![],
1418 alert_history: VecDeque::new(),
1419 }
1420 }
1421
1422 fn check_thresholds(&mut self, metrics: &MetricsSnapshot) -> Result<Vec<PerformanceAlert>> {
1423 let mut alerts = vec![];
1424
1425 if metrics.platform_metrics.cpu_metrics.utilization_percent
1427 > self.thresholds.cpu_threshold_percent
1428 {
1429 alerts.push(PerformanceAlert {
1430 alert_type: AlertType::HighCpuUsage,
1431 severity: AlertSeverity::Warning,
1432 message: format!(
1433 "CPU utilization is {:.1}%, exceeding threshold of {:.1}%",
1434 metrics.platform_metrics.cpu_metrics.utilization_percent,
1435 self.thresholds.cpu_threshold_percent
1436 ),
1437 trigger_value: metrics.platform_metrics.cpu_metrics.utilization_percent,
1438 threshold_value: self.thresholds.cpu_threshold_percent,
1439 timestamp: Instant::now(),
1440 duration_ms: 0,
1441 suggested_actions: vec![
1442 "Reduce inference frequency".to_string(),
1443 "Enable CPU throttling".to_string(),
1444 "Optimize model computation".to_string(),
1445 ],
1446 });
1447 }
1448
1449 let memory_usage_percent = if metrics.platform_metrics.memory_metrics.total_usage_mb > 0 {
1451 (metrics.platform_metrics.memory_metrics.total_usage_mb as f32
1452 / (metrics.platform_metrics.memory_metrics.total_usage_mb
1453 + metrics.platform_metrics.memory_metrics.available_mb)
1454 as f32)
1455 * 100.0
1456 } else {
1457 0.0
1458 };
1459
1460 if memory_usage_percent > self.thresholds.memory_threshold_percent {
1461 alerts.push(PerformanceAlert {
1462 alert_type: AlertType::HighMemoryUsage,
1463 severity: AlertSeverity::Warning,
1464 message: format!(
1465 "Memory utilization is {:.1}%, exceeding threshold of {:.1}%",
1466 memory_usage_percent, self.thresholds.memory_threshold_percent
1467 ),
1468 trigger_value: memory_usage_percent,
1469 threshold_value: self.thresholds.memory_threshold_percent,
1470 timestamp: Instant::now(),
1471 duration_ms: 0,
1472 suggested_actions: vec![
1473 "Enable memory optimization".to_string(),
1474 "Reduce model size".to_string(),
1475 "Clear model cache".to_string(),
1476 ],
1477 });
1478 }
1479
1480 if metrics.inference_metrics.latency_ms > self.thresholds.latency_threshold_ms {
1482 alerts.push(PerformanceAlert {
1483 alert_type: AlertType::HighLatency,
1484 severity: AlertSeverity::Error,
1485 message: format!(
1486 "Inference latency is {:.1}ms, exceeding threshold of {:.1}ms",
1487 metrics.inference_metrics.latency_ms, self.thresholds.latency_threshold_ms
1488 ),
1489 trigger_value: metrics.inference_metrics.latency_ms,
1490 threshold_value: self.thresholds.latency_threshold_ms,
1491 timestamp: Instant::now(),
1492 duration_ms: 0,
1493 suggested_actions: vec![
1494 "Enable hardware acceleration".to_string(),
1495 "Optimize model architecture".to_string(),
1496 "Reduce batch size".to_string(),
1497 ],
1498 });
1499 }
1500
1501 for alert in &alerts {
1503 self.alert_history.push_back(alert.clone());
1504 }
1505
1506 self.active_alerts = alerts.clone();
1507 Ok(alerts)
1508 }
1509}
1510
/// Stateless utility functions for mobile performance profiling:
/// device-tier-aware configuration, efficiency scoring, and trace export.
pub struct MobileProfilerUtils;
1513
1514impl MobileProfilerUtils {
1515 pub fn create_optimized_config(device_info: &MobileDeviceInfo) -> ProfilerConfig {
1517 let mut config = ProfilerConfig::default();
1518
1519 match device_info.performance_scores.overall_tier {
1521 PerformanceTier::VeryLow => {
1522 config.profiling_interval_ms = 10000; config.metrics_config.sampling_rate_hz = 0; config.max_history_size = 50;
1525 config.metrics_config.detailed_collection = false;
1526 },
1527 PerformanceTier::Low => {
1528 config.profiling_interval_ms = 8000; config.metrics_config.sampling_rate_hz = 0; config.max_history_size = 75;
1531 config.metrics_config.detailed_collection = false;
1532 },
1533 PerformanceTier::Budget => {
1534 config.profiling_interval_ms = 5000; config.metrics_config.sampling_rate_hz = 1; config.max_history_size = 100;
1537 config.metrics_config.detailed_collection = false;
1538 },
1539 PerformanceTier::Medium => {
1540 config.profiling_interval_ms = 3000; config.metrics_config.sampling_rate_hz = 2; config.max_history_size = 300;
1543 config.metrics_config.detailed_collection = false;
1544 },
1545 PerformanceTier::Mid => {
1546 config.profiling_interval_ms = 2000; config.metrics_config.sampling_rate_hz = 5; config.max_history_size = 500;
1549 },
1550 PerformanceTier::High => {
1551 config.profiling_interval_ms = 1000; config.metrics_config.sampling_rate_hz = 10; config.max_history_size = 1000;
1554 config.metrics_config.detailed_collection = true;
1555 },
1556 PerformanceTier::VeryHigh => {
1557 config.profiling_interval_ms = 750; config.metrics_config.sampling_rate_hz = 15; config.max_history_size = 1500;
1560 config.metrics_config.detailed_collection = true;
1561 },
1562 PerformanceTier::Flagship => {
1563 config.profiling_interval_ms = 500; config.metrics_config.sampling_rate_hz = 20; config.max_history_size = 2000;
1566 config.metrics_config.detailed_collection = true;
1567 },
1568 }
1569
1570 if device_info.gpu_info.is_some() {
1572 config.metrics_config.collect_gpu = true;
1573 }
1574
1575 config
1576 }
1577
1578 pub fn calculate_efficiency_score(metrics: &MetricsSnapshot) -> f32 {
1580 let cpu_efficiency = 100.0 - metrics.platform_metrics.cpu_metrics.utilization_percent;
1581 let memory_efficiency = match metrics.platform_metrics.memory_metrics.pressure_level {
1582 MemoryPressureLevel::Low => 100.0,
1583 MemoryPressureLevel::Medium => 75.0,
1584 MemoryPressureLevel::High => 50.0,
1585 MemoryPressureLevel::Critical => 25.0,
1586 };
1587
1588 let inference_efficiency = if metrics.inference_metrics.latency_ms > 0.0 {
1589 (1000.0 / metrics.inference_metrics.latency_ms).min(100.0)
1590 } else {
1591 100.0
1592 };
1593
1594 (cpu_efficiency + memory_efficiency + inference_efficiency) / 3.0
1595 }
1596
1597 pub fn export_to_chrome_trace(snapshots: &[ProfileSnapshot]) -> Result<String> {
1599 let trace_data = json!({
1601 "traceEvents": snapshots.iter().map(|snapshot| {
1602 json!({
1603 "name": "Performance Snapshot",
1604 "ph": "X",
1605 "ts": 0, "dur": 1000,
1607 "pid": 1,
1608 "tid": 1,
1609 "args": {
1610 "performance_score": snapshot.performance_score,
1611 "cpu_usage": snapshot.metrics.platform_metrics.cpu_metrics.utilization_percent,
1612 "memory_pressure": snapshot.metrics.platform_metrics.memory_metrics.pressure_level,
1613 "inference_latency": snapshot.metrics.inference_metrics.latency_ms
1614 }
1615 })
1616 }).collect::<Vec<_>>()
1617 });
1618
1619 Ok(trace_data.to_string())
1620 }
1621}
1622
#[cfg(test)]
mod tests {
    use super::*;
    use crate::device_info::{BasicDeviceInfo, CpuInfo, MemoryInfo, PerformanceScores};

    /// Build a fixed `MobileDeviceInfo` fixture: a generic 8-core arm64 device
    /// with 4 GB RAM, no GPU/NPU, nominal thermals, CPU backend only, and a
    /// `High` performance tier.
    ///
    /// NOTE(review): several structs carry apparently-duplicated fields
    /// (total_cores/core_count, current_state/state, overall_tier/tier,
    /// battery_level_percent/battery_level); the fixture sets each pair to the
    /// same value — confirm which member of each pair is the legacy one.
    fn create_test_device_info() -> MobileDeviceInfo {
        MobileDeviceInfo {
            platform: crate::MobilePlatform::Generic,
            basic_info: BasicDeviceInfo {
                platform: crate::MobilePlatform::Generic,
                manufacturer: "Test".to_string(),
                model: "TestDevice".to_string(),
                os_version: "1.0".to_string(),
                hardware_id: "test123".to_string(),
                device_generation: Some(2023),
            },
            cpu_info: CpuInfo {
                architecture: "arm64".to_string(),
                total_cores: 8,
                core_count: 8,
                performance_cores: 4,
                efficiency_cores: 4,
                max_frequency_mhz: Some(3000),
                l1_cache_kb: Some(64),
                l2_cache_kb: Some(512),
                l3_cache_kb: Some(8192),
                features: vec!["NEON".to_string()],
                simd_support: crate::device_info::SimdSupport::Advanced,
            },
            memory_info: MemoryInfo {
                total_mb: 4096,
                available_mb: 2048,
                total_memory: 4096,
                available_memory: 2048,
                bandwidth_mbps: Some(25600),
                memory_type: "LPDDR5".to_string(),
                frequency_mhz: Some(6400),
                is_low_memory_device: false,
            },
            gpu_info: None,
            npu_info: None,
            thermal_info: crate::device_info::ThermalInfo {
                current_state: crate::device_info::ThermalState::Nominal,
                state: crate::device_info::ThermalState::Nominal,
                throttling_supported: true,
                temperature_sensors: vec![],
                thermal_zones: vec![],
            },
            power_info: crate::device_info::PowerInfo {
                battery_capacity_mah: Some(3000),
                battery_level_percent: Some(75),
                battery_level: Some(75),
                battery_health_percent: Some(95),
                charging_status: crate::device_info::ChargingStatus::NotCharging,
                is_charging: false,
                power_save_mode: false,
                low_power_mode_available: true,
            },
            available_backends: vec![crate::MobileBackend::CPU],
            performance_scores: PerformanceScores {
                cpu_single_core: Some(1200),
                cpu_multi_core: Some(8500),
                gpu_score: None,
                memory_score: Some(9200),
                overall_tier: PerformanceTier::High,
                tier: PerformanceTier::High,
            },
        }
    }

    // Constructing a profiler from a valid device description must succeed.
    #[test]
    fn test_profiler_creation() {
        let device_info = create_test_device_info();
        let config = ProfilerConfig::default();

        let profiler = MobilePerformanceProfiler::new(config, &device_info);
        assert!(profiler.is_ok());
    }

    // Pin the documented defaults of the top-level profiler config.
    #[test]
    fn test_profiler_config_defaults() {
        let config = ProfilerConfig::default();
        assert!(config.enable_realtime_profiling);
        assert_eq!(config.profiling_interval_ms, 1000);
        assert!(config.enable_platform_integration);
        assert_eq!(config.max_history_size, 1000);
    }

    // Pin the defaults of the metrics-collection sub-config.
    #[test]
    fn test_metrics_config_defaults() {
        let config = MetricsConfig::default();
        assert!(config.collect_cpu);
        assert!(config.collect_gpu);
        assert!(config.collect_memory);
        assert!(config.collect_network);
        assert!(config.collect_inference);
        assert_eq!(config.sampling_rate_hz, 10);
    }

    // Pin the defaults of the bottleneck-detection sub-config.
    #[test]
    fn test_bottleneck_config_defaults() {
        let config = BottleneckConfig::default();
        assert!(config.detect_cpu_bottlenecks);
        assert!(config.detect_memory_bottlenecks);
        assert!(config.detect_io_bottlenecks);
        assert!(config.detect_thermal_bottlenecks);
        assert_eq!(config.cpu_threshold_percent, 80.0);
        assert_eq!(config.memory_threshold_percent, 85.0);
    }

    // Pin the default alert thresholds used by AlertSystem.
    #[test]
    fn test_alert_thresholds_defaults() {
        let thresholds = AlertThresholds::default();
        assert_eq!(thresholds.cpu_threshold_percent, 90.0);
        assert_eq!(thresholds.memory_threshold_percent, 90.0);
        assert_eq!(thresholds.latency_threshold_ms, 500.0);
        assert_eq!(thresholds.temperature_threshold_celsius, 85.0);
        assert_eq!(thresholds.battery_threshold_percent, 20);
        assert_eq!(thresholds.power_threshold_mw, 5000.0);
    }

    // The overall performance score for a default (all-zero) snapshot with no
    // bottlenecks must stay within the 0-100 range.
    #[test]
    fn test_performance_score_calculation() {
        let device_info = create_test_device_info();
        let config = ProfilerConfig::default();
        let profiler =
            MobilePerformanceProfiler::new(config, &device_info).expect("Operation failed");

        let metrics = MetricsSnapshot {
            timestamp: Instant::now(),
            platform_metrics: PlatformMetrics::default(),
            inference_metrics: InferenceMetrics::default(),
            thermal_metrics: None,
            battery_metrics: None,
        };

        let bottlenecks = vec![];
        let score = profiler.calculate_performance_score(&metrics, &bottlenecks);
        assert!((0.0..=100.0).contains(&score));
    }

    // The fixture is tier High, so the tier-tuned config must match the
    // High-tier settings in MobileProfilerUtils::create_optimized_config.
    #[test]
    fn test_optimized_config_generation() {
        let device_info = create_test_device_info();
        let config = MobileProfilerUtils::create_optimized_config(&device_info);

        assert_eq!(config.profiling_interval_ms, 1000);
        assert_eq!(config.metrics_config.sampling_rate_hz, 10);
        assert_eq!(config.max_history_size, 1000);
        assert!(config.metrics_config.detailed_collection);
    }

    // Efficiency score must be in 0-100 for a moderate snapshot
    // (50% CPU, low memory pressure, 100 ms latency).
    #[test]
    fn test_efficiency_score_calculation() {
        let metrics = MetricsSnapshot {
            timestamp: Instant::now(),
            platform_metrics: PlatformMetrics {
                cpu_metrics: CpuMetrics {
                    utilization_percent: 50.0,
                    ..Default::default()
                },
                memory_metrics: MemoryMetrics {
                    pressure_level: MemoryPressureLevel::Low,
                    ..Default::default()
                },
                ..Default::default()
            },
            inference_metrics: InferenceMetrics {
                latency_ms: 100.0,
                ..Default::default()
            },
            thermal_metrics: None,
            battery_metrics: None,
        };

        let score = MobileProfilerUtils::calculate_efficiency_score(&metrics);
        assert!((0.0..=100.0).contains(&score));
    }

    // Each platform profiler must advertise CPU profiling plus its
    // platform-specific integration capability.
    #[test]
    fn test_platform_profiler_capabilities() {
        let ios_profiler = IOSProfiler::new().expect("Operation failed");
        let capabilities = ios_profiler.get_capabilities();
        assert!(capabilities.contains(&ProfilerCapability::CpuProfiling));
        assert!(capabilities.contains(&ProfilerCapability::InstrumentsIntegration));

        let android_profiler = AndroidProfiler::new().expect("Operation failed");
        let capabilities = android_profiler.get_capabilities();
        assert!(capabilities.contains(&ProfilerCapability::CpuProfiling));
        assert!(capabilities.contains(&ProfilerCapability::SystraceIntegration));
    }

    // Severity buckets: ratio 1.5 (120/80) => High; ratio 2.0 (160/80) => Critical.
    #[test]
    fn test_bottleneck_severity_calculation() {
        let config = BottleneckConfig::default();
        let detector = BottleneckDetector::new(config);

        let severity = detector.calculate_bottleneck_severity(120.0, 80.0);
        assert_eq!(severity, BottleneckSeverity::High);

        let severity = detector.calculate_bottleneck_severity(160.0, 80.0);
        assert_eq!(severity, BottleneckSeverity::Critical);
    }

    // MemoryPressureLevel must be ordered Low < Medium < High < Critical.
    #[test]
    fn test_memory_pressure_levels() {
        assert!(MemoryPressureLevel::Critical > MemoryPressureLevel::High);
        assert!(MemoryPressureLevel::High > MemoryPressureLevel::Medium);
        assert!(MemoryPressureLevel::Medium > MemoryPressureLevel::Low);
    }

    // ExportFormat must round-trip through serde_json unchanged.
    #[test]
    fn test_export_format_serialization() {
        let format = ExportFormat::JSON;
        let serialized = serde_json::to_string(&format).expect("Operation failed");
        let deserialized: ExportFormat =
            serde_json::from_str(&serialized).expect("Operation failed");
        assert_eq!(format, deserialized);
    }
}