1use crate::debug::DebugLogger;
12use crate::performance::*;
13use js_sys::{Array, Date, Object};
14use std::string::{String, ToString};
15use std::vec::Vec;
16use wasm_bindgen::prelude::*;
17
#[wasm_bindgen]
/// Collects per-operation timing profiles and resource samples for the WASM
/// runtime, tracks real-time metric trends/anomalies, and drives adaptive
/// optimization strategy changes.
pub struct PerformanceProfiler {
    // Static profiler settings (enabled flags, max_samples, detailed_timing, ...).
    config: ProfilerConfig,
    // Feature toggles and window size for real-time trend/anomaly analysis.
    real_time_analytics: RealTimeAnalytics,
    // Strategy selector plus adaptation history and learned baselines.
    adaptive_optimizer: AdaptiveOptimizer,
    // One trend series per tracked metric name ("latency", "throughput", ...).
    performance_trends: Vec<PerformanceTrend>,
    // Rolling anomaly history (capped at 100 entries in `add_anomaly`).
    detected_anomalies: Vec<PerformanceAnomaly>,
    // Named baselines created via `create_baseline`.
    current_baselines: Vec<PerformanceBaseline>,
    // Completed operation records (capped at `config.max_samples()`).
    operation_profiles: Vec<OperationProfile>,
    // Rolling resource snapshots (capped at `config.max_samples()`).
    resource_samples: Vec<ResourceSample>,
    // In-flight operations as (name, type, start_time); matched by name in
    // `end_operation`. `baseline_metrics` is the snapshot for `compare_with_baseline`.
    active_operations: Vec<(String, OperationType, f64)>, baseline_metrics: Option<PerformanceSummary>,
    // Optional logger mirrored into start/end timers.
    debug_logger: Option<DebugLogger>,
}
33
34#[wasm_bindgen]
35impl PerformanceProfiler {
36 #[wasm_bindgen(constructor)]
38 pub fn new(config: ProfilerConfig) -> Self {
39 Self {
40 config,
41 real_time_analytics: RealTimeAnalytics {
42 enabled: true,
43 window_size: 100,
44 trend_analysis: true,
45 anomaly_detection: true,
46 predictive_modeling: true,
47 adaptive_optimization: true,
48 regression_detection: true,
49 },
50 adaptive_optimizer: AdaptiveOptimizer {
51 enabled: true,
52 learning_rate: 0.1,
53 optimization_targets: vec![OptimizationTarget::Balanced],
54 current_strategy: OptimizationStrategy::Hybrid,
55 adaptation_history: Vec::new(),
56 performance_baselines: Vec::new(),
57 },
58 performance_trends: Vec::new(),
59 detected_anomalies: Vec::new(),
60 current_baselines: Vec::new(),
61 operation_profiles: Vec::new(),
62 resource_samples: Vec::new(),
63 active_operations: Vec::new(),
64 baseline_metrics: None,
65 debug_logger: None,
66 }
67 }
68
69 pub fn set_debug_logger(&mut self, logger: DebugLogger) {
71 self.debug_logger = Some(logger);
72 }
73
74 pub fn start_operation(&mut self, name: &str, operation_type: OperationType) {
76 if !self.config.enabled() {
77 return;
78 }
79
80 let start_time = Date::now();
81 self.active_operations.push((name.to_string(), operation_type, start_time));
82
83 if self.config.detailed_timing() {
84 web_sys::console::time_with_label(&format!("🔍 {name}"));
85 }
86
87 if let Some(ref mut logger) = self.debug_logger {
89 logger.start_timer(name);
90 }
91 }
92
    /// Ends a previously started operation: removes its entry from
    /// `active_operations`, records an `OperationProfile`, and returns the
    /// elapsed wall-clock time in milliseconds.
    ///
    /// Returns `None` when profiling is disabled or no active operation with
    /// `name` exists.
    pub fn end_operation(&mut self, name: &str) -> Option<f64> {
        if !self.config.enabled() {
            return None;
        }

        let end_time = Date::now();

        // Match the first (oldest) active entry with this name.
        if let Some(pos) = self.active_operations.iter().position(|(op_name, _, _)| op_name == name)
        {
            let (_, operation_type, start_time) = self.active_operations.remove(pos);
            let duration_ms = end_time - start_time;

            // Only the wall-clock times are measured; the remaining fields are
            // heuristic values from the `estimate_*` helpers.
            let profile = OperationProfile {
                operation_type,
                operation_name: name.to_string(),
                start_time,
                end_time,
                duration_ms,
                // Assumes a fixed 80/20 CPU/GPU time split — TODO confirm.
                cpu_time_ms: duration_ms * 0.8, gpu_time_ms: duration_ms * 0.2, memory_allocated: self.estimate_memory_usage(operation_type),
                memory_peak: crate::get_wasm_memory_usage(),
                gpu_memory_used: self.estimate_gpu_memory_usage(operation_type),
                flops: self.estimate_flops(operation_type, duration_ms),
                memory_bandwidth_gb_s: self.estimate_memory_bandwidth(operation_type),
                cache_hits: self.estimate_cache_hits(operation_type),
                cache_misses: self.estimate_cache_misses(operation_type),
                input_shape: self.estimate_input_shape(operation_type),
                output_shape: self.estimate_output_shape(operation_type),
            };

            self.operation_profiles.push(profile);

            // Bound the history: drop the oldest profile once over the cap.
            if self.operation_profiles.len() > self.config.max_samples() {
                self.operation_profiles.remove(0);
            }

            if self.config.detailed_timing() {
                web_sys::console::time_end_with_label(&format!("🔍 {name}"));
                web_sys::console::log_1(
                    &format!("📊 {name} completed in {duration_ms:.2}ms").into(),
                );
            }

            if let Some(ref mut logger) = self.debug_logger {
                logger.end_timer(name);
            }

            Some(duration_ms)
        } else {
            None
        }
    }
151
152 pub fn sample_resources(&mut self) {
154 if !self.config.enabled() || !self.config.resource_monitoring() {
155 return;
156 }
157
158 let timestamp = Date::now();
159 let cpu_usage = self.estimate_cpu_usage();
160 let gpu_usage = self.estimate_gpu_usage();
161 let wasm_memory = crate::get_wasm_memory_usage();
162 let gpu_memory = self.estimate_gpu_memory_usage(OperationType::FullInference);
163 let battery_level = self.get_battery_level();
164 let power_consumption = self.estimate_power_consumption();
165 let thermal_state = self.get_thermal_state();
166 let cpu_temp = self.estimate_cpu_temperature();
167 let gpu_temp = self.estimate_gpu_temperature();
168
169 let samples = vec![
171 ResourceSample {
172 timestamp,
173 resource_type: ResourceType::CPU,
174 value: cpu_usage,
175 cpu_usage,
176 gpu_usage,
177 wasm_memory,
178 gpu_memory,
179 network_bytes: 0,
180 cache_hit_rate: 0.85,
181 battery_level,
182 power_consumption,
183 thermal_state,
184 cpu_temperature: cpu_temp,
185 gpu_temperature: gpu_temp,
186 },
187 ResourceSample {
188 timestamp,
189 resource_type: ResourceType::Battery,
190 value: battery_level,
191 cpu_usage,
192 gpu_usage,
193 wasm_memory,
194 gpu_memory,
195 network_bytes: 0,
196 cache_hit_rate: 0.85,
197 battery_level,
198 power_consumption,
199 thermal_state,
200 cpu_temperature: cpu_temp,
201 gpu_temperature: gpu_temp,
202 },
203 ResourceSample {
204 timestamp,
205 resource_type: ResourceType::Thermal,
206 value: thermal_state,
207 cpu_usage,
208 gpu_usage,
209 wasm_memory,
210 gpu_memory,
211 network_bytes: 0,
212 cache_hit_rate: 0.85,
213 battery_level,
214 power_consumption,
215 thermal_state,
216 cpu_temperature: cpu_temp,
217 gpu_temperature: gpu_temp,
218 },
219 ];
220
221 for sample in samples {
222 self.resource_samples.push(sample);
223 }
224
225 while self.resource_samples.len() > self.config.max_samples() {
227 self.resource_samples.remove(0);
228 }
229
230 self.check_thermal_throttling(thermal_state, cpu_temp, gpu_temp);
232 }
233
234 pub fn analyze_performance(&self) -> String {
236 let summary = self.analyze_performance_internal();
237 serde_json::to_string(&summary).unwrap_or_else(|_| "{}".to_string())
238 }
239
240 fn analyze_performance_internal(&self) -> PerformanceSummary {
241 let total_time_ms: f64 = self.operation_profiles.iter().map(|p| p.duration_ms).sum();
242 let operation_count = self.operation_profiles.len();
243 let average_fps = if total_time_ms > 0.0 {
244 1000.0 / (total_time_ms / operation_count as f64)
245 } else {
246 0.0
247 };
248
249 let bottlenecks = if self.config.bottleneck_detection() {
250 self.detect_bottlenecks()
251 } else {
252 Vec::new()
253 };
254
255 let top_operations = self.get_top_operations(10);
256 let resource_efficiency = self.calculate_resource_efficiency();
257 let recommendations = self.generate_recommendations(&bottlenecks);
258
259 PerformanceSummary {
260 total_time_ms,
261 operation_count,
262 average_fps: average_fps as f32,
263 bottlenecks,
264 top_operations,
265 resource_efficiency,
266 recommendations,
267 }
268 }
269
270 pub fn get_performance_summary(&self) -> String {
272 let summary = self.analyze_performance_internal();
273 serde_json::to_string_pretty(&summary).unwrap_or_else(|_| "{}".to_string())
274 }
275
276 pub fn get_operation_breakdown(&self) -> Array {
278 let array = Array::new();
279
280 for profile in &self.operation_profiles {
281 let obj = Object::new();
282 js_sys::Reflect::set(&obj, &"name".into(), &profile.operation_name.clone().into())
283 .expect("Failed to set operation name property");
284 js_sys::Reflect::set(
285 &obj,
286 &"type".into(),
287 &format!("{op_type:?}", op_type = profile.operation_type).into(),
288 )
289 .expect("Failed to set operation type property");
290 js_sys::Reflect::set(&obj, &"duration".into(), &profile.duration_ms.into())
291 .expect("Failed to set duration property");
292 js_sys::Reflect::set(&obj, &"start_time".into(), &profile.start_time.into())
293 .expect("Failed to set start_time property");
294 js_sys::Reflect::set(&obj, &"cpu_time".into(), &profile.cpu_time_ms.into())
295 .expect("Failed to set cpu_time property");
296 js_sys::Reflect::set(&obj, &"gpu_time".into(), &profile.gpu_time_ms.into())
297 .expect("Failed to set gpu_time property");
298 js_sys::Reflect::set(&obj, &"memory".into(), &profile.memory_allocated.into())
299 .expect("Failed to set memory property");
300 array.push(&obj);
301 }
302
303 array
304 }
305
306 pub fn get_resource_timeline(&self) -> Array {
308 let array = Array::new();
309
310 for sample in &self.resource_samples {
311 let obj = Object::new();
312 js_sys::Reflect::set(&obj, &"timestamp".into(), &sample.timestamp.into())
313 .expect("Failed to set timestamp property");
314 js_sys::Reflect::set(&obj, &"cpu".into(), &sample.cpu_usage.into())
315 .expect("Failed to set cpu property");
316 js_sys::Reflect::set(&obj, &"gpu".into(), &sample.gpu_usage.into())
317 .expect("Failed to set gpu property");
318 js_sys::Reflect::set(&obj, &"memory".into(), &sample.wasm_memory.into())
319 .expect("Failed to set memory property");
320 js_sys::Reflect::set(&obj, &"gpu_memory".into(), &sample.gpu_memory.into())
321 .expect("Failed to set gpu_memory property");
322 array.push(&obj);
323 }
324
325 array
326 }
327
    /// Snapshots the current analysis as the baseline that
    /// `compare_with_baseline` measures against.
    pub fn set_baseline(&mut self) {
        self.baseline_metrics = Some(self.analyze_performance_internal());
    }
332
333 pub fn compare_with_baseline(&self) -> Option<String> {
335 if let Some(ref baseline) = self.baseline_metrics {
336 let current = self.analyze_performance_internal();
337
338 let time_change =
339 ((current.total_time_ms - baseline.total_time_ms) / baseline.total_time_ms) * 100.0;
340 let fps_change = ((current.average_fps as f64 - baseline.average_fps as f64)
341 / baseline.average_fps as f64)
342 * 100.0;
343 let efficiency_change =
344 (current.resource_efficiency as f64 - baseline.resource_efficiency as f64) * 100.0;
345
346 Some(format!(
347 "Performance Comparison:\n\
348 Total Time: {:.1}% change\n\
349 Average FPS: {:.1}% change\n\
350 Resource Efficiency: {:.1}% change\n\
351 Bottlenecks: {} current vs {} baseline",
352 time_change,
353 fps_change,
354 efficiency_change,
355 current.bottlenecks.len(),
356 baseline.bottlenecks.len()
357 ))
358 } else {
359 None
360 }
361 }
362
363 pub fn clear(&mut self) {
365 self.operation_profiles.clear();
366 self.resource_samples.clear();
367 self.active_operations.clear();
368 }
369
370 pub fn export_data(&self) -> String {
372 let summary = self.analyze_performance();
374
375 format!(
376 r#"{{
377 "operation_profiles": {},
378 "resource_samples": {},
379 "performance_summary": {},
380 "baseline": {}
381}}"#,
382 serde_json::to_string(&self.operation_profiles).unwrap_or("[]".to_string()),
383 serde_json::to_string(&self.resource_samples).unwrap_or("[]".to_string()),
384 serde_json::to_string(&summary).unwrap_or("{}".to_string()),
385 serde_json::to_string(&self.baseline_metrics).unwrap_or("null".to_string())
386 )
387 }
388
389 fn detect_bottlenecks(&self) -> Vec<Bottleneck> {
392 let mut bottlenecks = Vec::new();
393
394 if self.operation_profiles.is_empty() {
395 return bottlenecks;
396 }
397
398 let total_time: f64 = self.operation_profiles.iter().map(|p| p.duration_ms).sum();
399
400 for profile in &self.operation_profiles {
402 let time_percentage = (profile.duration_ms / total_time) * 100.0;
403
404 if time_percentage > 20.0 {
405 let bottleneck_type = if profile.gpu_time_ms > profile.cpu_time_ms {
406 BottleneckType::GPUCompute
407 } else {
408 BottleneckType::CPUCompute
409 };
410
411 bottlenecks.push(Bottleneck {
412 bottleneck_type,
413 operation: profile.operation_name.clone(),
414 severity: (time_percentage / 100.0).min(1.0) as f32,
415 time_percentage: time_percentage as f32,
416 description: format!(
417 "{} takes {:.1}% of total time",
418 profile.operation_name, time_percentage
419 ),
420 recommendation: self.get_optimization_recommendation(bottleneck_type),
421 });
422 }
423 }
424
425 let max_memory = self.operation_profiles.iter().map(|p| p.memory_peak).max().unwrap_or(0);
427 if max_memory > 100 * 1024 * 1024 {
428 bottlenecks.push(Bottleneck {
430 bottleneck_type: BottleneckType::MemoryCapacity,
431 operation: "Overall".to_string(),
432 severity: 0.7,
433 time_percentage: 0.0,
434 description: format!(
435 "High memory usage: {size}MB",
436 size = max_memory / (1024 * 1024)
437 ),
438 recommendation: "Consider model quantization or weight compression".to_string(),
439 });
440 }
441
442 bottlenecks
443 }
444
445 fn get_top_operations(&self, limit: usize) -> Vec<(String, f64)> {
446 let mut operations: Vec<_> = self
447 .operation_profiles
448 .iter()
449 .map(|p| (p.operation_name.clone(), p.duration_ms))
450 .collect();
451
452 operations.sort_by(|a, b| b.1.partial_cmp(&a.1).unwrap_or(std::cmp::Ordering::Equal));
453 operations.truncate(limit);
454 operations
455 }
456
457 fn calculate_resource_efficiency(&self) -> f32 {
458 if self.resource_samples.is_empty() {
459 return 0.5;
460 }
461
462 let avg_cpu = self.resource_samples.iter().map(|s| s.cpu_usage).sum::<f32>()
463 / self.resource_samples.len() as f32;
464 let avg_gpu = self.resource_samples.iter().map(|s| s.gpu_usage).sum::<f32>()
465 / self.resource_samples.len() as f32;
466
467 let cpu_efficiency = (avg_cpu / 100.0).min(1.0);
469 let gpu_efficiency = (avg_gpu / 100.0).min(1.0);
470
471 (cpu_efficiency + gpu_efficiency) / 2.0
472 }
473
474 fn generate_recommendations(&self, bottlenecks: &[Bottleneck]) -> Vec<String> {
475 let mut recommendations = Vec::new();
476
477 for bottleneck in bottlenecks {
478 recommendations.push(bottleneck.recommendation.clone());
479 }
480
481 if self.operation_profiles.len() > 1000 {
483 recommendations.push("Consider reducing profiling overhead in production".to_string());
484 }
485
486 let avg_duration: f64 = self.operation_profiles.iter().map(|p| p.duration_ms).sum::<f64>()
487 / self.operation_profiles.len() as f64;
488 if avg_duration > 100.0 {
489 recommendations
490 .push("Consider model optimization techniques like quantization".to_string());
491 }
492
493 recommendations
494 }
495
496 fn get_optimization_recommendation(&self, bottleneck_type: BottleneckType) -> String {
497 match bottleneck_type {
498 BottleneckType::CPUCompute => {
499 "Consider using WebGPU acceleration or SIMD optimizations".to_string()
500 },
501 BottleneckType::GPUCompute => {
502 "Consider kernel fusion or reducing data transfers".to_string()
503 },
504 BottleneckType::MemoryBandwidth => {
505 "Consider data layout optimizations or caching".to_string()
506 },
507 BottleneckType::MemoryCapacity => {
508 "Consider model compression or quantization".to_string()
509 },
510 BottleneckType::GPUMemory => "Consider reducing batch size or model size".to_string(),
511 BottleneckType::DataTransfer => {
512 "Consider batching operations or reducing transfers".to_string()
513 },
514 BottleneckType::Serialization => "Consider binary formats or streaming".to_string(),
515 BottleneckType::JSInterop => "Consider reducing WASM/JS boundary crossings".to_string(),
516 }
517 }
518
519 fn estimate_memory_usage(&self, op_type: OperationType) -> usize {
522 match op_type {
523 OperationType::ModelLoading => 50 * 1024 * 1024,
524 OperationType::TransformerLayer => 10 * 1024 * 1024,
525 OperationType::Attention => 5 * 1024 * 1024,
526 OperationType::MatMul => 2 * 1024 * 1024,
527 _ => 1024 * 1024,
528 }
529 }
530
531 fn estimate_gpu_memory_usage(&self, op_type: OperationType) -> usize {
532 match op_type {
533 OperationType::ModelLoading => 100 * 1024 * 1024,
534 OperationType::TransformerLayer => 20 * 1024 * 1024,
535 OperationType::Attention => 10 * 1024 * 1024,
536 OperationType::MatMul => 5 * 1024 * 1024,
537 _ => 1024 * 1024,
538 }
539 }
540
541 fn estimate_flops(&self, op_type: OperationType, duration_ms: f64) -> u64 {
542 let base_flops = match op_type {
543 OperationType::MatMul => 1_000_000_000,
544 OperationType::Attention => 500_000_000,
545 OperationType::TransformerLayer => 2_000_000_000,
546 _ => 100_000_000,
547 };
548 (base_flops as f64 * (duration_ms / 1000.0)) as u64
549 }
550
551 fn estimate_memory_bandwidth(&self, op_type: OperationType) -> f32 {
552 match op_type {
553 OperationType::MatMul => 100.0,
554 OperationType::MemoryTransfer => 50.0,
555 _ => 20.0,
556 }
557 }
558
559 fn estimate_cache_hits(&self, op_type: OperationType) -> u32 {
560 match op_type {
561 OperationType::Embedding => 1000,
562 OperationType::Attention => 500,
563 _ => 100,
564 }
565 }
566
567 fn estimate_cache_misses(&self, op_type: OperationType) -> u32 {
568 match op_type {
569 OperationType::ModelLoading => 500,
570 OperationType::MemoryTransfer => 200,
571 _ => 20,
572 }
573 }
574
575 fn estimate_input_shape(&self, op_type: OperationType) -> Vec<usize> {
576 match op_type {
577 OperationType::TransformerLayer => vec![1, 512, 768],
578 OperationType::Attention => vec![1, 12, 512, 64],
579 OperationType::MatMul => vec![512, 768],
580 _ => vec![1, 512],
581 }
582 }
583
584 fn estimate_output_shape(&self, op_type: OperationType) -> Vec<usize> {
585 match op_type {
586 OperationType::TransformerLayer => vec![1, 512, 768],
587 OperationType::Attention => vec![1, 512, 768],
588 OperationType::MatMul => vec![512, 3072],
589 _ => vec![1, 512],
590 }
591 }
592
593 fn estimate_cpu_usage(&self) -> f32 {
594 50.0 + ((Date::now() % 100.0) / 2.0) as f32
596 }
597
598 fn estimate_gpu_usage(&self) -> f32 {
599 30.0 + ((Date::now() % 100.0) / 3.0) as f32
601 }
602
603 pub fn update_real_time_metrics(
607 &mut self,
608 latency_ms: f64,
609 throughput: f32,
610 memory_mb: f32,
611 accuracy: f32,
612 ) {
613 if !self.real_time_analytics.enabled {
614 return;
615 }
616
617 let timestamp = Date::now();
618
619 self.update_performance_trend("latency", latency_ms, timestamp);
621 self.update_performance_trend("throughput", throughput as f64, timestamp);
622 self.update_performance_trend("memory", memory_mb as f64, timestamp);
623 self.update_performance_trend("accuracy", accuracy as f64, timestamp);
624
625 if self.real_time_analytics.anomaly_detection {
627 self.detect_anomalies(timestamp, latency_ms, throughput, memory_mb, accuracy);
628 }
629
630 if self.real_time_analytics.adaptive_optimization && self.adaptive_optimizer.enabled {
632 self.check_and_trigger_adaptation(
633 timestamp, latency_ms, throughput, memory_mb, accuracy,
634 );
635 }
636
637 web_sys::console::log_1(&format!(
638 "📊 Real-time metrics: {:.1}ms latency, {:.1} throughput, {:.1}MB memory, {:.1}% accuracy",
639 latency_ms, throughput, memory_mb, accuracy * 100.0
640 ).into());
641 }
642
643 pub fn set_optimization_target(&mut self, target_name: &str) {
645 let target = match target_name {
646 "latency" => OptimizationTarget::Latency,
647 "throughput" => OptimizationTarget::Throughput,
648 "memory" => OptimizationTarget::MemoryUsage,
649 "power" => OptimizationTarget::PowerEfficiency,
650 "accuracy" => OptimizationTarget::Accuracy,
651 "balanced" => OptimizationTarget::Balanced,
652 _ => OptimizationTarget::Balanced,
653 };
654
655 self.adaptive_optimizer.optimization_targets = vec![target];
656 web_sys::console::log_1(&format!("🎯 Optimization target set to: {target:?}").into());
657 }
658
659 pub fn get_performance_trends(&self) -> js_sys::Object {
661 let trends_obj = js_sys::Object::new();
662
663 for trend in &self.performance_trends {
664 let trend_obj = js_sys::Object::new();
665
666 let values_array = js_sys::Array::new();
668 for &value in &trend.values {
669 values_array.push(&value.into());
670 }
671
672 let timestamps_array = js_sys::Array::new();
673 for ×tamp in &trend.timestamps {
674 timestamps_array.push(×tamp.into());
675 }
676
677 js_sys::Reflect::set(&trend_obj, &"values".into(), &values_array)
678 .expect("Failed to set trend values property");
679 js_sys::Reflect::set(&trend_obj, &"timestamps".into(), ×tamps_array)
680 .expect("Failed to set trend timestamps property");
681 js_sys::Reflect::set(
682 &trend_obj,
683 &"direction".into(),
684 &format!("{direction:?}", direction = trend.trend_direction).into(),
685 )
686 .expect("Failed to set trend direction property");
687 js_sys::Reflect::set(&trend_obj, &"strength".into(), &trend.trend_strength.into())
688 .expect("Failed to set trend strength property");
689 js_sys::Reflect::set(
690 &trend_obj,
691 &"predicted_next".into(),
692 &trend.predicted_next_value.into(),
693 )
694 .expect("Failed to set trend predicted_next property");
695
696 js_sys::Reflect::set(&trends_obj, &trend.metric_name.clone().into(), &trend_obj)
697 .expect("Failed to set trend metric in trends object");
698 }
699
700 trends_obj
701 }
702
703 pub fn get_detected_anomalies(&self) -> js_sys::Array {
705 let anomalies_array = js_sys::Array::new();
706
707 for anomaly in &self.detected_anomalies {
708 let anomaly_obj = js_sys::Object::new();
709 js_sys::Reflect::set(&anomaly_obj, &"timestamp".into(), &anomaly.timestamp.into())
710 .expect("Failed to set anomaly timestamp property");
711 js_sys::Reflect::set(
712 &anomaly_obj,
713 &"metric".into(),
714 &anomaly.metric_name.clone().into(),
715 )
716 .expect("Failed to set anomaly metric property");
717 js_sys::Reflect::set(
718 &anomaly_obj,
719 &"expected".into(),
720 &anomaly.expected_value.into(),
721 )
722 .expect("Failed to set anomaly expected property");
723 js_sys::Reflect::set(&anomaly_obj, &"actual".into(), &anomaly.actual_value.into())
724 .expect("Failed to set anomaly actual property");
725 js_sys::Reflect::set(
726 &anomaly_obj,
727 &"severity".into(),
728 &format!("{severity:?}", severity = anomaly.severity).into(),
729 )
730 .expect("Failed to set anomaly severity property");
731 js_sys::Reflect::set(
732 &anomaly_obj,
733 &"description".into(),
734 &anomaly.description.clone().into(),
735 )
736 .expect("Failed to set anomaly description property");
737 js_sys::Reflect::set(
738 &anomaly_obj,
739 &"suggested_action".into(),
740 &anomaly.suggested_action.clone().into(),
741 )
742 .expect("Failed to set anomaly suggested_action property");
743
744 anomalies_array.push(&anomaly_obj);
745 }
746
747 anomalies_array
748 }
749
750 pub fn get_adaptive_state(&self) -> js_sys::Object {
752 let state_obj = js_sys::Object::new();
753
754 js_sys::Reflect::set(
755 &state_obj,
756 &"enabled".into(),
757 &self.adaptive_optimizer.enabled.into(),
758 )
759 .expect("Failed to set adaptive state enabled property");
760 js_sys::Reflect::set(
761 &state_obj,
762 &"learning_rate".into(),
763 &self.adaptive_optimizer.learning_rate.into(),
764 )
765 .expect("Failed to set adaptive state learning_rate property");
766 js_sys::Reflect::set(
767 &state_obj,
768 &"current_strategy".into(),
769 &format!(
770 "{strategy:?}",
771 strategy = self.adaptive_optimizer.current_strategy
772 )
773 .into(),
774 )
775 .expect("Failed to set adaptive state current_strategy property");
776 js_sys::Reflect::set(
777 &state_obj,
778 &"adaptation_count".into(),
779 &self.adaptive_optimizer.adaptation_history.len().into(),
780 )
781 .expect("Failed to set adaptive state adaptation_count property");
782
783 let targets_array = js_sys::Array::new();
785 for target in &self.adaptive_optimizer.optimization_targets {
786 targets_array.push(&format!("{target:?}").into());
787 }
788 js_sys::Reflect::set(&state_obj, &"optimization_targets".into(), &targets_array)
789 .expect("Failed to set adaptive state optimization_targets property");
790
791 state_obj
792 }
793
794 pub fn set_real_time_analytics(&mut self, enabled: bool) {
796 self.real_time_analytics.enabled = enabled;
797 web_sys::console::log_1(
798 &format!(
799 "📊 Real-time analytics: {}",
800 if enabled { "enabled" } else { "disabled" }
801 )
802 .into(),
803 );
804 }
805
806 pub fn set_adaptive_optimization(&mut self, enabled: bool) {
808 self.adaptive_optimizer.enabled = enabled;
809 web_sys::console::log_1(
810 &format!(
811 "🤖 Adaptive optimization: {}",
812 if enabled { "enabled" } else { "disabled" }
813 )
814 .into(),
815 );
816 }
817
818 pub fn create_baseline(&mut self, name: &str) {
820 if self.operation_profiles.is_empty() {
821 web_sys::console::log_1(
822 &"⚠️ No performance data available for baseline creation".into(),
823 );
824 return;
825 }
826
827 let avg_latency = self.operation_profiles.iter().map(|p| p.duration_ms).sum::<f64>()
828 / self.operation_profiles.len() as f64;
829 let avg_memory =
830 self.operation_profiles.iter().map(|p| p.memory_allocated as f64).sum::<f64>()
831 / self.operation_profiles.len() as f64
832 / 1_048_576.0; let baseline = PerformanceBaseline {
835 name: name.to_string(),
836 timestamp: Date::now(),
837 avg_latency_ms: avg_latency,
838 avg_throughput: if avg_latency > 0.0 { 1000.0 / avg_latency as f32 } else { 0.0 },
839 avg_memory_mb: avg_memory as f32,
840 avg_accuracy: 0.95, };
842
843 self.current_baselines.push(baseline);
844 web_sys::console::log_1(&format!("📏 Performance baseline '{name}' created").into());
845 }
846
    /// Appends `(value, timestamp)` to the named metric's trend series, trims
    /// it to the analytics window, and refreshes the trend's direction,
    /// strength, and one-step prediction via a 5-point linear regression.
    fn update_performance_trend(&mut self, metric_name: &str, value: f64, timestamp: f64) {
        // Find the existing trend, or lazily create one for this metric.
        let trend_index = self.performance_trends.iter().position(|t| t.metric_name == metric_name);

        let trend = if let Some(index) = trend_index {
            &mut self.performance_trends[index]
        } else {
            self.performance_trends.push(PerformanceTrend {
                metric_name: metric_name.to_string(),
                values: Vec::new(),
                timestamps: Vec::new(),
                trend_direction: TrendDirection::Stable,
                trend_strength: 0.0,
                predicted_next_value: value,
            });
            self.performance_trends
                .last_mut()
                .expect("trend just pushed to performance_trends")
        };

        trend.values.push(value);
        trend.timestamps.push(timestamp);

        // Keep only the most recent `window_size` points.
        if trend.values.len() > self.real_time_analytics.window_size {
            trend.values.remove(0);
            trend.timestamps.remove(0);
        }

        if self.real_time_analytics.trend_analysis && trend.values.len() >= 5 {
            let values = trend.values.clone();
            let n = values.len();

            if n >= 5 {
                // Least-squares slope over the last 5 points with x = 0..4.
                let recent_values = &values[n - 5..];
                let sum_x: f64 = (0..5).map(|i| i as f64).sum();
                let sum_y: f64 = recent_values.iter().sum();
                let sum_xy: f64 =
                    recent_values.iter().enumerate().map(|(i, &y)| i as f64 * y).sum();
                let sum_x2: f64 = (0..5).map(|i| (i * i) as f64).sum();

                let slope = (5.0 * sum_xy - sum_x * sum_y) / (5.0 * sum_x2 - sum_x * sum_x);
                // Population variance of the same 5 points (sum_y / 5.0 is their mean).
                let variance = recent_values
                    .iter()
                    .map(|&x| {
                        let diff = x - sum_y / 5.0;
                        diff * diff
                    })
                    .sum::<f64>()
                    / 5.0;

                // A slope only counts as a trend if it exceeds 10% of the std deviation.
                let trend_threshold = variance.sqrt() * 0.1;

                // NOTE(review): a positive slope is always labeled Improving,
                // even for metrics like latency where higher is worse — confirm
                // this is intended.
                if slope > trend_threshold {
                    trend.trend_direction = TrendDirection::Improving;
                    trend.trend_strength = (slope / variance.sqrt()).abs().min(1.0) as f32;
                } else if slope < -trend_threshold {
                    trend.trend_direction = TrendDirection::Degrading;
                    trend.trend_strength = (slope / variance.sqrt()).abs().min(1.0) as f32;
                } else if variance > trend_threshold * trend_threshold {
                    trend.trend_direction = TrendDirection::Volatile;
                    trend.trend_strength = (variance.sqrt() / sum_y.abs()).min(1.0) as f32;
                } else {
                    trend.trend_direction = TrendDirection::Stable;
                    trend.trend_strength = 0.1;
                }

                // Naive one-step-ahead forecast: last value plus the slope.
                let last_value = values[n - 1];
                trend.predicted_next_value = last_value + slope;
            }
        }
    }
927
928 fn detect_anomalies(
929 &mut self,
930 timestamp: f64,
931 latency_ms: f64,
932 throughput: f32,
933 memory_mb: f32,
934 accuracy: f32,
935 ) {
936 let metrics = [
937 ("latency", latency_ms),
938 ("throughput", throughput as f64),
939 ("memory", memory_mb as f64),
940 ("accuracy", accuracy as f64),
941 ];
942
943 for (metric_name, value) in &metrics {
944 self.check_metric_anomaly(timestamp, metric_name, *value);
945 }
946 }
947
948 fn check_metric_anomaly(&mut self, timestamp: f64, metric_name: &str, value: f64) {
949 if let Some(trend) =
950 self.performance_trends.iter().find(|t| t.metric_name == metric_name).cloned()
951 {
952 if trend.values.len() >= 10 {
953 self.process_anomaly_detection(timestamp, metric_name, value, &trend);
954 }
955 }
956 }
957
958 fn process_anomaly_detection(
959 &mut self,
960 timestamp: f64,
961 metric_name: &str,
962 value: f64,
963 trend: &PerformanceTrend,
964 ) {
965 let mean = trend.values.iter().sum::<f64>() / trend.values.len() as f64;
966 let variance = trend.values.iter().map(|&x| (x - mean).powi(2)).sum::<f64>()
967 / trend.values.len() as f64;
968 let std_dev = variance.sqrt();
969
970 let threshold = 2.0 * std_dev;
971 let deviation = (value - mean).abs();
972
973 if deviation > threshold {
974 let severity = self.determine_anomaly_severity(deviation, std_dev);
975 let anomaly = self.create_anomaly(timestamp, metric_name, value, mean, severity);
976 self.add_anomaly(anomaly);
977 }
978 }
979
980 fn determine_anomaly_severity(&self, deviation: f64, std_dev: f64) -> AnomalySeverity {
981 if deviation > 3.0 * std_dev {
982 AnomalySeverity::Critical
983 } else if deviation > 2.5 * std_dev {
984 AnomalySeverity::Warning
985 } else {
986 AnomalySeverity::Info
987 }
988 }
989
990 fn create_anomaly(
991 &self,
992 timestamp: f64,
993 metric_name: &str,
994 actual_value: f64,
995 expected_value: f64,
996 severity: AnomalySeverity,
997 ) -> PerformanceAnomaly {
998 PerformanceAnomaly {
999 timestamp,
1000 metric_name: metric_name.to_string(),
1001 expected_value,
1002 actual_value,
1003 severity,
1004 description: format!(
1005 "{metric_name} deviated by {actual_value:.2} from expected {expected_value:.2}"
1006 ),
1007 suggested_action: self.get_anomaly_suggestion(
1008 metric_name,
1009 actual_value,
1010 expected_value,
1011 ),
1012 }
1013 }
1014
1015 fn add_anomaly(&mut self, anomaly: PerformanceAnomaly) {
1016 self.detected_anomalies.push(anomaly);
1017
1018 if self.detected_anomalies.len() > 100 {
1020 self.detected_anomalies.remove(0);
1021 }
1022 }
1023
1024 fn get_anomaly_suggestion(&self, metric_name: &str, actual: f64, expected: f64) -> String {
1025 match metric_name {
1026 "latency" if actual > expected => {
1027 "Consider enabling more aggressive quantization or reducing model complexity"
1028 .to_string()
1029 },
1030 "throughput" if actual < expected => {
1031 "Check for CPU/GPU bottlenecks or memory pressure".to_string()
1032 },
1033 "memory" if actual > expected => {
1034 "Enable memory optimization or reduce batch size".to_string()
1035 },
1036 "accuracy" if actual < expected => {
1037 "Review quantization settings or model configuration".to_string()
1038 },
1039 _ => "Monitor trend and consider adaptive optimization".to_string(),
1040 }
1041 }
1042
1043 fn check_and_trigger_adaptation(
1044 &mut self,
1045 timestamp: f64,
1046 latency_ms: f64,
1047 throughput: f32,
1048 memory_mb: f32,
1049 accuracy: f32,
1050 ) {
1051 let mut should_adapt = false;
1053 let mut trigger_metric = String::new();
1054
1055 for target in &self.adaptive_optimizer.optimization_targets {
1056 match target {
1057 OptimizationTarget::Latency
1058 if latency_ms > 100.0 => {
1059 should_adapt = true;
1061 trigger_metric = "latency".to_string();
1062 },
1063 OptimizationTarget::Throughput
1064 if throughput < 10.0 => {
1065 should_adapt = true;
1067 trigger_metric = "throughput".to_string();
1068 },
1069 OptimizationTarget::MemoryUsage
1070 if memory_mb > 1000.0 => {
1071 should_adapt = true;
1073 trigger_metric = "memory".to_string();
1074 },
1075 OptimizationTarget::Accuracy
1076 if accuracy < 0.9 => {
1077 should_adapt = true;
1079 trigger_metric = "accuracy".to_string();
1080 },
1081 OptimizationTarget::Balanced
1082 if (latency_ms > 200.0
1084 || throughput < 5.0
1085 || memory_mb > 1500.0
1086 || accuracy < 0.85)
1087 => {
1088 should_adapt = true;
1089 trigger_metric = "balanced".to_string();
1090 },
1091 _ => {},
1092 }
1093 }
1094
1095 if should_adapt {
1096 self.apply_adaptive_optimization(
1097 timestamp,
1098 &trigger_metric,
1099 latency_ms,
1100 throughput,
1101 memory_mb,
1102 accuracy,
1103 );
1104 }
1105 }
1106
    /// Switches the optimizer's strategy based on which metric triggered
    /// adaptation, records an `AdaptationRecord`, and logs the change.
    /// No-op when the mapped strategy equals the current one. The raw metric
    /// values are currently unused (underscore parameters).
    fn apply_adaptive_optimization(
        &mut self,
        timestamp: f64,
        trigger_metric: &str,
        _latency_ms: f64,
        _throughput: f32,
        _memory_mb: f32,
        _accuracy: f32,
    ) {
        let old_strategy = self.adaptive_optimizer.current_strategy;

        // "accuracy" and "balanced" triggers both fall through to Hybrid.
        let new_strategy = match trigger_metric {
            "latency" => OptimizationStrategy::LatencyOptimized,
            "throughput" => OptimizationStrategy::ThroughputOptimized,
            "memory" => OptimizationStrategy::MemoryOptimized,
            _ => OptimizationStrategy::Hybrid,
        };

        if new_strategy != old_strategy {
            let improvement_ratio = self.estimate_strategy_improvement(old_strategy, new_strategy);

            let adaptation = AdaptationRecord {
                timestamp,
                old_strategy,
                new_strategy,
                trigger_metric: trigger_metric.to_string(),
                improvement_ratio,
                // Fixed confidence value; not derived from data — TODO confirm.
                confidence_score: 0.8, };

            self.adaptive_optimizer.current_strategy = new_strategy;
            self.adaptive_optimizer.adaptation_history.push(adaptation);

            // Cap the adaptation history at 50 records, dropping the oldest.
            if self.adaptive_optimizer.adaptation_history.len() > 50 {
                self.adaptive_optimizer.adaptation_history.remove(0);
            }

            web_sys::console::log_1(
                &format!(
                    "🤖 Adaptive optimization: {:?} -> {:?} (trigger: {}, improvement: {:.1}%)",
                    old_strategy,
                    new_strategy,
                    trigger_metric,
                    improvement_ratio * 100.0
                )
                .into(),
            );
        }
    }
1158
1159 fn estimate_strategy_improvement(
1160 &self,
1161 old: OptimizationStrategy,
1162 new: OptimizationStrategy,
1163 ) -> f32 {
1164 let historical_improvement = self.calculate_historical_improvement(old, new);
1166 let device_factor = self.get_device_performance_factor();
1167 let workload_factor = self.get_workload_complexity_factor();
1168
1169 let base_improvement = match (old, new) {
1171 (OptimizationStrategy::CPUPreferred, OptimizationStrategy::GPUPreferred) => 2.5,
1172 (OptimizationStrategy::Hybrid, OptimizationStrategy::MemoryOptimized) => 1.8,
1173 (OptimizationStrategy::MemoryOptimized, OptimizationStrategy::Hybrid) => 1.4,
1174 (OptimizationStrategy::CPUPreferred, OptimizationStrategy::MemoryOptimized) => 3.2,
1175 (OptimizationStrategy::GPUPreferred, OptimizationStrategy::MemoryOptimized) => 1.2,
1176 _ => 1.15, };
1178
1179 (base_improvement * historical_improvement * device_factor * workload_factor).min(5.0)
1180 }
1181
1182 fn calculate_historical_improvement(
1183 &self,
1184 old: OptimizationStrategy,
1185 new: OptimizationStrategy,
1186 ) -> f32 {
1187 if self.performance_trends.len() < 10 {
1189 return 1.0; }
1191
1192 let mut strategy_transitions = Vec::new();
1193 for i in 1..self.performance_trends.len() {
1194 let prev_trend = &self.performance_trends[i - 1];
1195 let curr_trend = &self.performance_trends[i];
1196
1197 let prev_strategy = self.infer_strategy_from_trend(prev_trend);
1199 let curr_strategy = self.infer_strategy_from_trend(curr_trend);
1200
1201 if prev_strategy == old && curr_strategy == new {
1202 let improvement = (curr_trend.trend_strength + 1.0) / 2.0; strategy_transitions.push(improvement);
1204 }
1205 }
1206
1207 if strategy_transitions.is_empty() {
1208 1.0
1209 } else {
1210 let avg_improvement: f32 =
1211 strategy_transitions.iter().sum::<f32>() / strategy_transitions.len() as f32;
1212 0.5 + avg_improvement }
1214 }
1215
1216 fn get_device_performance_factor(&self) -> f32 {
1217 let recent_samples: Vec<_> = self.resource_samples.iter().rev().take(50).collect();
1219 if recent_samples.is_empty() {
1220 return 1.0;
1221 }
1222
1223 let avg_cpu_usage: f32 = recent_samples
1224 .iter()
1225 .filter(|s| s.resource_type == ResourceType::CPU)
1226 .map(|s| s.value)
1227 .sum::<f32>()
1228 / recent_samples.len() as f32;
1229
1230 let avg_memory_usage: f32 = recent_samples
1231 .iter()
1232 .filter(|s| s.resource_type == ResourceType::WAMSMemory)
1233 .map(|s| s.value)
1234 .sum::<f32>()
1235 / recent_samples.len() as f32;
1236
1237 let cpu_factor = (100.0 - avg_cpu_usage) / 100.0;
1239 let memory_factor = (100.0 - avg_memory_usage) / 100.0;
1240
1241 (cpu_factor + memory_factor) / 2.0
1242 }
1243
1244 fn get_workload_complexity_factor(&self) -> f32 {
1245 if self.operation_profiles.is_empty() {
1247 return 1.0;
1248 }
1249
1250 let avg_duration: f64 = self.operation_profiles.iter().map(|p| p.duration_ms).sum::<f64>()
1251 / self.operation_profiles.len() as f64;
1252
1253 let memory_intensity: u64 =
1254 self.operation_profiles.iter().map(|p| p.memory_peak).max().unwrap_or(0) as u64;
1255
1256 let duration_factor = (avg_duration / 1000.0).min(2.0); let memory_factor = (memory_intensity as f64 / (100.0 * 1024.0 * 1024.0)).min(2.0); (1.0 + duration_factor + memory_factor) as f32 / 3.0
1261 }
1262
1263 fn infer_strategy_from_trend(&self, trend: &PerformanceTrend) -> OptimizationStrategy {
1264 if trend.trend_strength > 0.8 {
1266 OptimizationStrategy::MemoryOptimized
1267 } else if trend.trend_strength > 0.6 {
1268 OptimizationStrategy::GPUPreferred
1269 } else {
1270 OptimizationStrategy::Hybrid
1271 }
1272 }
1273
1274 fn get_battery_level(&self) -> f32 {
1276 #[cfg(target_arch = "wasm32")]
1277 {
1278 if let Some(window) = web_sys::window() {
1279 if let Ok(navigator) = js_sys::Reflect::get(&window, &"navigator".into()) {
1280 if let Ok(get_battery) = js_sys::Reflect::get(&navigator, &"getBattery".into())
1281 {
1282 return 0.8; }
1286 }
1287 }
1288 }
1289 1.0 }
1291
1292 fn estimate_power_consumption(&self) -> f32 {
1294 #[cfg(target_arch = "wasm32")]
1295 {
1296 let recent_samples: Vec<_> = self.resource_samples.iter().rev().take(10).collect();
1298 if recent_samples.is_empty() {
1299 return 5.0; }
1301
1302 let avg_cpu: f32 = recent_samples.iter().map(|s| s.cpu_usage).sum::<f32>()
1303 / recent_samples.len() as f32;
1304
1305 let avg_gpu: f32 = recent_samples.iter().map(|s| s.gpu_usage).sum::<f32>()
1306 / recent_samples.len() as f32;
1307
1308 let base_power = 2.0; let cpu_power = (avg_cpu / 100.0) * 8.0; let gpu_power = (avg_gpu / 100.0) * 15.0; base_power + cpu_power + gpu_power
1314 }
1315 #[cfg(not(target_arch = "wasm32"))]
1316 5.0 }
1318
    /// Returns a 0.0–1.0 thermal-stress estimate.
    ///
    /// Resolution order on wasm:
    /// 1. `navigator.thermalState` string, if present (non-standard API —
    ///    NOTE(review): availability varies by platform, confirm targets).
    /// 2. Average CPU/GPU usage over the last 20 resource samples, when at
    ///    least 5 samples exist.
    /// 3. A fixed 0.3 fallback (also used on non-wasm builds).
    fn get_thermal_state(&self) -> f32 {
        #[cfg(target_arch = "wasm32")]
        {
            if let Some(window) = web_sys::window() {
                if let Ok(navigator) = js_sys::Reflect::get(&window, &"navigator".into()) {
                    if let Ok(thermal_state) =
                        js_sys::Reflect::get(&navigator, &"thermalState".into())
                    {
                        // Only a string value counts as a usable thermal state.
                        if let Some(state_str) = thermal_state.as_string() {
                            return match state_str.as_str() {
                                "nominal" => 0.2,
                                "fair" => 0.4,
                                "serious" => 0.6,
                                "critical" => 0.9,
                                _ => 0.3, // unknown state string: neutral default
                            };
                        }
                    }
                }
            }

            // No platform API: estimate from recent resource samples.
            let recent_samples: Vec<_> = self.resource_samples.iter().rev().take(20).collect();
            if recent_samples.len() < 5 {
                // Too little data for a meaningful average.
                return 0.3;
            }

            // Mean of the per-sample CPU/GPU usage midpoints, scaled to 0–1.
            let avg_usage: f32 =
                recent_samples.iter().map(|s| (s.cpu_usage + s.gpu_usage) / 2.0).sum::<f32>()
                    / recent_samples.len() as f32;

            (avg_usage / 100.0).min(1.0)
        }
        #[cfg(not(target_arch = "wasm32"))]
        0.3
    }
1357
1358 fn estimate_cpu_temperature(&self) -> f32 {
1360 #[cfg(target_arch = "wasm32")]
1361 {
1362 let thermal_state = self.get_thermal_state();
1364 let recent_cpu_usage =
1365 self.resource_samples.iter().rev().take(10).map(|s| s.cpu_usage).sum::<f32>()
1366 / 10.0_f32.max(self.resource_samples.len() as f32);
1367
1368 let base_temp = 35.0; let usage_temp = (recent_cpu_usage / 100.0) * 30.0; let thermal_temp = thermal_state * 20.0; base_temp + usage_temp + thermal_temp
1373 }
1374 #[cfg(not(target_arch = "wasm32"))]
1375 45.0 }
1377
1378 fn estimate_gpu_temperature(&self) -> f32 {
1380 #[cfg(target_arch = "wasm32")]
1381 {
1382 let thermal_state = self.get_thermal_state();
1384 let recent_gpu_usage =
1385 self.resource_samples.iter().rev().take(10).map(|s| s.gpu_usage).sum::<f32>()
1386 / 10.0_f32.max(self.resource_samples.len() as f32);
1387
1388 let base_temp = 40.0; let usage_temp = (recent_gpu_usage / 100.0) * 40.0; let thermal_temp = thermal_state * 25.0; base_temp + usage_temp + thermal_temp
1393 }
1394 #[cfg(not(target_arch = "wasm32"))]
1395 55.0 }
1397
1398 fn check_thermal_throttling(&mut self, thermal_state: f32, cpu_temp: f32, gpu_temp: f32) {
1400 let should_throttle = thermal_state > 0.7 || cpu_temp > 75.0 || gpu_temp > 80.0;
1401
1402 if should_throttle {
1403 if let Some(ref mut logger) = self.debug_logger {
1405 logger.info(&format!(
1406 "Thermal throttling detected: thermal_state={:.2}, cpu_temp={:.1}°C, gpu_temp={:.1}°C",
1407 thermal_state, cpu_temp, gpu_temp
1408 ), "thermal");
1409 }
1410
1411 let current_strategy = self.adaptive_optimizer.current_strategy;
1413 let target_strategy = match current_strategy {
1414 OptimizationStrategy::GPUPreferred => OptimizationStrategy::Hybrid,
1415 OptimizationStrategy::Hybrid => OptimizationStrategy::MemoryOptimized,
1416 _ => OptimizationStrategy::MemoryOptimized,
1417 };
1418
1419 if current_strategy != target_strategy {
1420 let estimated_improvement =
1421 self.estimate_strategy_improvement(current_strategy, target_strategy);
1422 if estimated_improvement > 1.1 {
1423 self.adaptive_optimizer.current_strategy = target_strategy;
1425
1426 if let Some(ref mut logger) = self.debug_logger {
1427 logger.info(&format!(
1428 "Thermal adaptive optimization: switched from {current_strategy:?} to {target_strategy:?} (estimated {:.1}% improvement)",
1429 (estimated_improvement - 1.0) * 100.0
1430 ), "thermal");
1431 }
1432 }
1433 }
1434 }
1435 }
1436
1437 pub fn get_thermal_recommendations(&self) -> Vec<String> {
1439 let mut recommendations = Vec::new();
1440
1441 let recent_thermal = self
1442 .resource_samples
1443 .iter()
1444 .rev()
1445 .take(10)
1446 .filter(|s| s.resource_type == ResourceType::Thermal)
1447 .map(|s| s.value)
1448 .sum::<f32>()
1449 / 10.0;
1450
1451 let recent_battery = self
1452 .resource_samples
1453 .iter()
1454 .rev()
1455 .take(10)
1456 .filter(|s| s.resource_type == ResourceType::Battery)
1457 .map(|s| s.value)
1458 .sum::<f32>()
1459 / 10.0;
1460
1461 if recent_thermal > 0.7 {
1462 recommendations.push(
1463 "High thermal state detected - consider reducing model complexity".to_string(),
1464 );
1465 recommendations
1466 .push("Switch to CPU-based inference to reduce GPU heat generation".to_string());
1467 }
1468
1469 if recent_battery < 0.2 {
1470 recommendations.push("Low battery level - enable power saving mode".to_string());
1471 recommendations.push("Reduce inference frequency to conserve battery".to_string());
1472 }
1473
1474 if recent_thermal > 0.5 && recent_battery < 0.3 {
1475 recommendations.push(
1476 "Combined thermal and battery stress - enable aggressive power management"
1477 .to_string(),
1478 );
1479 }
1480
1481 recommendations
1482 }
1483}
1484
/// Convenience constructor for a profiler using the development
/// configuration (profiling enabled with detailed timing).
#[wasm_bindgen]
pub fn create_development_profiler() -> PerformanceProfiler {
    PerformanceProfiler::new(ProfilerConfig::development())
}
1490
/// Convenience constructor for a profiler using the production
/// configuration (detailed timing disabled for lower overhead).
#[wasm_bindgen]
pub fn create_production_profiler() -> PerformanceProfiler {
    PerformanceProfiler::new(ProfilerConfig::production())
}
1496
#[cfg(test)]
mod tests {
    use super::*;

    // Config presets behave as expected regardless of target.
    #[test]
    fn test_profiler_config() {
        let config = ProfilerConfig::development();
        assert!(config.enabled());
        assert!(config.detailed_timing());

        // Production trades detailed timing for lower overhead.
        let prod_config = ProfilerConfig::production();
        assert!(!prod_config.detailed_timing());
    }

    // Timing relies on js_sys::Date, so this only runs on wasm32.
    #[test]
    #[cfg(target_arch = "wasm32")]
    fn test_operation_profiling() {
        let config = ProfilerConfig::development();
        let mut profiler = PerformanceProfiler::new(config);

        profiler.start_operation("test_op", OperationType::MatMul);
        let duration = profiler.end_operation("test_op");

        // Ending a started operation yields a duration and records a profile.
        assert!(duration.is_some());
        assert_eq!(profiler.operation_profiles.len(), 1);
    }

    // Summary analysis also needs the wasm timing source.
    #[test]
    #[cfg(target_arch = "wasm32")]
    fn test_performance_analysis() {
        let config = ProfilerConfig::development();
        let mut profiler = PerformanceProfiler::new(config);

        profiler.start_operation("op1", OperationType::Attention);
        profiler.end_operation("op1");

        let summary = profiler.analyze_performance_internal();
        assert_eq!(summary.operation_count, 1);
        assert!(summary.total_time_ms >= 0.0);
    }

    // On native targets only construction is exercised; no timing available.
    #[test]
    #[cfg(not(target_arch = "wasm32"))]
    fn test_profiler_config_only() {
        let config = ProfilerConfig::development();
        let profiler = PerformanceProfiler::new(config);
        assert!(profiler.operation_profiles.is_empty());
        assert!(profiler.performance_trends.is_empty());
    }
}