1use chrono::{DateTime, Utc};
12use serde::{Deserialize, Serialize};
13use std::collections::{HashMap, VecDeque};
14use std::sync::Arc;
15use std::time::Duration;
16use tokio::sync::RwLock;
17use tracing::{debug, error, info};
18use uuid::Uuid;
19
/// Top-level configuration for the performance profiler.
///
/// Each sub-config gates one collector; `enabled` is the master switch
/// checked by `PerformanceProfiler::start_session`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ProfilingConfig {
    /// Master switch; when `false`, sessions cannot be started.
    pub enabled: bool,

    /// CPU stack-sampling settings.
    pub cpu_profiling: CpuProfilingConfig,

    /// Periodic memory-snapshot settings.
    pub memory_profiling: MemoryProfilingConfig,

    /// Async task tracking settings.
    pub async_profiling: AsyncProfilingConfig,

    /// Flame-graph rendering settings.
    pub flame_graph: FlameGraphConfig,

    /// Thresholds used by hotspot identification.
    pub thresholds: PerformanceThresholds,
}
41
/// Configuration for the CPU sampling collector.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CpuProfilingConfig {
    /// Whether the CPU sampler runs at all.
    pub enabled: bool,

    /// Samples taken per second (the sampler derives its tick period
    /// from this value).
    pub sampling_frequency_hz: u64,

    /// Ring-buffer cap: oldest samples are dropped beyond this count.
    pub max_samples: usize,

    /// Intended profile duration in seconds.
    pub profile_duration_secs: u64,

    /// Maximum number of stack frames captured per sample.
    pub max_stack_depth: usize,

    /// Whether call-graph construction is enabled.
    pub call_graph_enabled: bool,
}
63
/// Configuration for the memory snapshot collector.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct MemoryProfilingConfig {
    /// Whether memory profiling runs at all.
    pub enabled: bool,

    /// Track individual allocations.
    pub track_allocations: bool,

    /// Track suspected leaks.
    pub track_leaks: bool,

    /// Upper bound on tracked allocations.
    pub max_allocations: usize,

    /// Seconds between consecutive memory snapshots.
    pub snapshot_interval_secs: u64,

    /// Enable heap-level profiling.
    pub heap_profiling: bool,
}
85
/// Configuration for async task tracking.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AsyncProfilingConfig {
    /// Whether async profiling runs at all.
    pub enabled: bool,

    /// Record task spawn events.
    pub track_spawns: bool,

    /// Record task completion events.
    pub track_completion: bool,

    /// Upper bound on concurrently tracked tasks.
    pub max_tracked_tasks: usize,

    /// Tasks running longer than this (ms) are considered slow.
    pub task_timeout_threshold_ms: u64,
}
104
/// Rendering options for generated flame graphs.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FlameGraphConfig {
    /// Whether flame-graph generation is allowed.
    pub enabled: bool,

    /// Output width in pixels.
    pub width: u32,

    /// Output height in pixels.
    pub height: u32,

    /// Palette used to color frames.
    pub color_scheme: FlameGraphColorScheme,

    /// Frames narrower than this (pixels) are elided.
    pub min_frame_width: u32,

    /// Render function names inside frames.
    pub show_function_names: bool,

    /// Render an inverted ("icicle") graph.
    pub reverse: bool,
}
129
/// Built-in and user-supplied flame-graph palettes.
///
/// `Custom` carries an explicit list of color strings; callers should
/// treat an empty list as "fall back to default".
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum FlameGraphColorScheme {
    Hot,
    Cold,
    Rainbow,
    Aqua,
    Orange,
    Red,
    Green,
    Blue,
    Custom(Vec<String>),
}
144
/// Limits that classify observations as performance hotspots/issues.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PerformanceThresholds {
    /// CPU share (percent of samples) above which a function is a hotspot.
    pub cpu_threshold_percent: f64,

    /// Memory ceiling in megabytes.
    pub memory_threshold_mb: f64,

    /// Function-call duration limit in milliseconds.
    pub function_call_threshold_ms: u64,

    /// Async task duration limit in milliseconds.
    pub async_task_threshold_ms: u64,

    /// Per-location allocation size (bytes) above which a memory hotspot
    /// is reported.
    pub allocation_threshold_bytes: usize,
}
163
/// Runtime performance profiler.
///
/// Collects CPU samples, memory snapshots, async task profiles and
/// per-function call statistics into shared, async-locked stores so the
/// background collector tasks and API callers can access them concurrently.
pub struct PerformanceProfiler {
    config: ProfilingConfig,
    // Bounded ring of CPU samples (trimmed to cpu_profiling.max_samples).
    cpu_samples: Arc<RwLock<VecDeque<CpuSample>>>,
    // Bounded ring of memory snapshots.
    memory_snapshots: Arc<RwLock<VecDeque<MemorySnapshot>>>,
    // Tracked async tasks keyed by task id.
    async_tasks: Arc<RwLock<HashMap<Uuid, AsyncTaskProfile>>>,
    // Per-function aggregate call statistics keyed by function name.
    function_calls: Arc<RwLock<HashMap<String, FunctionCallProfile>>>,
    // At most one active session at a time.
    current_session: Arc<RwLock<Option<ProfilingSession>>>,
}
173
/// One CPU stack sample taken by the sampling loop.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CpuSample {
    /// When the sample was taken.
    pub timestamp: DateTime<Utc>,

    /// Captured call stack, outermost frame first.
    pub stack_trace: Vec<StackFrame>,

    /// CPU usage at sample time (percent).
    pub cpu_usage: f64,

    /// Hash-derived id of the sampled thread.
    pub thread_id: u64,

    /// OS process id.
    pub process_id: u32,
}
192
/// A single frame within a captured stack trace.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct StackFrame {
    /// Symbolized function name.
    pub function_name: String,

    /// Module/crate the function belongs to, if known.
    pub module_name: Option<String>,

    /// Source file, if debug info is available.
    pub file_name: Option<String>,

    /// Source line, if debug info is available.
    pub line_number: Option<u32>,

    /// Instruction address, if available.
    pub address: Option<u64>,

    /// Offset from the function start, if available.
    pub offset: Option<u64>,
}
214
/// Point-in-time view of process memory usage.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct MemorySnapshot {
    /// When the snapshot was taken.
    pub timestamp: DateTime<Utc>,

    /// Total resident memory in bytes.
    pub total_memory_bytes: u64,

    /// Heap portion in bytes.
    pub heap_memory_bytes: u64,

    /// Stack portion in bytes.
    pub stack_memory_bytes: u64,

    /// Number of live allocations.
    pub allocation_count: u64,

    /// Histogram: allocation size (bytes) -> count.
    pub allocations_by_size: HashMap<usize, u64>,

    /// Aggregated allocation info keyed by source location string.
    pub allocations_by_location: HashMap<String, AllocationInfo>,
}
239
/// Aggregated allocation statistics for one source location.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AllocationInfo {
    /// Sum of all allocation sizes (bytes) at this location.
    pub total_size: u64,

    /// Number of allocations at this location.
    pub count: u64,

    /// Mean allocation size in bytes.
    pub average_size: f64,

    /// Representative allocation stack, if captured.
    pub stack_trace: Vec<StackFrame>,
}
255
/// Lifecycle and resource profile of one tracked async task.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AsyncTaskProfile {
    /// Unique id assigned to the task.
    pub task_id: Uuid,

    /// Human-readable task name.
    pub task_name: String,

    /// When the task was spawned.
    pub spawn_time: DateTime<Utc>,

    /// When the task finished; `None` while still running.
    pub completion_time: Option<DateTime<Utc>>,

    /// Total wall-clock duration in ms, once completed.
    pub duration_ms: Option<u64>,

    /// Current lifecycle state.
    pub state: AsyncTaskState,

    /// CPU time consumed, in milliseconds.
    pub cpu_time_ms: u64,

    /// Memory attributed to the task, in bytes.
    pub memory_bytes: u64,

    /// How many times the task yielded to the scheduler.
    pub yield_count: u64,

    /// Spawning task, if this task has a tracked parent.
    pub parent_task_id: Option<Uuid>,
}
289
/// Lifecycle states of a tracked async task.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum AsyncTaskState {
    Running,
    Suspended,
    Completed,
    Failed,
    Cancelled,
}
300
/// Aggregate timing statistics for a single function.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FunctionCallProfile {
    /// The function this profile belongs to.
    pub function_name: String,

    /// Number of recorded calls.
    pub call_count: u64,

    /// Sum of all call durations, in microseconds.
    pub total_time_us: u64,

    /// Mean call duration in microseconds.
    pub average_time_us: f64,

    /// Fastest recorded call (us). Starts at `u64::MAX` before any call.
    pub min_time_us: u64,

    /// Slowest recorded call (us).
    pub max_time_us: u64,

    /// Percentile label (e.g. "p99") -> duration in us.
    pub percentiles: HashMap<String, u64>,

    /// Bounded ring of recent individual calls.
    pub call_history: VecDeque<FunctionCall>,
}
328
/// One recorded function invocation.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FunctionCall {
    /// When the call was recorded.
    pub timestamp: DateTime<Utc>,

    /// Call duration in microseconds.
    pub duration_us: u64,

    /// Stringified arguments, if captured.
    pub arguments: Option<String>,

    /// Stringified return value, if captured.
    pub return_value: Option<String>,

    /// Error description if the call failed.
    pub error: Option<String>,
}
347
/// Metadata and summary statistics for one profiling run.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ProfilingSession {
    /// Unique session id.
    pub session_id: Uuid,

    /// Caller-supplied session name.
    pub name: String,

    /// When the session started.
    pub start_time: DateTime<Utc>,

    /// When the session ended; `None` while active.
    pub end_time: Option<DateTime<Utc>>,

    /// Total session duration in ms, once stopped.
    pub duration_ms: Option<u64>,

    /// How the session was initiated.
    pub session_type: ProfilingSessionType,

    /// Snapshot of the config the session ran with.
    pub config: ProfilingConfig,

    /// Counters filled in when the session stops.
    pub stats: ProfilingStats,
}
375
/// How a profiling session was initiated.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum ProfilingSessionType {
    Manual,
    Scheduled,
    Triggered,
    Continuous,
}
385
/// Counters summarizing what a profiler/session has collected.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ProfilingStats {
    /// Sum of CPU samples, memory snapshots and tracked async tasks.
    pub total_samples: u64,

    /// Number of CPU samples collected.
    pub cpu_samples: u64,

    /// Number of memory snapshots collected.
    pub memory_snapshots: u64,

    /// Number of async tasks tracked.
    pub async_tasks_tracked: u64,

    /// Number of distinct functions with recorded calls.
    pub function_calls_tracked: u64,

    /// Number of hotspots identified.
    pub hotspots_identified: u64,

    /// Number of performance issues flagged.
    pub performance_issues: u64,
}
410
/// A fully-built flame graph ready for rendering.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FlameGraphData {
    /// All nodes; parent/child relations are expressed via ids.
    pub nodes: Vec<FlameGraphNode>,

    /// Number of CPU samples the graph was built from.
    pub total_samples: u64,

    /// When the graph was generated.
    pub generated_at: DateTime<Utc>,

    /// Rendering config used for generation.
    pub config: FlameGraphConfig,
}
426
/// One frame rectangle in a flame graph.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FlameGraphNode {
    /// Unique node id.
    pub id: Uuid,

    /// Function this node represents.
    pub function_name: String,

    /// Module the function belongs to, if known.
    pub module_name: Option<String>,

    /// Number of samples that contained this frame at this depth.
    pub sample_count: u64,

    /// `sample_count` as a percentage of all samples.
    pub percentage: f64,

    /// Time spent in this frame itself (us).
    pub self_time_us: u64,

    /// Time spent in this frame plus callees (us).
    pub total_time_us: u64,

    /// Stack depth (0 = outermost).
    pub depth: u32,

    /// Caller node, if any.
    pub parent_id: Option<Uuid>,

    /// Callee node ids.
    pub children: Vec<Uuid>,

    /// Render color (e.g. "#FF0000").
    pub color: String,
}
463
/// A detected performance problem area with suggested remediations.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PerformanceHotspot {
    /// Unique hotspot id.
    pub id: Uuid,

    /// What kind of bottleneck this is.
    pub hotspot_type: HotspotType,

    /// Function name or allocation site the hotspot refers to.
    pub location: String,

    /// How urgent the hotspot is.
    pub severity: HotspotSeverity,

    /// Number of samples/allocations backing the finding.
    pub sample_count: u64,

    /// CPU share (percent); 0.0 for non-CPU hotspots.
    pub cpu_percentage: f64,

    /// Mean time in microseconds, when known.
    pub average_time_us: u64,

    /// Memory involved in bytes; 0 for non-memory hotspots.
    pub memory_bytes: u64,

    /// Human-readable summary.
    pub description: String,

    /// Suggested follow-up actions.
    pub recommendations: Vec<String>,
}
497
/// Categories of performance bottleneck.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum HotspotType {
    CpuIntensive,
    MemoryIntensive,
    IoBlocking,
    LockContention,
    AsyncOverhead,
    GarbageCollection,
    SystemCall,
    NetworkIo,
    DatabaseQuery,
    FileIo,
}
513
/// Urgency levels for a detected hotspot, most severe first.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum HotspotSeverity {
    Critical,
    High,
    Medium,
    Low,
    Info,
}
524
525impl PerformanceProfiler {
526 pub fn new(config: ProfilingConfig) -> Self {
528 Self {
529 config,
530 cpu_samples: Arc::new(RwLock::new(VecDeque::new())),
531 memory_snapshots: Arc::new(RwLock::new(VecDeque::new())),
532 async_tasks: Arc::new(RwLock::new(HashMap::new())),
533 function_calls: Arc::new(RwLock::new(HashMap::new())),
534 current_session: Arc::new(RwLock::new(None)),
535 }
536 }
537
538 pub async fn start_session(
540 &self,
541 name: String,
542 session_type: ProfilingSessionType,
543 ) -> Result<Uuid, ProfilingError> {
544 if !self.config.enabled {
545 return Err(ProfilingError::Disabled);
546 }
547
548 let session_id = Uuid::new_v4();
549 let session = ProfilingSession {
550 session_id,
551 name: name.clone(),
552 start_time: Utc::now(),
553 end_time: None,
554 duration_ms: None,
555 session_type,
556 config: self.config.clone(),
557 stats: ProfilingStats {
558 total_samples: 0,
559 cpu_samples: 0,
560 memory_snapshots: 0,
561 async_tasks_tracked: 0,
562 function_calls_tracked: 0,
563 hotspots_identified: 0,
564 performance_issues: 0,
565 },
566 };
567
568 {
569 let mut current_session = self.current_session.write().await;
570 *current_session = Some(session);
571 }
572
573 self.start_cpu_profiling().await;
575 self.start_memory_profiling().await;
576 self.start_async_profiling().await;
577
578 info!("Started profiling session: {} ({})", session_id, name);
579 Ok(session_id)
580 }
581
582 pub async fn stop_session(&self) -> Result<ProfilingSession, ProfilingError> {
584 let mut current_session = self.current_session.write().await;
585
586 if let Some(mut session) = current_session.take() {
587 let now = Utc::now();
588 session.end_time = Some(now);
589 session.duration_ms = Some((now - session.start_time).num_milliseconds() as u64);
590
591 session.stats.cpu_samples = self.cpu_samples.read().await.len() as u64;
593 session.stats.memory_snapshots = self.memory_snapshots.read().await.len() as u64;
594 session.stats.async_tasks_tracked = self.async_tasks.read().await.len() as u64;
595 session.stats.function_calls_tracked = self.function_calls.read().await.len() as u64;
596 session.stats.total_samples = session.stats.cpu_samples
597 + session.stats.memory_snapshots
598 + session.stats.async_tasks_tracked;
599
600 info!(
601 "Stopped profiling session: {} (duration: {}ms)",
602 session.session_id,
603 session.duration_ms.unwrap_or(0)
604 );
605
606 Ok(session)
607 } else {
608 Err(ProfilingError::NoActiveSession)
609 }
610 }
611
612 async fn start_cpu_profiling(&self) {
614 if !self.config.cpu_profiling.enabled {
615 return;
616 }
617
618 let cpu_samples = self.cpu_samples.clone();
619 let config = self.config.cpu_profiling.clone();
620
621 tokio::spawn(async move {
622 let mut interval =
623 tokio::time::interval(Duration::from_millis(1000 / config.sampling_frequency_hz));
624
625 loop {
626 interval.tick().await;
627
628 let sample = CpuSample {
630 timestamp: Utc::now(),
631 stack_trace: Self::collect_stack_trace(&config).await,
632 cpu_usage: Self::get_cpu_usage().await,
633 thread_id: Self::get_current_thread_id(),
634 process_id: std::process::id(),
635 };
636
637 let mut samples = cpu_samples.write().await;
638 samples.push_back(sample);
639
640 if samples.len() > config.max_samples {
642 samples.pop_front();
643 }
644 }
645 });
646 }
647
648 async fn start_memory_profiling(&self) {
650 if !self.config.memory_profiling.enabled {
651 return;
652 }
653
654 let memory_snapshots = self.memory_snapshots.clone();
655 let config = self.config.memory_profiling.clone();
656
657 tokio::spawn(async move {
658 let mut interval =
659 tokio::time::interval(Duration::from_secs(config.snapshot_interval_secs));
660
661 loop {
662 interval.tick().await;
663
664 let snapshot = Self::collect_memory_snapshot(&config).await;
665 let mut snapshots = memory_snapshots.write().await;
666 snapshots.push_back(snapshot);
667
668 if snapshots.len() > 1000 {
670 snapshots.pop_front();
671 }
672 }
673 });
674 }
675
676 async fn start_async_profiling(&self) {
678 if !self.config.async_profiling.enabled {
679 return;
680 }
681
682 debug!("Async profiling started");
685 }
686
687 async fn collect_stack_trace(config: &CpuProfilingConfig) -> Vec<StackFrame> {
689 let mut frames = Vec::new();
692
693 for i in 0..std::cmp::min(5, config.max_stack_depth) {
695 frames.push(StackFrame {
696 function_name: format!("function_{i}"),
697 module_name: Some("mcp_server".to_string()),
698 file_name: Some("main.rs".to_string()),
699 line_number: Some(42 + i as u32),
700 address: Some(0x1000 + i as u64 * 0x100),
701 offset: Some(i as u64 * 8),
702 });
703 }
704
705 frames
706 }
707
708 async fn get_cpu_usage() -> f64 {
710 use std::collections::hash_map::DefaultHasher;
712 use std::hash::{Hash, Hasher};
713 let mut hasher = DefaultHasher::new();
714 std::thread::current().id().hash(&mut hasher);
715 (hasher.finish() % 100) as f64
716 }
717
718 fn get_current_thread_id() -> u64 {
720 use std::collections::hash_map::DefaultHasher;
722 use std::hash::{Hash, Hasher};
723 let mut hasher = DefaultHasher::new();
724 std::thread::current().id().hash(&mut hasher);
725 hasher.finish()
726 }
727
728 async fn collect_memory_snapshot(_config: &MemoryProfilingConfig) -> MemorySnapshot {
730 let mut allocations_by_size = HashMap::new();
731 let mut allocations_by_location = HashMap::new();
732
733 for i in 0..10 {
735 let size = 1024 * (i + 1);
736 allocations_by_size.insert(size, i as u64 + 1);
737
738 allocations_by_location.insert(
739 format!("location_{i}"),
740 AllocationInfo {
741 total_size: size as u64,
742 count: i as u64 + 1,
743 average_size: size as f64,
744 stack_trace: vec![],
745 },
746 );
747 }
748
749 MemorySnapshot {
750 timestamp: Utc::now(),
751 total_memory_bytes: 1024 * 1024 * 100, heap_memory_bytes: 1024 * 1024 * 80, stack_memory_bytes: 1024 * 1024 * 20, allocation_count: 1000,
755 allocations_by_size,
756 allocations_by_location,
757 }
758 }
759
760 pub async fn generate_flame_graph(&self) -> Result<FlameGraphData, ProfilingError> {
762 if !self.config.flame_graph.enabled {
763 return Err(ProfilingError::FlameGraphDisabled);
764 }
765
766 let cpu_samples = self.cpu_samples.read().await;
767 let mut nodes: Vec<FlameGraphNode> = Vec::new();
768 let mut node_map = HashMap::new();
769 let total_samples = cpu_samples.len() as u64;
770
771 if total_samples == 0 {
772 return Err(ProfilingError::InsufficientData);
773 }
774
775 for sample in cpu_samples.iter() {
777 let mut parent_id = None;
778
779 for (depth, frame) in sample.stack_trace.iter().enumerate() {
780 let key = format!("{}::{}", frame.function_name, depth);
781
782 if let Some(node_id) = node_map.get(&key) {
783 if let Some(node) = nodes.iter_mut().find(|n| n.id == *node_id) {
785 node.sample_count += 1;
786 node.percentage = (node.sample_count as f64 / total_samples as f64) * 100.0;
787 }
788 } else {
789 let node_id = Uuid::new_v4();
791 let node = FlameGraphNode {
792 id: node_id,
793 function_name: frame.function_name.clone(),
794 module_name: frame.module_name.clone(),
795 sample_count: 1,
796 percentage: (1.0 / total_samples as f64) * 100.0,
797 self_time_us: 1000, total_time_us: 1000,
799 depth: depth as u32,
800 parent_id,
801 children: Vec::new(),
802 color: self.get_flame_graph_color(depth).await,
803 };
804
805 nodes.push(node);
806 node_map.insert(key, node_id);
807 }
808
809 parent_id = node_map
810 .get(&format!("{}::{}", frame.function_name, depth))
811 .copied();
812 }
813 }
814
815 let mut parent_child_map: HashMap<Uuid, Vec<Uuid>> = HashMap::new();
817 for node in &nodes {
818 if let Some(parent_id) = node.parent_id {
819 parent_child_map.entry(parent_id).or_default().push(node.id);
820 }
821 }
822
823 for node in &mut nodes {
825 if let Some(children) = parent_child_map.get(&node.id) {
826 node.children = children.clone();
827 }
828 }
829
830 Ok(FlameGraphData {
831 nodes,
832 total_samples,
833 generated_at: Utc::now(),
834 config: self.config.flame_graph.clone(),
835 })
836 }
837
838 async fn get_flame_graph_color(&self, depth: usize) -> String {
840 match &self.config.flame_graph.color_scheme {
841 FlameGraphColorScheme::Hot => {
842 let colors = ["#FF0000", "#FF4500", "#FF8C00", "#FFD700", "#FFFF00"];
843 colors[depth % colors.len()].to_string()
844 }
845 FlameGraphColorScheme::Cold => {
846 let colors = ["#0000FF", "#4169E1", "#00BFFF", "#87CEEB", "#E0F6FF"];
847 colors[depth % colors.len()].to_string()
848 }
849 FlameGraphColorScheme::Rainbow => {
850 let colors = [
851 "#FF0000", "#FF8000", "#FFFF00", "#00FF00", "#0000FF", "#8000FF",
852 ];
853 colors[depth % colors.len()].to_string()
854 }
855 FlameGraphColorScheme::Custom(colors) => colors[depth % colors.len()].clone(),
856 _ => "#007bff".to_string(),
857 }
858 }
859
860 pub async fn identify_hotspots(&self) -> Result<Vec<PerformanceHotspot>, ProfilingError> {
862 let mut hotspots = Vec::new();
863
864 let cpu_samples = self.cpu_samples.read().await;
866 let function_calls = self.function_calls.read().await;
867
868 let mut function_counts = HashMap::new();
870 for sample in cpu_samples.iter() {
871 for frame in &sample.stack_trace {
872 *function_counts
873 .entry(frame.function_name.clone())
874 .or_insert(0) += 1;
875 }
876 }
877
878 let total_samples = cpu_samples.len() as u64;
880 for (function_name, count) in function_counts {
881 let percentage = (count as f64 / total_samples as f64) * 100.0;
882
883 if percentage > self.config.thresholds.cpu_threshold_percent {
884 let hotspot = PerformanceHotspot {
885 id: Uuid::new_v4(),
886 hotspot_type: HotspotType::CpuIntensive,
887 location: function_name.clone(),
888 severity: if percentage > 50.0 {
889 HotspotSeverity::Critical
890 } else if percentage > 25.0 {
891 HotspotSeverity::High
892 } else {
893 HotspotSeverity::Medium
894 },
895 sample_count: count,
896 cpu_percentage: percentage,
897 average_time_us: function_calls
898 .get(&function_name)
899 .map(|fc| fc.average_time_us as u64)
900 .unwrap_or(0),
901 memory_bytes: 0, description: format!(
903 "Function '{function_name}' consuming {percentage:.1}% of CPU time"
904 ),
905 recommendations: vec![
906 "Consider optimizing the algorithm".to_string(),
907 "Profile at a more granular level".to_string(),
908 "Check for unnecessary computations".to_string(),
909 ],
910 };
911
912 hotspots.push(hotspot);
913 }
914 }
915
916 let memory_snapshots = self.memory_snapshots.read().await;
918 if let Some(latest_snapshot) = memory_snapshots.back() {
919 for (location, allocation_info) in &latest_snapshot.allocations_by_location {
920 if allocation_info.total_size
921 > self.config.thresholds.allocation_threshold_bytes as u64
922 {
923 let hotspot = PerformanceHotspot {
924 id: Uuid::new_v4(),
925 hotspot_type: HotspotType::MemoryIntensive,
926 location: location.clone(),
927 severity: if allocation_info.total_size > 1024 * 1024 * 100 {
928 HotspotSeverity::Critical
929 } else if allocation_info.total_size > 1024 * 1024 * 50 {
930 HotspotSeverity::High
931 } else {
932 HotspotSeverity::Medium
933 },
934 sample_count: allocation_info.count,
935 cpu_percentage: 0.0,
936 average_time_us: 0,
937 memory_bytes: allocation_info.total_size,
938 description: format!(
939 "Location '{}' allocated {} bytes",
940 location, allocation_info.total_size
941 ),
942 recommendations: vec![
943 "Consider memory pooling".to_string(),
944 "Check for memory leaks".to_string(),
945 "Optimize data structures".to_string(),
946 ],
947 };
948
949 hotspots.push(hotspot);
950 }
951 }
952 }
953
954 hotspots.sort_by(|a, b| b.cpu_percentage.partial_cmp(&a.cpu_percentage).unwrap());
955 Ok(hotspots)
956 }
957
958 pub async fn record_function_call(&self, function_name: String, duration_us: u64) {
960 let mut function_calls = self.function_calls.write().await;
961 let profile = function_calls
962 .entry(function_name.clone())
963 .or_insert_with(|| FunctionCallProfile {
964 function_name: function_name.clone(),
965 call_count: 0,
966 total_time_us: 0,
967 average_time_us: 0.0,
968 min_time_us: u64::MAX,
969 max_time_us: 0,
970 percentiles: HashMap::new(),
971 call_history: VecDeque::new(),
972 });
973
974 profile.call_count += 1;
975 profile.total_time_us += duration_us;
976 profile.average_time_us = profile.total_time_us as f64 / profile.call_count as f64;
977 profile.min_time_us = profile.min_time_us.min(duration_us);
978 profile.max_time_us = profile.max_time_us.max(duration_us);
979
980 let call = FunctionCall {
981 timestamp: Utc::now(),
982 duration_us,
983 arguments: None,
984 return_value: None,
985 error: None,
986 };
987
988 profile.call_history.push_back(call);
989
990 if profile.call_history.len() > 1000 {
992 profile.call_history.pop_front();
993 }
994 }
995
996 pub async fn get_current_session(&self) -> Option<ProfilingSession> {
998 let session = self.current_session.read().await;
999 session.clone()
1000 }
1001
1002 pub async fn get_statistics(&self) -> ProfilingStats {
1004 let cpu_samples = self.cpu_samples.read().await.len() as u64;
1005 let memory_snapshots = self.memory_snapshots.read().await.len() as u64;
1006 let async_tasks = self.async_tasks.read().await.len() as u64;
1007 let function_calls = self.function_calls.read().await.len() as u64;
1008
1009 ProfilingStats {
1010 total_samples: cpu_samples + memory_snapshots + async_tasks,
1011 cpu_samples,
1012 memory_snapshots,
1013 async_tasks_tracked: async_tasks,
1014 function_calls_tracked: function_calls,
1015 hotspots_identified: 0, performance_issues: 0, }
1018 }
1019}
1020
1021impl Default for ProfilingConfig {
1022 fn default() -> Self {
1023 Self {
1024 enabled: false, cpu_profiling: CpuProfilingConfig {
1026 enabled: false,
1027 sampling_frequency_hz: 100,
1028 max_samples: 10000,
1029 profile_duration_secs: 60,
1030 max_stack_depth: 32,
1031 call_graph_enabled: true,
1032 },
1033 memory_profiling: MemoryProfilingConfig {
1034 enabled: false,
1035 track_allocations: true,
1036 track_leaks: true,
1037 max_allocations: 10000,
1038 snapshot_interval_secs: 10,
1039 heap_profiling: true,
1040 },
1041 async_profiling: AsyncProfilingConfig {
1042 enabled: false,
1043 track_spawns: true,
1044 track_completion: true,
1045 max_tracked_tasks: 1000,
1046 task_timeout_threshold_ms: 5000,
1047 },
1048 flame_graph: FlameGraphConfig {
1049 enabled: true,
1050 width: 1200,
1051 height: 600,
1052 color_scheme: FlameGraphColorScheme::Hot,
1053 min_frame_width: 1,
1054 show_function_names: true,
1055 reverse: false,
1056 },
1057 thresholds: PerformanceThresholds {
1058 cpu_threshold_percent: 10.0,
1059 memory_threshold_mb: 100.0,
1060 function_call_threshold_ms: 100,
1061 async_task_threshold_ms: 1000,
1062 allocation_threshold_bytes: 1024 * 1024, },
1064 }
1065 }
1066}
1067
/// Errors produced by the profiler API.
#[derive(Debug, thiserror::Error)]
pub enum ProfilingError {
    /// Profiling is globally disabled in the configuration.
    #[error("Profiling is disabled")]
    Disabled,

    /// `stop_session` was called with no session running.
    #[error("No active profiling session")]
    NoActiveSession,

    /// Flame-graph generation is disabled in the configuration.
    #[error("Flame graph generation is disabled")]
    FlameGraphDisabled,

    /// Not enough collected data to analyze (e.g. zero CPU samples).
    #[error("Insufficient data for analysis")]
    InsufficientData,

    /// Invalid or conflicting configuration/state.
    #[error("Configuration error: {0}")]
    Configuration(String),

    /// Underlying I/O failure.
    #[error("IO error: {0}")]
    Io(#[from] std::io::Error),

    /// JSON (de)serialization failure.
    #[error("Serialization error: {0}")]
    Serialization(#[from] serde_json::Error),
}
1092
/// Times a block of code and records the result on an optional profiler.
///
/// `$profiler` must be an `Option`-like value supporting `as_ref()`; when
/// it is `None` the block still runs but nothing is recorded. The macro
/// awaits `record_function_call`, so it can only be used inside an async
/// context. Evaluates to the block's result.
#[macro_export]
macro_rules! profile_function {
    ($profiler:expr, $function_name:expr, $code:block) => {{
        let start = std::time::Instant::now();
        let result = $code;
        let duration = start.elapsed();

        if let Some(profiler) = $profiler.as_ref() {
            profiler
                .record_function_call($function_name.to_string(), duration.as_micros() as u64)
                .await;
        }

        result
    }};
}
1110
1111#[cfg(test)]
1112mod tests {
1113 use super::*;
1114
    // Defaults keep all collectors off but flame-graph rendering on.
    #[test]
    fn test_profiling_config_creation() {
        let config = ProfilingConfig::default();
        assert!(!config.enabled); assert!(!config.cpu_profiling.enabled);
        assert!(!config.memory_profiling.enabled);
        assert!(config.flame_graph.enabled);
    }

    // A fresh profiler reports empty statistics.
    #[tokio::test]
    async fn test_profiler_creation() {
        let config = ProfilingConfig::default();
        let profiler = PerformanceProfiler::new(config);

        let stats = profiler.get_statistics().await;
        assert_eq!(stats.total_samples, 0);
        assert_eq!(stats.cpu_samples, 0);
        assert_eq!(stats.memory_snapshots, 0);
    }

    // Recording one call creates and populates a FunctionCallProfile.
    #[tokio::test]
    async fn test_function_call_recording() {
        let config = ProfilingConfig::default();
        let profiler = PerformanceProfiler::new(config);

        profiler
            .record_function_call("test_function".to_string(), 1000)
            .await;

        let function_calls = profiler.function_calls.read().await;
        assert!(function_calls.contains_key("test_function"));

        let profile = function_calls.get("test_function").unwrap();
        assert_eq!(profile.call_count, 1);
        assert_eq!(profile.total_time_us, 1000);
    }

    // start/stop round-trip preserves session identity and sets end_time.
    #[tokio::test]
    async fn test_session_management() {
        let config = ProfilingConfig {
            enabled: true,
            ..Default::default()
        };
        let profiler = PerformanceProfiler::new(config);

        let session_id = profiler
            .start_session("test_session".to_string(), ProfilingSessionType::Manual)
            .await
            .unwrap();

        assert!(profiler.get_current_session().await.is_some());

        let session = profiler.stop_session().await.unwrap();
        assert_eq!(session.session_id, session_id);
        assert_eq!(session.name, "test_session");
        assert!(session.end_time.is_some());
    }
1174
    // Field-level sanity check of CpuProfilingConfig construction.
    #[test]
    fn test_cpu_profiling_config() {
        let config = CpuProfilingConfig {
            enabled: true,
            sampling_frequency_hz: 100,
            max_samples: 1000,
            profile_duration_secs: 60,
            max_stack_depth: 32,
            call_graph_enabled: true,
        };

        assert!(config.enabled);
        assert_eq!(config.sampling_frequency_hz, 100);
        assert_eq!(config.max_samples, 1000);
        assert_eq!(config.profile_duration_secs, 60);
        assert_eq!(config.max_stack_depth, 32);
        assert!(config.call_graph_enabled);
    }

    // Field-level sanity check of MemoryProfilingConfig construction.
    #[test]
    fn test_memory_profiling_config() {
        let config = MemoryProfilingConfig {
            enabled: true,
            track_allocations: true,
            track_leaks: true,
            max_allocations: 10000,
            snapshot_interval_secs: 30,
            heap_profiling: true,
        };

        assert!(config.enabled);
        assert!(config.track_allocations);
        assert!(config.track_leaks);
        assert_eq!(config.max_allocations, 10000);
        assert_eq!(config.snapshot_interval_secs, 30);
        assert!(config.heap_profiling);
    }

    // Field-level sanity check of AsyncProfilingConfig construction.
    #[test]
    fn test_async_profiling_config() {
        let config = AsyncProfilingConfig {
            enabled: true,
            track_spawns: true,
            track_completion: true,
            max_tracked_tasks: 5000,
            task_timeout_threshold_ms: 1000,
        };

        assert!(config.enabled);
        assert!(config.track_spawns);
        assert!(config.track_completion);
        assert_eq!(config.max_tracked_tasks, 5000);
        assert_eq!(config.task_timeout_threshold_ms, 1000);
    }

    // Field-level sanity check of FlameGraphConfig construction.
    #[test]
    fn test_flame_graph_config() {
        let config = FlameGraphConfig {
            enabled: true,
            width: 1920,
            height: 1080,
            color_scheme: FlameGraphColorScheme::Hot,
            min_frame_width: 1,
            show_function_names: true,
            reverse: false,
        };

        assert!(config.enabled);
        assert_eq!(config.width, 1920);
        assert_eq!(config.height, 1080);
        assert!(matches!(config.color_scheme, FlameGraphColorScheme::Hot));
        assert_eq!(config.min_frame_width, 1);
        assert!(config.show_function_names);
        assert!(!config.reverse);
    }

    // Every palette variant (including Custom) must serialize via serde.
    #[test]
    fn test_flame_graph_color_schemes() {
        let schemes = vec![
            FlameGraphColorScheme::Hot,
            FlameGraphColorScheme::Cold,
            FlameGraphColorScheme::Rainbow,
            FlameGraphColorScheme::Aqua,
            FlameGraphColorScheme::Orange,
            FlameGraphColorScheme::Red,
            FlameGraphColorScheme::Green,
            FlameGraphColorScheme::Blue,
            FlameGraphColorScheme::Custom(vec!["#ff0000".to_string(), "#00ff00".to_string()]),
        ];

        for scheme in schemes {
            let config = FlameGraphConfig {
                enabled: true,
                width: 1024,
                height: 768,
                color_scheme: scheme,
                min_frame_width: 1,
                show_function_names: true,
                reverse: false,
            };
            let serialized = serde_json::to_string(&config);
            assert!(serialized.is_ok());
        }
    }
1280
    // Field-level sanity check of PerformanceThresholds construction.
    #[test]
    fn test_performance_thresholds() {
        let thresholds = PerformanceThresholds {
            cpu_threshold_percent: 80.0,
            memory_threshold_mb: 512.0,
            function_call_threshold_ms: 100,
            async_task_threshold_ms: 200,
            allocation_threshold_bytes: 1024,
        };

        assert_eq!(thresholds.cpu_threshold_percent, 80.0);
        assert_eq!(thresholds.memory_threshold_mb, 512.0);
        assert_eq!(thresholds.function_call_threshold_ms, 100);
        assert_eq!(thresholds.async_task_threshold_ms, 200);
        assert_eq!(thresholds.allocation_threshold_bytes, 1024);
    }

    // A CpuSample carries its stack trace and identifying fields intact.
    #[test]
    fn test_cpu_sample_creation() {
        let sample = CpuSample {
            timestamp: Utc::now(),
            stack_trace: vec![StackFrame {
                function_name: "main".to_string(),
                module_name: Some("app".to_string()),
                file_name: Some("/src/main.rs".to_string()),
                line_number: Some(10),
                address: Some(0x12345678),
                offset: Some(0x100),
            }],
            cpu_usage: 45.2,
            thread_id: 12345,
            process_id: 98765,
        };

        assert_eq!(sample.stack_trace.len(), 1);
        assert_eq!(sample.cpu_usage, 45.2);
        assert_eq!(sample.thread_id, 12345);
        assert_eq!(sample.process_id, 98765);
        assert_eq!(sample.stack_trace[0].function_name, "main");
    }

    // A MemorySnapshot retains the byte totals it was built with.
    #[test]
    fn test_memory_snapshot_creation() {
        let snapshot = MemorySnapshot {
            timestamp: Utc::now(),
            total_memory_bytes: 1024 * 1024,
            heap_memory_bytes: 512 * 1024,
            stack_memory_bytes: 512 * 1024,
            allocation_count: 100,
            allocations_by_size: std::collections::HashMap::new(),
            allocations_by_location: std::collections::HashMap::new(),
        };

        assert_eq!(snapshot.total_memory_bytes, 1024 * 1024);
        assert_eq!(snapshot.heap_memory_bytes, 512 * 1024);
        assert_eq!(snapshot.allocation_count, 100);
    }

    // An AsyncTaskProfile retains its id, name, counters and state.
    #[test]
    fn test_async_task_profile() {
        let task_id = Uuid::new_v4();
        let profile = AsyncTaskProfile {
            task_id,
            task_name: "test_task".to_string(),
            spawn_time: Utc::now(),
            completion_time: None,
            duration_ms: None,
            state: AsyncTaskState::Running,
            cpu_time_ms: 100,
            memory_bytes: 1024,
            yield_count: 5,
            parent_task_id: None,
        };

        assert_eq!(profile.task_id, task_id);
        assert_eq!(profile.task_name, "test_task");
        assert_eq!(profile.cpu_time_ms, 100);
        assert_eq!(profile.memory_bytes, 1024);
        assert_eq!(profile.yield_count, 5);
        assert!(matches!(profile.state, AsyncTaskState::Running));
    }
1362
    // A FunctionCallProfile retains the aggregate stats it was built with.
    #[test]
    fn test_function_call_profile() {
        let profile = FunctionCallProfile {
            function_name: "test_function".to_string(),
            call_count: 2,
            total_time_us: 3000,
            average_time_us: 1500.0,
            min_time_us: 1000,
            max_time_us: 2000,
            percentiles: HashMap::new(),
            call_history: VecDeque::new(),
        };

        assert_eq!(profile.function_name, "test_function");
        assert_eq!(profile.call_count, 2);
        assert_eq!(profile.total_time_us, 3000);
        assert_eq!(profile.min_time_us, 1000);
        assert_eq!(profile.max_time_us, 2000);
        assert_eq!(profile.average_time_us, 1500.0);
    }

    // All four session-type variants are constructible and matchable.
    #[test]
    fn test_profiling_session_types() {
        let manual = ProfilingSessionType::Manual;
        let scheduled = ProfilingSessionType::Scheduled;
        let triggered = ProfilingSessionType::Triggered;
        let continuous = ProfilingSessionType::Continuous;

        assert!(matches!(manual, ProfilingSessionType::Manual));
        assert!(matches!(scheduled, ProfilingSessionType::Scheduled));
        assert!(matches!(triggered, ProfilingSessionType::Triggered));
        assert!(matches!(continuous, ProfilingSessionType::Continuous));
    }

    // AsyncTaskState round-trips through serde JSON for every variant.
    #[test]
    fn test_async_task_status() {
        let statuses = vec![
            AsyncTaskState::Running,
            AsyncTaskState::Suspended,
            AsyncTaskState::Completed,
            AsyncTaskState::Failed,
            AsyncTaskState::Cancelled,
        ];

        for status in statuses {
            let serialized = serde_json::to_string(&status);
            assert!(serialized.is_ok());

            let deserialized: Result<AsyncTaskState, _> =
                serde_json::from_str(&serialized.unwrap());
            assert!(deserialized.is_ok());
        }
    }

    // Every HotspotType variant serializes via serde.
    #[test]
    fn test_hotspot_types() {
        let types = vec![
            HotspotType::CpuIntensive,
            HotspotType::MemoryIntensive,
            HotspotType::IoBlocking,
            HotspotType::LockContention,
            HotspotType::AsyncOverhead,
            HotspotType::GarbageCollection,
            HotspotType::SystemCall,
            HotspotType::NetworkIo,
            HotspotType::DatabaseQuery,
            HotspotType::FileIo,
        ];

        for hotspot_type in types {
            let serialized = serde_json::to_string(&hotspot_type);
            assert!(serialized.is_ok());
        }
    }

    // Every HotspotSeverity variant serializes via serde.
    #[test]
    fn test_hotspot_severities() {
        let severities = vec![
            HotspotSeverity::Critical,
            HotspotSeverity::High,
            HotspotSeverity::Medium,
            HotspotSeverity::Low,
            HotspotSeverity::Info,
        ];

        for severity in severities {
            let serialized = serde_json::to_string(&severity);
            assert!(serialized.is_ok());
        }
    }

    // Starting a session on a disabled profiler must fail with Disabled.
    #[tokio::test]
    async fn test_disabled_profiler() {
        let config = ProfilingConfig {
            enabled: false,
            ..Default::default()
        };
        let profiler = PerformanceProfiler::new(config);

        let result = profiler
            .start_session("test".to_string(), ProfilingSessionType::Manual)
            .await;

        assert!(result.is_err());
        if let Err(ProfilingError::Disabled) = result {
        } else {
            panic!("Expected ProfilingError::Disabled");
        }
    }
1476
1477 #[tokio::test]
1478 async fn test_multiple_function_calls() {
1479 let config = ProfilingConfig::default();
1480 let profiler = PerformanceProfiler::new(config);
1481
1482 profiler
1484 .record_function_call("test_func".to_string(), 1000)
1485 .await;
1486 profiler
1487 .record_function_call("test_func".to_string(), 2000)
1488 .await;
1489 profiler
1490 .record_function_call("test_func".to_string(), 1500)
1491 .await;
1492
1493 let function_calls = profiler.function_calls.read().await;
1494 let profile = function_calls.get("test_func").unwrap();
1495
1496 assert_eq!(profile.call_count, 3);
1497 assert_eq!(profile.total_time_us, 4500);
1498 assert_eq!(profile.min_time_us, 1000);
1499 assert_eq!(profile.max_time_us, 2000);
1500 assert_eq!(profile.average_time_us, 1500.0);
1501 }
1502
1503 #[tokio::test]
1504 async fn test_get_statistics() {
1505 let config = ProfilingConfig::default();
1506 let profiler = PerformanceProfiler::new(config);
1507
1508 profiler
1510 .record_function_call("func1".to_string(), 1000)
1511 .await;
1512 profiler
1513 .record_function_call("func2".to_string(), 2000)
1514 .await;
1515
1516 let stats = profiler.get_statistics().await;
1517 assert_eq!(stats.function_calls_tracked, 2);
1518 assert_eq!(stats.total_samples, 0); }
1520
1521 #[tokio::test]
1522 async fn test_session_without_current() {
1523 let config = ProfilingConfig::default();
1524 let profiler = PerformanceProfiler::new(config);
1525
1526 let result = profiler.stop_session().await;
1528 assert!(result.is_err());
1529
1530 if let Err(ProfilingError::NoActiveSession) = result {
1531 } else {
1533 panic!("Expected ProfilingError::NoActiveSession");
1534 }
1535 }
1536
1537 #[test]
1538 fn test_config_serialization() {
1539 let config = ProfilingConfig::default();
1540
1541 let serialized = serde_json::to_string(&config);
1543 assert!(serialized.is_ok());
1544
1545 let deserialized: Result<ProfilingConfig, _> = serde_json::from_str(&serialized.unwrap());
1547 assert!(deserialized.is_ok());
1548
1549 let restored_config = deserialized.unwrap();
1550 assert_eq!(config.enabled, restored_config.enabled);
1551 assert_eq!(
1552 config.cpu_profiling.enabled,
1553 restored_config.cpu_profiling.enabled
1554 );
1555 }
1556
1557 #[test]
1558 fn test_stack_frame_creation() {
1559 let frame = StackFrame {
1560 function_name: "test_function".to_string(),
1561 module_name: Some("test_module".to_string()),
1562 file_name: Some("/path/to/file.rs".to_string()),
1563 line_number: Some(42),
1564 address: Some(0xDEADBEEF),
1565 offset: Some(0x100),
1566 };
1567
1568 assert_eq!(frame.function_name, "test_function");
1569 assert_eq!(frame.module_name, Some("test_module".to_string()));
1570 assert_eq!(frame.file_name, Some("/path/to/file.rs".to_string()));
1571 assert_eq!(frame.line_number, Some(42));
1572 assert_eq!(frame.address, Some(0xDEADBEEF));
1573 assert_eq!(frame.offset, Some(0x100));
1574 }
1575}