pub mod comparison;
pub mod reporter;

use crate::deadlock::ResourceId;
use crate::task::TaskId;
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::time::Duration;

pub use reporter::PerformanceReporter;
/// Performance metrics collected for a single task.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TaskMetrics {
    /// Identifier of the task these metrics describe.
    pub task_id: TaskId,

    /// Human-readable task name.
    pub name: String,

    /// Total wall-clock time attributed to the task.
    pub total_duration: Duration,

    /// Time the task spent actively running.
    pub running_time: Duration,

    /// Time the task spent blocked or waiting.
    pub blocked_time: Duration,

    /// Number of times the task was polled.
    pub poll_count: u64,

    /// Number of await points the task reached.
    pub await_count: u64,

    /// Duration of each individual await.
    pub await_durations: Vec<Duration>,

    /// Average time spent per poll.
    pub avg_poll_duration: Duration,

    /// Whether the task ran to completion.
    pub completed: bool,
}

impl TaskMetrics {
    /// Creates an empty metrics record for the given task.
    #[must_use]
    pub fn new(task_id: TaskId, name: String) -> Self {
        Self {
            task_id,
            name,
            total_duration: Duration::ZERO,
            running_time: Duration::ZERO,
            blocked_time: Duration::ZERO,
            poll_count: 0,
            await_count: 0,
            await_durations: Vec::new(),
            avg_poll_duration: Duration::ZERO,
            completed: false,
        }
    }

    /// Fraction of the total duration spent actively running (0.0 to 1.0).
    #[must_use]
    pub fn efficiency(&self) -> f64 {
        if self.total_duration.is_zero() {
            return 0.0;
        }
        self.running_time.as_secs_f64() / self.total_duration.as_secs_f64()
    }

    /// Returns `true` if the task's total duration exceeds the given threshold.
    #[must_use]
    pub fn is_bottleneck(&self, threshold_ms: u64) -> bool {
        self.total_duration.as_millis() > u128::from(threshold_ms)
    }
}

/// Summary statistics over a collection of durations.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct DurationStats {
    /// Shortest observed duration.
    pub min: Duration,

    /// Longest observed duration.
    pub max: Duration,

    /// Arithmetic mean of the durations.
    pub mean: Duration,

    /// Median (50th percentile).
    pub median: Duration,

    /// 95th percentile.
    pub p95: Duration,

    /// 99th percentile.
    pub p99: Duration,

    /// Standard deviation, in seconds.
    pub std_dev: f64,

    /// Number of samples.
    pub count: usize,
}

impl DurationStats {
    /// Computes summary statistics from a list of durations.
    ///
    /// Returns all-zero statistics if the list is empty.
    #[must_use]
    pub fn from_durations(mut durations: Vec<Duration>) -> Self {
        if durations.is_empty() {
            return Self {
                min: Duration::ZERO,
                max: Duration::ZERO,
                mean: Duration::ZERO,
                median: Duration::ZERO,
                p95: Duration::ZERO,
                p99: Duration::ZERO,
                std_dev: 0.0,
                count: 0,
            };
        }

        durations.sort();
        let count = durations.len();

        let min = durations[0];
        let max = durations[count - 1];

        let sum: Duration = durations.iter().copied().sum();
        let mean = sum / count as u32;

        // Median: average the two middle elements for an even-sized list.
        let median = if count % 2 == 0 {
            (durations[count / 2 - 1] + durations[count / 2]) / 2
        } else {
            durations[count / 2]
        };

        // Percentiles by nearest-rank index, clamped to the last element.
        let p95_idx = (count as f64 * 0.95) as usize;
        let p99_idx = (count as f64 * 0.99) as usize;
        let p95 = durations[p95_idx.min(count - 1)];
        let p99 = durations[p99_idx.min(count - 1)];

        // Standard deviation computed in seconds (population variance).
        let mean_secs = mean.as_secs_f64();
        let variance: f64 = durations
            .iter()
            .map(|d| {
                let diff = d.as_secs_f64() - mean_secs;
                diff * diff
            })
            .sum::<f64>()
            / count as f64;
        let std_dev = variance.sqrt();

        Self {
            min,
            max,
            mean,
            median,
            p95,
            p99,
            std_dev,
            count,
        }
    }
}

/// Aggregated timing for an execution path (grouped by task name in `Profiler::record_task`).
#[derive(Debug, Clone)]
pub struct HotPath {
    /// Identifier of the path.
    pub path: String,

    /// How many times the path was executed.
    pub execution_count: u64,

    /// Total time spent across all executions.
    pub total_time: Duration,

    /// Average time per execution.
    pub avg_time: Duration,
}

/// Lock contention metrics for a single synchronization resource.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct LockContentionMetrics {
    /// Identifier of the contended resource.
    pub resource_id: ResourceId,

    /// Human-readable resource name.
    pub name: String,

    /// Number of acquisition attempts.
    pub acquire_attempts: u64,

    /// Number of successful acquisitions.
    pub successful_acquisitions: u64,

    /// Number of times a task had to wait for the lock.
    pub contention_count: u64,

    /// Total time tasks spent waiting for the lock.
    pub total_wait_time: Duration,

    /// Average wait time per contended acquisition.
    pub avg_wait_time: Duration,

    /// Longest single wait.
    pub max_wait_time: Duration,

    /// Tasks that have waited on this lock.
    pub waiting_tasks: Vec<TaskId>,

    /// Ratio of contended acquisitions to successful acquisitions.
    pub contention_rate: f64,
}

impl LockContentionMetrics {
    /// Creates an empty metrics record for the given resource.
    #[must_use]
    pub fn new(resource_id: ResourceId, name: String) -> Self {
        Self {
            resource_id,
            name,
            acquire_attempts: 0,
            successful_acquisitions: 0,
            contention_count: 0,
            total_wait_time: Duration::ZERO,
            avg_wait_time: Duration::ZERO,
            max_wait_time: Duration::ZERO,
            waiting_tasks: Vec::new(),
            contention_rate: 0.0,
        }
    }

    /// Records a contended acquisition in which the task waited `wait_time` for the lock.
    pub fn record_wait(&mut self, wait_time: Duration, task_id: TaskId) {
        self.contention_count += 1;
        self.total_wait_time += wait_time;

        if wait_time > self.max_wait_time {
            self.max_wait_time = wait_time;
        }

        if !self.waiting_tasks.contains(&task_id) {
            self.waiting_tasks.push(task_id);
        }

        self.update_averages();
    }

    /// Records a successful lock acquisition.
    pub fn record_acquisition(&mut self) {
        self.successful_acquisitions += 1;
        self.update_averages();
    }

    /// Records an acquisition attempt.
    pub fn record_attempt(&mut self) {
        self.acquire_attempts += 1;
    }

    /// Recomputes the average wait time and contention rate.
    fn update_averages(&mut self) {
        if self.contention_count > 0 {
            self.avg_wait_time = self.total_wait_time / self.contention_count as u32;
        }

        if self.successful_acquisitions > 0 {
            self.contention_rate =
                self.contention_count as f64 / self.successful_acquisitions as f64;
        }
    }

    /// Returns `true` if the contention rate exceeds the given threshold.
    #[must_use]
    pub fn is_highly_contended(&self, threshold: f64) -> bool {
        self.contention_rate > threshold
    }
}

/// Collects per-task, hot-path, and lock-contention metrics for a run.
pub struct Profiler {
    /// Metrics for each recorded task.
    task_metrics: HashMap<TaskId, TaskMetrics>,

    /// Aggregated timing per task name.
    hot_paths: HashMap<String, HotPath>,

    /// Lock contention metrics per resource.
    lock_contention: HashMap<ResourceId, LockContentionMetrics>,

    /// Bottleneck threshold in milliseconds.
    bottleneck_threshold: u64,

    /// Contention-rate threshold above which a lock is flagged.
    contention_threshold: f64,
}

impl Profiler {
    /// Creates a profiler with default thresholds (100 ms bottleneck, 0.3 contention rate).
    #[must_use]
    pub fn new() -> Self {
        Self {
            task_metrics: HashMap::new(),
            hot_paths: HashMap::new(),
            lock_contention: HashMap::new(),
            bottleneck_threshold: 100,
            contention_threshold: 0.3,
        }
    }

    /// Sets the bottleneck threshold in milliseconds.
    pub fn set_bottleneck_threshold(&mut self, threshold_ms: u64) {
        self.bottleneck_threshold = threshold_ms;
    }

    /// Sets the contention-rate threshold.
    pub fn set_contention_threshold(&mut self, threshold: f64) {
        self.contention_threshold = threshold;
    }

    /// Records that `task_id` waited `wait_time` for the given lock.
    pub fn record_lock_wait(
        &mut self,
        resource_id: ResourceId,
        name: String,
        wait_time: Duration,
        task_id: TaskId,
    ) {
        let metrics = self
            .lock_contention
            .entry(resource_id)
            .or_insert_with(|| LockContentionMetrics::new(resource_id, name));

        metrics.record_wait(wait_time, task_id);
    }

    /// Records a successful acquisition of the given lock.
    pub fn record_lock_acquisition(&mut self, resource_id: ResourceId, name: String) {
        let metrics = self
            .lock_contention
            .entry(resource_id)
            .or_insert_with(|| LockContentionMetrics::new(resource_id, name));

        metrics.record_acquisition();
    }

    /// Records an attempt to acquire the given lock.
    pub fn record_lock_attempt(&mut self, resource_id: ResourceId, name: String) {
        let metrics = self
            .lock_contention
            .entry(resource_id)
            .or_insert_with(|| LockContentionMetrics::new(resource_id, name));

        metrics.record_attempt();
    }

    /// Returns the contention metrics for a resource, if any were recorded.
    #[must_use]
    pub fn get_lock_metrics(&self, resource_id: &ResourceId) -> Option<&LockContentionMetrics> {
        self.lock_contention.get(resource_id)
    }

    /// Returns all recorded lock contention metrics.
    #[must_use]
    pub fn all_lock_metrics(&self) -> Vec<&LockContentionMetrics> {
        self.lock_contention.values().collect()
    }

    /// Returns up to `count` locks, ordered by descending contention rate.
    #[must_use]
    pub fn most_contended_locks(&self, count: usize) -> Vec<&LockContentionMetrics> {
        let mut metrics: Vec<_> = self.lock_contention.values().collect();
        metrics.sort_by(|a, b| {
            b.contention_rate
                .partial_cmp(&a.contention_rate)
                .unwrap_or(std::cmp::Ordering::Equal)
        });
        metrics.into_iter().take(count).collect()
    }

    /// Returns locks whose contention rate exceeds the configured threshold.
    #[must_use]
    pub fn identify_highly_contended_locks(&self) -> Vec<&LockContentionMetrics> {
        self.lock_contention
            .values()
            .filter(|m| m.is_highly_contended(self.contention_threshold))
            .collect()
    }

    /// Records a task's metrics and folds them into the hot-path totals for its name.
    pub fn record_task(&mut self, metrics: TaskMetrics) {
        let path = metrics.name.clone();
        let hot_path = self
            .hot_paths
            .entry(path.clone())
            .or_insert_with(|| HotPath {
                path: path.clone(),
                execution_count: 0,
                total_time: Duration::ZERO,
                avg_time: Duration::ZERO,
            });

        hot_path.execution_count += 1;
        hot_path.total_time += metrics.total_duration;
        hot_path.avg_time = hot_path.total_time / hot_path.execution_count as u32;

        self.task_metrics.insert(metrics.task_id, metrics);
    }

    /// Returns the metrics for a task, if recorded.
    #[must_use]
    pub fn get_task_metrics(&self, task_id: &TaskId) -> Option<&TaskMetrics> {
        self.task_metrics.get(task_id)
    }

    /// Returns all recorded task metrics.
    #[must_use]
    pub fn all_metrics(&self) -> Vec<&TaskMetrics> {
        self.task_metrics.values().collect()
    }

    /// Returns tasks whose total duration exceeds the configured bottleneck threshold.
    #[must_use]
    pub fn identify_bottlenecks(&self) -> Vec<&TaskMetrics> {
        self.task_metrics
            .values()
            .filter(|m| m.is_bottleneck(self.bottleneck_threshold))
            .collect()
    }

    /// Returns hot paths ordered by descending execution count.
    #[must_use]
    pub fn get_hot_paths(&self) -> Vec<&HotPath> {
        let mut paths: Vec<_> = self.hot_paths.values().collect();
        paths.sort_by(|a, b| b.execution_count.cmp(&a.execution_count));
        paths
    }

    /// Computes summary statistics over all recorded task durations.
    #[must_use]
    pub fn calculate_stats(&self) -> DurationStats {
        let durations: Vec<Duration> = self
            .task_metrics
            .values()
            .map(|m| m.total_duration)
            .collect();

        DurationStats::from_durations(durations)
    }

    /// Computes summary statistics over all recorded await durations.
    #[must_use]
    pub fn await_stats(&self) -> DurationStats {
        let mut all_await_durations = Vec::new();

        for metrics in self.task_metrics.values() {
            all_await_durations.extend(metrics.await_durations.iter().copied());
        }

        DurationStats::from_durations(all_await_durations)
    }

    /// Returns up to `count` tasks, ordered by descending total duration.
    #[must_use]
    pub fn slowest_tasks(&self, count: usize) -> Vec<&TaskMetrics> {
        let mut metrics: Vec<_> = self.task_metrics.values().collect();
        metrics.sort_by(|a, b| b.total_duration.cmp(&a.total_duration));
        metrics.into_iter().take(count).collect()
    }

    /// Returns up to `count` tasks, ordered by descending poll count.
    #[must_use]
    pub fn busiest_tasks(&self, count: usize) -> Vec<&TaskMetrics> {
        let mut metrics: Vec<_> = self.task_metrics.values().collect();
        metrics.sort_by(|a, b| b.poll_count.cmp(&a.poll_count));
        metrics.into_iter().take(count).collect()
    }

    /// Returns up to `count` tasks, ordered by ascending efficiency.
    #[must_use]
    pub fn least_efficient_tasks(&self, count: usize) -> Vec<&TaskMetrics> {
        let mut metrics: Vec<_> = self.task_metrics.values().collect();
        metrics.sort_by(|a, b| {
            a.efficiency()
                .partial_cmp(&b.efficiency())
                .unwrap_or(std::cmp::Ordering::Equal)
        });
        metrics.into_iter().take(count).collect()
    }

    /// Captures the current metrics as a snapshot that can be compared across runs.
    #[must_use]
    pub fn create_snapshot(&self, run_id: String) -> comparison::PerformanceSnapshot {
        let task_stats = self.calculate_stats();
        let task_metrics = self.task_metrics.values().cloned().collect();
        let lock_metrics = self.lock_contention.values().cloned().collect();

        let total_tasks = self.task_metrics.len();
        let total_execution_time: Duration =
            self.task_metrics.values().map(|m| m.total_duration).sum();

        let avg_task_duration = if total_tasks > 0 {
            total_execution_time / total_tasks as u32
        } else {
            Duration::ZERO
        };

        comparison::PerformanceSnapshot {
            timestamp: std::time::SystemTime::now()
                .duration_since(std::time::UNIX_EPOCH)
                .unwrap_or_default()
                .as_secs(),
            run_id,
            task_stats,
            task_metrics,
            lock_metrics,
            total_tasks,
            avg_task_duration,
            total_execution_time,
        }
    }
}

impl Default for Profiler {
    fn default() -> Self {
        Self::new()
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_duration_stats() {
        let durations = vec![
            Duration::from_millis(10),
            Duration::from_millis(20),
            Duration::from_millis(30),
            Duration::from_millis(40),
            Duration::from_millis(50),
        ];

        let stats = DurationStats::from_durations(durations);

        assert_eq!(stats.min, Duration::from_millis(10));
        assert_eq!(stats.max, Duration::from_millis(50));
        assert_eq!(stats.median, Duration::from_millis(30));
        assert_eq!(stats.count, 5);
    }

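    // Additional coverage sketch (not part of the original test suite): exercises the
    // empty-input path and the nearest-rank percentile indices of `from_durations`.
    #[test]
    fn test_duration_stats_empty_and_percentiles() {
        // An empty input yields all-zero statistics.
        let empty = DurationStats::from_durations(Vec::new());
        assert_eq!(empty.count, 0);
        assert_eq!(empty.mean, Duration::ZERO);
        assert_eq!(empty.max, Duration::ZERO);

        // With five samples of 10..=50 ms the mean is 30 ms and both tail
        // percentile indices clamp to the largest sample.
        let durations: Vec<Duration> =
            (1u64..=5).map(|i| Duration::from_millis(i * 10)).collect();
        let stats = DurationStats::from_durations(durations);
        assert_eq!(stats.mean, Duration::from_millis(30));
        assert_eq!(stats.p95, Duration::from_millis(50));
        assert_eq!(stats.p99, Duration::from_millis(50));
    }
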
    #[test]
    fn test_task_efficiency() {
        let mut metrics = TaskMetrics::new(TaskId::new(), "test".to_string());
        metrics.total_duration = Duration::from_millis(100);
        metrics.running_time = Duration::from_millis(80);
        metrics.blocked_time = Duration::from_millis(20);

        let efficiency = metrics.efficiency();
        assert!(
            (efficiency - 0.8).abs() < 0.01,
            "Expected efficiency ~0.8, got {efficiency}"
        );
    }

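    // Additional coverage sketch (not part of the original test suite): records two
    // tasks on the same path and checks hot-path aggregation plus the bottleneck and
    // slowest-task queries on `Profiler`. Assumes `TaskId::new()` returns a distinct
    // id per call, as the existing tests suggest.
    #[test]
    fn test_profiler_hot_paths_and_bottlenecks() {
        let mut profiler = Profiler::new();

        let mut fast = TaskMetrics::new(TaskId::new(), "db_query".to_string());
        fast.total_duration = Duration::from_millis(50);

        let mut slow = TaskMetrics::new(TaskId::new(), "db_query".to_string());
        slow.total_duration = Duration::from_millis(150);

        profiler.record_task(fast);
        profiler.record_task(slow);

        // Both executions are folded into one hot path keyed by the task name.
        let hot_paths = profiler.get_hot_paths();
        assert_eq!(hot_paths.len(), 1);
        assert_eq!(hot_paths[0].execution_count, 2);
        assert_eq!(hot_paths[0].avg_time, Duration::from_millis(100));

        // Only the 150 ms task exceeds the default 100 ms bottleneck threshold.
        assert_eq!(profiler.identify_bottlenecks().len(), 1);
        assert_eq!(
            profiler.slowest_tasks(1)[0].total_duration,
            Duration::from_millis(150)
        );
    }
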
    #[test]
    fn test_bottleneck_detection() {
        let mut metrics = TaskMetrics::new(TaskId::new(), "slow_task".to_string());
        metrics.total_duration = Duration::from_millis(150);

        assert!(metrics.is_bottleneck(100));
        assert!(!metrics.is_bottleneck(200));
    }
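
    // Additional coverage sketch (not part of the original test suite): checks the
    // efficiency- and poll-count-based rankings. Assumes `TaskId::new()` returns a
    // distinct id per call.
    #[test]
    fn test_task_rankings() {
        let mut profiler = Profiler::new();

        let mut efficient = TaskMetrics::new(TaskId::new(), "efficient".to_string());
        efficient.total_duration = Duration::from_millis(100);
        efficient.running_time = Duration::from_millis(90);
        efficient.poll_count = 2;

        let mut wasteful = TaskMetrics::new(TaskId::new(), "wasteful".to_string());
        wasteful.total_duration = Duration::from_millis(100);
        wasteful.running_time = Duration::from_millis(10);
        wasteful.poll_count = 20;

        profiler.record_task(efficient);
        profiler.record_task(wasteful);

        // The task that spends the least of its lifetime running ranks first.
        assert_eq!(profiler.least_efficient_tasks(1)[0].name, "wasteful");
        // The task polled most often ranks first among the busiest.
        assert_eq!(profiler.busiest_tasks(1)[0].name, "wasteful");
    }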
}