1use serde::{Deserialize, Serialize};
11use std::collections::HashMap;
12
/// Categories of performance bottleneck the analyzer can report.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub enum BottleneckKind {
    /// CPU-bound work.
    Cpu,
    /// Memory-bound work.
    Memory,
    /// Disk/file I/O volume.
    Io,
    /// Network transfer volume.
    Network,
    /// Lock contention.
    Lock,
    /// Heap allocation patterns (frequency / size).
    Allocation,
}
29
30impl BottleneckKind {
31 pub fn description(&self) -> &'static str {
33 match self {
34 Self::Cpu => "CPU-bound bottleneck",
35 Self::Memory => "Memory-bound bottleneck",
36 Self::Io => "I/O-bound bottleneck",
37 Self::Network => "Network-bound bottleneck",
38 Self::Lock => "Lock contention bottleneck",
39 Self::Allocation => "Allocation pattern bottleneck",
40 }
41 }
42
43 pub fn default_severity_threshold(&self) -> f64 {
45 match self {
46 Self::Cpu => 0.8,
47 Self::Memory => 0.85,
48 Self::Io => 0.75,
49 Self::Network => 0.7,
50 Self::Lock => 0.9,
51 Self::Allocation => 0.8,
52 }
53 }
54}
55
/// A single detected bottleneck for one task, as produced by
/// `BottleneckAnalyzer::analyze_task`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PerformanceIssue {
    /// Which category of bottleneck was detected.
    pub bottleneck_type: BottleneckKind,
    /// Identifier of the task the issue was detected on.
    pub task_id: u64,
    /// Human-readable name of the task.
    pub task_name: String,
    /// Severity score; issues below the config's `min_severity_to_report`
    /// are filtered out before being returned.
    pub severity: f64,
    /// Human-readable summary of what was observed.
    pub description: String,
    /// Suggested remediation, worded by severity tier.
    pub suggestion: String,
    /// Wall-clock time of detection, in milliseconds since the Unix epoch.
    pub timestamp_ms: u64,
    /// Raw metric values backing the detection; only the fields relevant to
    /// `bottleneck_type` are populated, the rest keep their defaults.
    pub metrics: BottleneckMetrics,
}
76
/// Metric snapshot attached to a `PerformanceIssue`. Detectors fill in only
/// the fields relevant to the reported bottleneck (via `..Default::default()`).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BottleneckMetrics {
    /// CPU utilization, expressed as a percentage (e.g. 90.0).
    pub cpu_usage_percent: f64,
    /// Memory utilization, expressed as a percentage.
    pub memory_usage_percent: f64,
    /// Total bytes processed through I/O.
    pub io_bytes_processed: u64,
    /// Total bytes transferred over the network.
    pub network_bytes_transferred: u64,
    /// Time spent waiting on locks, in nanoseconds.
    /// NOTE(review): nothing in this module writes this field — confirm the
    /// producer.
    pub lock_wait_time_ns: u64,
    /// Heap allocations per second.
    pub allocation_frequency: f64,
    /// Mean allocation size, in bytes.
    pub average_allocation_size: f64,
}
95
/// Per-task measurements fed into `BottleneckAnalyzer::analyze_task`.
///
/// NOTE(review): there is no lock-wait field here, so the analyzer's
/// `enable_lock_detection` flag has nothing to act on — confirm whether lock
/// data arrives through another path.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TaskMetrics {
    /// Identifier of the task being measured.
    pub task_id: u64,
    /// Human-readable name of the task.
    pub task_name: String,
    /// CPU utilization, expressed as a percentage (e.g. 90.0).
    pub cpu_usage_percent: f64,
    /// Memory utilization, expressed as a percentage.
    pub memory_usage_percent: f64,
    /// Total bytes processed through I/O.
    pub io_bytes_processed: u64,
    /// Total bytes transferred over the network.
    pub network_bytes_transferred: u64,
    /// Heap allocations per second.
    pub allocation_frequency: f64,
    /// Mean allocation size, in bytes.
    pub average_allocation_size: f64,
}
119
120impl Default for BottleneckMetrics {
121 fn default() -> Self {
122 Self {
123 cpu_usage_percent: 0.0,
124 memory_usage_percent: 0.0,
125 io_bytes_processed: 0,
126 network_bytes_transferred: 0,
127 lock_wait_time_ns: 0,
128 allocation_frequency: 0.0,
129 average_allocation_size: 0.0,
130 }
131 }
132}
133
/// Tuning knobs for `BottleneckAnalyzer`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BottleneckConfig {
    /// Run the CPU detector during `analyze_task`.
    pub enable_cpu_detection: bool,
    /// Run the memory detector.
    pub enable_memory_detection: bool,
    /// Run the I/O detector.
    pub enable_io_detection: bool,
    /// Run the network detector.
    pub enable_network_detection: bool,
    /// Run the lock-contention detector.
    /// NOTE(review): this flag is not consulted by `analyze_task` — confirm
    /// where lock detection is supposed to happen.
    pub enable_lock_detection: bool,
    /// Run the allocation-pattern detector.
    pub enable_allocation_detection: bool,
    /// Per-kind severity thresholds (fractions); detectors fall back to
    /// `BottleneckKind::default_severity_threshold` for missing entries.
    pub severity_thresholds: HashMap<BottleneckKind, f64>,
    /// Issues with severity below this value are dropped from the results.
    pub min_severity_to_report: f64,
}
154
155impl Default for BottleneckConfig {
156 fn default() -> Self {
157 let mut severity_thresholds = HashMap::new();
158 severity_thresholds.insert(BottleneckKind::Cpu, 0.8);
159 severity_thresholds.insert(BottleneckKind::Memory, 0.85);
160 severity_thresholds.insert(BottleneckKind::Io, 0.75);
161 severity_thresholds.insert(BottleneckKind::Network, 0.7);
162 severity_thresholds.insert(BottleneckKind::Lock, 0.9);
163 severity_thresholds.insert(BottleneckKind::Allocation, 0.8);
164
165 Self {
166 enable_cpu_detection: true,
167 enable_memory_detection: true,
168 enable_io_detection: true,
169 enable_network_detection: true,
170 enable_lock_detection: true,
171 enable_allocation_detection: true,
172 severity_thresholds,
173 min_severity_to_report: 0.5,
174 }
175 }
176}
177
/// Detects per-task performance bottlenecks from `TaskMetrics`, driven by a
/// `BottleneckConfig`.
pub struct BottleneckAnalyzer {
    // Active configuration; read via `config()`, replaced via `set_config()`.
    config: BottleneckConfig,
}
182
183impl BottleneckAnalyzer {
184 pub fn new() -> Self {
186 Self {
187 config: BottleneckConfig::default(),
188 }
189 }
190
191 pub fn with_config(config: BottleneckConfig) -> Self {
193 Self { config }
194 }
195
196 pub fn analyze_task(&self, metrics: &TaskMetrics) -> Vec<PerformanceIssue> {
198 let mut bottlenecks = Vec::new();
199 let timestamp_ms = Self::current_timestamp_ms();
200
201 if self.config.enable_cpu_detection {
202 if let Some(bottleneck) = self.detect_cpu_bottleneck(
203 metrics.task_id,
204 &metrics.task_name,
205 metrics.cpu_usage_percent,
206 timestamp_ms,
207 ) {
208 bottlenecks.push(bottleneck);
209 }
210 }
211
212 if self.config.enable_memory_detection {
213 if let Some(bottleneck) = self.detect_memory_bottleneck(
214 metrics.task_id,
215 &metrics.task_name,
216 metrics.memory_usage_percent,
217 timestamp_ms,
218 ) {
219 bottlenecks.push(bottleneck);
220 }
221 }
222
223 if self.config.enable_io_detection {
224 if let Some(bottleneck) = self.detect_io_bottleneck(
225 metrics.task_id,
226 &metrics.task_name,
227 metrics.io_bytes_processed,
228 timestamp_ms,
229 ) {
230 bottlenecks.push(bottleneck);
231 }
232 }
233
234 if self.config.enable_network_detection {
235 if let Some(bottleneck) = self.detect_network_bottleneck(
236 metrics.task_id,
237 &metrics.task_name,
238 metrics.network_bytes_transferred,
239 timestamp_ms,
240 ) {
241 bottlenecks.push(bottleneck);
242 }
243 }
244
245 if self.config.enable_allocation_detection {
246 if let Some(bottleneck) = self.detect_allocation_bottleneck(
247 metrics.task_id,
248 &metrics.task_name,
249 metrics.allocation_frequency,
250 metrics.average_allocation_size,
251 timestamp_ms,
252 ) {
253 bottlenecks.push(bottleneck);
254 }
255 }
256
257 bottlenecks
258 .into_iter()
259 .filter(|b| b.severity >= self.config.min_severity_to_report)
260 .collect()
261 }
262
263 fn detect_cpu_bottleneck(
265 &self,
266 task_id: u64,
267 task_name: &str,
268 cpu_usage_percent: f64,
269 timestamp_ms: u64,
270 ) -> Option<PerformanceIssue> {
271 let threshold = *self
272 .config
273 .severity_thresholds
274 .get(&BottleneckKind::Cpu)
275 .unwrap_or(&BottleneckKind::Cpu.default_severity_threshold());
276
277 if cpu_usage_percent < threshold {
278 return None;
279 }
280
281 let severity = (cpu_usage_percent - threshold) / (100.0 - threshold);
282
283 let description = format!(
284 "CPU usage is {:.1}%, exceeding threshold of {:.1}%",
285 cpu_usage_percent,
286 threshold * 100.0
287 );
288
289 let suggestion = if severity > 0.8 {
290 "Critical CPU bottleneck: Consider parallelizing work, optimizing algorithms, or reducing computational complexity"
291 } else if severity > 0.5 {
292 "Significant CPU bottleneck: Profile hot paths, optimize critical sections, and consider caching"
293 } else {
294 "Moderate CPU bottleneck: Review algorithm efficiency and consider performance optimizations"
295 };
296
297 Some(PerformanceIssue {
298 bottleneck_type: BottleneckKind::Cpu,
299 task_id,
300 task_name: task_name.to_string(),
301 severity,
302 description,
303 suggestion: suggestion.to_string(),
304 timestamp_ms,
305 metrics: BottleneckMetrics {
306 cpu_usage_percent,
307 ..Default::default()
308 },
309 })
310 }
311
312 fn detect_memory_bottleneck(
314 &self,
315 task_id: u64,
316 task_name: &str,
317 memory_usage_percent: f64,
318 timestamp_ms: u64,
319 ) -> Option<PerformanceIssue> {
320 let threshold = *self
321 .config
322 .severity_thresholds
323 .get(&BottleneckKind::Memory)
324 .unwrap_or(&BottleneckKind::Memory.default_severity_threshold());
325
326 if memory_usage_percent < threshold {
327 return None;
328 }
329
330 let severity = (memory_usage_percent - threshold) / (100.0 - threshold);
331
332 let description = format!(
333 "Memory usage is {:.1}%, exceeding threshold of {:.1}%",
334 memory_usage_percent,
335 threshold * 100.0
336 );
337
338 let suggestion = if severity > 0.8 {
339 "Critical memory bottleneck: Implement memory pooling, reduce memory footprint, or increase available memory"
340 } else if severity > 0.5 {
341 "Significant memory bottleneck: Optimize data structures, reduce allocations, and implement memory reuse"
342 } else {
343 "Moderate memory bottleneck: Review memory usage patterns and consider optimization strategies"
344 };
345
346 Some(PerformanceIssue {
347 bottleneck_type: BottleneckKind::Memory,
348 task_id,
349 task_name: task_name.to_string(),
350 severity,
351 description,
352 suggestion: suggestion.to_string(),
353 timestamp_ms,
354 metrics: BottleneckMetrics {
355 memory_usage_percent,
356 ..Default::default()
357 },
358 })
359 }
360
361 fn detect_io_bottleneck(
363 &self,
364 task_id: u64,
365 task_name: &str,
366 io_bytes_processed: u64,
367 timestamp_ms: u64,
368 ) -> Option<PerformanceIssue> {
369 let threshold = *self
370 .config
371 .severity_thresholds
372 .get(&BottleneckKind::Io)
373 .unwrap_or(&BottleneckKind::Io.default_severity_threshold());
374
375 let io_mb = io_bytes_processed as f64 / 1_048_576.0;
376
377 if io_mb < threshold * 100.0 {
378 return None;
379 }
380
381 let severity = ((io_mb - threshold * 100.0) / (1_000.0 - threshold * 100.0)).min(1.0);
382
383 let description =
384 format!("I/O throughput is {io_mb:.1} MB, indicating potential bottleneck");
385
386 let suggestion = if severity > 0.8 {
387 "Critical I/O bottleneck: Implement buffering, use asynchronous I/O, or optimize access patterns"
388 } else if severity > 0.5 {
389 "Significant I/O bottleneck: Consider batching operations, using memory-mapped files, or optimizing disk layout"
390 } else {
391 "Moderate I/O bottleneck: Review I/O patterns and consider caching strategies"
392 };
393
394 Some(PerformanceIssue {
395 bottleneck_type: BottleneckKind::Io,
396 task_id,
397 task_name: task_name.to_string(),
398 severity,
399 description,
400 suggestion: suggestion.to_string(),
401 timestamp_ms,
402 metrics: BottleneckMetrics {
403 io_bytes_processed,
404 ..Default::default()
405 },
406 })
407 }
408
409 fn detect_network_bottleneck(
411 &self,
412 task_id: u64,
413 task_name: &str,
414 network_bytes_transferred: u64,
415 timestamp_ms: u64,
416 ) -> Option<PerformanceIssue> {
417 let threshold = *self
418 .config
419 .severity_thresholds
420 .get(&BottleneckKind::Network)
421 .unwrap_or(&BottleneckKind::Network.default_severity_threshold());
422
423 let network_mb = network_bytes_transferred as f64 / 1_048_576.0;
424
425 if network_mb < threshold * 50.0 {
426 return None;
427 }
428
429 let severity = ((network_mb - threshold * 50.0) / (500.0 - threshold * 50.0)).min(1.0);
430
431 let description = format!(
432 "Network transfer is {:.1} MB, indicating potential bottleneck",
433 network_mb
434 );
435
436 let suggestion = if severity > 0.8 {
437 "Critical network bottleneck: Implement compression, use connection pooling, or optimize data serialization"
438 } else if severity > 0.5 {
439 "Significant network bottleneck: Consider batching requests, using HTTP/2, or implementing caching"
440 } else {
441 "Moderate network bottleneck: Review network usage patterns and consider optimization strategies"
442 };
443
444 Some(PerformanceIssue {
445 bottleneck_type: BottleneckKind::Network,
446 task_id,
447 task_name: task_name.to_string(),
448 severity,
449 description,
450 suggestion: suggestion.to_string(),
451 timestamp_ms,
452 metrics: BottleneckMetrics {
453 network_bytes_transferred,
454 ..Default::default()
455 },
456 })
457 }
458
459 fn detect_allocation_bottleneck(
461 &self,
462 task_id: u64,
463 task_name: &str,
464 allocation_frequency: f64,
465 average_allocation_size: f64,
466 timestamp_ms: u64,
467 ) -> Option<PerformanceIssue> {
468 let threshold = *self
469 .config
470 .severity_thresholds
471 .get(&BottleneckKind::Allocation)
472 .unwrap_or(&BottleneckKind::Allocation.default_severity_threshold());
473
474 let severity = if allocation_frequency > 1000.0 {
475 (allocation_frequency - 1000.0) / 9000.0
476 } else if average_allocation_size < 1024.0 && allocation_frequency > 100.0 {
477 0.7
478 } else {
479 0.0
480 };
481
482 if severity < threshold {
483 return None;
484 }
485
486 let description = if allocation_frequency > 1000.0 {
487 format!(
488 "High allocation frequency: {:.0} allocations/second",
489 allocation_frequency
490 )
491 } else {
492 format!(
493 "Small frequent allocations: {:.0} bytes avg, {:.0} allocations/second",
494 average_allocation_size, allocation_frequency
495 )
496 };
497
498 let suggestion = if severity > 0.8 {
499 "Critical allocation bottleneck: Implement object pooling, use arena allocators, or redesign to reduce allocations"
500 } else if severity > 0.5 {
501 "Significant allocation bottleneck: Consider allocation pooling, reuse buffers, or optimize allocation patterns"
502 } else {
503 "Moderate allocation bottleneck: Review allocation patterns and consider memory reuse strategies"
504 };
505
506 Some(PerformanceIssue {
507 bottleneck_type: BottleneckKind::Allocation,
508 task_id,
509 task_name: task_name.to_string(),
510 severity,
511 description,
512 suggestion: suggestion.to_string(),
513 timestamp_ms,
514 metrics: BottleneckMetrics {
515 allocation_frequency,
516 average_allocation_size,
517 ..Default::default()
518 },
519 })
520 }
521
522 fn current_timestamp_ms() -> u64 {
524 use std::time::{SystemTime, UNIX_EPOCH};
525 SystemTime::now()
526 .duration_since(UNIX_EPOCH)
527 .unwrap_or_default()
528 .as_millis() as u64
529 }
530
531 pub fn config(&self) -> &BottleneckConfig {
533 &self.config
534 }
535
536 pub fn set_config(&mut self, config: BottleneckConfig) {
538 self.config = config;
539 }
540}
541
542impl Default for BottleneckAnalyzer {
543 fn default() -> Self {
544 Self::new()
545 }
546}
547
548#[cfg(test)]
549mod tests {
550 use super::*;
551
552 #[test]
553 fn test_bottleneck_type_description() {
554 assert_eq!(BottleneckKind::Cpu.description(), "CPU-bound bottleneck");
555 assert_eq!(
556 BottleneckKind::Memory.description(),
557 "Memory-bound bottleneck"
558 );
559 assert_eq!(BottleneckKind::Io.description(), "I/O-bound bottleneck");
560 assert_eq!(
561 BottleneckKind::Network.description(),
562 "Network-bound bottleneck"
563 );
564 assert_eq!(
565 BottleneckKind::Lock.description(),
566 "Lock contention bottleneck"
567 );
568 assert_eq!(
569 BottleneckKind::Allocation.description(),
570 "Allocation pattern bottleneck"
571 );
572 }
573
574 #[test]
575 fn test_bottleneck_analyzer_creation() {
576 let analyzer = BottleneckAnalyzer::new();
577 assert!(analyzer.config().enable_cpu_detection);
578 assert!(analyzer.config().enable_memory_detection);
579 }
580
581 #[test]
582 fn test_cpu_bottleneck_detection() {
583 let analyzer = BottleneckAnalyzer::new();
584 let metrics = TaskMetrics {
585 task_id: 1,
586 task_name: "test_task".to_string(),
587 cpu_usage_percent: 90.0,
588 memory_usage_percent: 50.0,
589 io_bytes_processed: 0,
590 network_bytes_transferred: 0,
591 allocation_frequency: 0.0,
592 average_allocation_size: 0.0,
593 };
594 let bottlenecks = analyzer.analyze_task(&metrics);
595
596 assert!(!bottlenecks.is_empty());
597 assert_eq!(bottlenecks[0].bottleneck_type, BottleneckKind::Cpu);
598 assert!(bottlenecks[0].severity > 0.0);
599 }
600
601 #[test]
602 fn test_memory_bottleneck_detection() {
603 let analyzer = BottleneckAnalyzer::new();
604 let metrics = TaskMetrics {
605 task_id: 1,
606 task_name: "test_task".to_string(),
607 cpu_usage_percent: 50.0,
608 memory_usage_percent: 90.0,
609 io_bytes_processed: 0,
610 network_bytes_transferred: 0,
611 allocation_frequency: 0.0,
612 average_allocation_size: 0.0,
613 };
614 let bottlenecks = analyzer.analyze_task(&metrics);
615
616 assert!(!bottlenecks.is_empty());
617 assert_eq!(bottlenecks[0].bottleneck_type, BottleneckKind::Memory);
618 assert!(bottlenecks[0].severity > 0.0);
619 }
620
621 #[test]
622 fn test_io_bottleneck_detection() {
623 let analyzer = BottleneckAnalyzer::new();
624 let metrics = TaskMetrics {
625 task_id: 1,
626 task_name: "test_task".to_string(),
627 cpu_usage_percent: 50.0,
628 memory_usage_percent: 50.0,
629 io_bytes_processed: 750_000_000,
630 network_bytes_transferred: 0,
631 allocation_frequency: 0.0,
632 average_allocation_size: 0.0,
633 };
634 let bottlenecks = analyzer.analyze_task(&metrics);
635
636 assert!(!bottlenecks.is_empty());
637 assert_eq!(bottlenecks[0].bottleneck_type, BottleneckKind::Io);
638 assert!(bottlenecks[0].severity > 0.0);
639 }
640
641 #[test]
642 fn test_network_bottleneck_detection() {
643 let analyzer = BottleneckAnalyzer::new();
644 let metrics = TaskMetrics {
645 task_id: 1,
646 task_name: "test_task".to_string(),
647 cpu_usage_percent: 50.0,
648 memory_usage_percent: 50.0,
649 io_bytes_processed: 0,
650 network_bytes_transferred: 350_000_000, allocation_frequency: 0.0,
653 average_allocation_size: 0.0,
654 };
655 let bottlenecks = analyzer.analyze_task(&metrics);
656
657 assert!(!bottlenecks.is_empty());
658 assert_eq!(bottlenecks[0].bottleneck_type, BottleneckKind::Network);
659 assert!(bottlenecks[0].severity > 0.0);
660 }
661
662 #[test]
663 fn test_allocation_bottleneck_detection() {
664 let analyzer = BottleneckAnalyzer::new();
665 let metrics = TaskMetrics {
666 task_id: 1,
667 task_name: "test_task".to_string(),
668 cpu_usage_percent: 50.0,
669 memory_usage_percent: 50.0,
670 io_bytes_processed: 0,
671 network_bytes_transferred: 0,
672 allocation_frequency: 9000.0,
674 average_allocation_size: 512.0,
675 };
676 let bottlenecks = analyzer.analyze_task(&metrics);
677
678 assert!(!bottlenecks.is_empty());
679 assert_eq!(bottlenecks[0].bottleneck_type, BottleneckKind::Allocation);
680 assert!(bottlenecks[0].severity > 0.0);
681 }
682
683 #[test]
684 fn test_no_bottleneck_below_threshold() {
685 let analyzer = BottleneckAnalyzer::new();
686 let metrics = TaskMetrics {
687 task_id: 1,
688 task_name: "test_task".to_string(),
689 cpu_usage_percent: 50.0,
690 memory_usage_percent: 50.0,
691 io_bytes_processed: 0,
692 network_bytes_transferred: 0,
693 allocation_frequency: 0.0,
694 average_allocation_size: 0.0,
695 };
696 let bottlenecks = analyzer.analyze_task(&metrics);
697
698 assert!(bottlenecks.is_empty());
699 }
700
701 #[test]
702 fn test_custom_config() {
703 let config = BottleneckConfig {
704 min_severity_to_report: 0.9,
705 ..Default::default()
706 };
707 let analyzer = BottleneckAnalyzer::with_config(config);
708
709 let metrics = TaskMetrics {
710 task_id: 1,
711 task_name: "test_task".to_string(),
712 cpu_usage_percent: 85.0,
713 memory_usage_percent: 50.0,
714 io_bytes_processed: 0,
715 network_bytes_transferred: 0,
716 allocation_frequency: 0.0,
717 average_allocation_size: 0.0,
718 };
719 let bottlenecks = analyzer.analyze_task(&metrics);
720
721 assert!(bottlenecks.is_empty());
722 }
723
724 #[test]
725 fn test_bottleneck_metrics_default() {
726 let metrics = BottleneckMetrics::default();
727 assert_eq!(metrics.cpu_usage_percent, 0.0);
728 assert_eq!(metrics.memory_usage_percent, 0.0);
729 assert_eq!(metrics.io_bytes_processed, 0);
730 assert_eq!(metrics.network_bytes_transferred, 0);
731 assert_eq!(metrics.lock_wait_time_ns, 0);
732 assert_eq!(metrics.allocation_frequency, 0.0);
733 assert_eq!(metrics.average_allocation_size, 0.0);
734 }
735}