memscope_rs/lockfree/
api.rs1use super::aggregator::LockfreeAggregator;
7use super::tracker::{finalize_thread_tracker, init_thread_tracker, SamplingConfig};
8use std::path::Path;
9use std::sync::atomic::{AtomicBool, Ordering};
10
11use super::comprehensive_export::export_comprehensive_analysis;
12use super::resource_integration::{
13 BottleneckType, ComprehensiveAnalysis, CorrelationMetrics, PerformanceInsights,
14};
15
16static TRACKING_ENABLED: AtomicBool = AtomicBool::new(false);
18use std::sync::OnceLock;
19static OUTPUT_DIRECTORY: OnceLock<std::path::PathBuf> = OnceLock::new();
20
21pub fn trace_all<P: AsRef<Path>>(output_dir: P) -> Result<(), Box<dyn std::error::Error>> {
41 let output_path = output_dir.as_ref().to_path_buf();
42
43 let _ = OUTPUT_DIRECTORY.set(output_path.clone());
45
46 if output_path.exists() {
48 std::fs::remove_dir_all(&output_path)?;
49 }
50 std::fs::create_dir_all(&output_path)?;
51
52 TRACKING_ENABLED.store(true, Ordering::SeqCst);
54
55 println!("๐ Lockfree tracking started: {}", output_path.display());
56
57 Ok(())
58}
59
60pub fn trace_thread<P: AsRef<Path>>(output_dir: P) -> Result<(), Box<dyn std::error::Error>> {
83 let output_path = output_dir.as_ref().to_path_buf();
84
85 if !output_path.exists() {
87 std::fs::create_dir_all(&output_path)?;
88 }
89
90 init_thread_tracker(&output_path, Some(SamplingConfig::demo()))?;
92
93 Ok(())
94}
95
96pub fn stop_tracing() -> Result<(), Box<dyn std::error::Error>> {
114 if !TRACKING_ENABLED.load(Ordering::SeqCst) {
115 return Ok(()); }
117
118 let _ = finalize_thread_tracker();
120
121 TRACKING_ENABLED.store(false, Ordering::SeqCst);
123
124 let output_dir = OUTPUT_DIRECTORY
126 .get()
127 .ok_or("Output directory not set")?
128 .clone();
129
130 generate_reports(&output_dir)?;
131
132 println!(
133 "๐ Tracking complete: {}/memory_report.html",
134 output_dir.display()
135 );
136
137 Ok(())
138}
139
/// Returns `true` while global lockfree tracking (started by `trace_all`)
/// is active, i.e. between a successful `trace_all` and `stop_tracing`.
pub fn is_tracking() -> bool {
    TRACKING_ENABLED.load(Ordering::SeqCst)
}
157
/// Point-in-time view of tracked memory statistics.
///
/// NOTE(review): as produced by `memory_snapshot` below, the memory and
/// counter fields are currently placeholders (always zero) — confirm
/// before relying on them.
#[derive(Debug, Clone)]
pub struct MemorySnapshot {
    // Currently allocated memory, in megabytes.
    pub current_mb: f64,
    // Peak allocated memory observed, in megabytes.
    pub peak_mb: f64,
    // Total number of allocations recorded.
    pub allocations: u64,
    // Total number of deallocations recorded.
    pub deallocations: u64,
    // Number of threads currently being tracked.
    pub active_threads: usize,
}
175
176pub fn memory_snapshot() -> MemorySnapshot {
194 MemorySnapshot {
197 current_mb: 0.0,
198 peak_mb: 0.0,
199 allocations: 0,
200 deallocations: 0,
201 active_threads: if TRACKING_ENABLED.load(Ordering::SeqCst) {
202 1
203 } else {
204 0
205 },
206 }
207}
208
/// Run a block with lockfree tracking wrapped around it.
///
/// Expands to `trace_all($output_dir)?; ...; stop_tracing()?;`, so it can
/// only be used inside a function whose return type supports `?` on
/// `Result<_, Box<dyn std::error::Error>>`. If `trace_all` fails, the
/// block is never executed; if the block panics, `stop_tracing` is skipped.
#[macro_export]
macro_rules! auto_trace {
    ($output_dir:expr, $block:block) => {{
        $crate::lockfree::api::trace_all($output_dir)?;
        // Run the user block in a closure so its value can be captured
        // and returned after tracing is stopped.
        let result = (|| $block)();
        $crate::lockfree::api::stop_tracing()?;
        result
    }};
}
240
241pub fn quick_trace<F, R>(f: F) -> R
263where
264 F: FnOnce() -> R,
265{
266 let temp_dir = std::env::temp_dir().join("memscope_lockfree_quick");
267
268 if trace_all(&temp_dir).is_err() {
270 return f(); }
272
273 let result = f();
275
276 if stop_tracing().is_ok() {
278 println!("๐ Quick trace completed - check {}", temp_dir.display());
279 }
280
281 result
282}
283
284fn generate_reports(output_dir: &Path) -> Result<(), Box<dyn std::error::Error>> {
295 let aggregator = LockfreeAggregator::new(output_dir.to_path_buf());
296 let analysis = aggregator.aggregate_all_threads()?;
297
298 let comprehensive_analysis = ComprehensiveAnalysis {
300 memory_analysis: analysis.clone(),
301 resource_timeline: Vec::new(), performance_insights: PerformanceInsights {
303 primary_bottleneck: BottleneckType::Balanced,
304 cpu_efficiency_score: 50.0,
305 memory_efficiency_score: 75.0,
306 io_efficiency_score: 60.0,
307 recommendations: vec![
308 "Consider using memory pools for frequent allocations".to_string()
309 ],
310 thread_performance_ranking: Vec::new(),
311 },
312 correlation_metrics: CorrelationMetrics {
313 memory_cpu_correlation: 0.4,
314 memory_gpu_correlation: 0.5,
315 memory_io_correlation: 0.3,
316 allocation_rate_vs_cpu_usage: 0.3,
317 deallocation_rate_vs_memory_pressure: 0.2,
318 },
319 };
320
321 export_comprehensive_analysis(&comprehensive_analysis, output_dir, "api_export")?;
322
323 let json_path = output_dir.join("memory_data.json");
325 aggregator.export_analysis(&analysis, &json_path)?;
326
327 print_analysis_summary(&analysis);
329
330 Ok(())
331}
332
333fn print_analysis_summary(analysis: &super::analysis::LockfreeAnalysis) {
338 println!("\n๐ Lockfree Memory Analysis:");
339 println!(" ๐งต Threads analyzed: {}", analysis.thread_stats.len());
340 println!(
341 " ๐ Peak memory: {:.1} MB",
342 analysis.summary.peak_memory_usage as f64 / (1024.0 * 1024.0)
343 );
344 println!(
345 " ๐ Total allocations: {}",
346 analysis.summary.total_allocations
347 );
348 println!(
349 " โฉ๏ธ Total deallocations: {}",
350 analysis.summary.total_deallocations
351 );
352
353 if analysis.summary.total_allocations > 0 {
354 let efficiency = analysis.summary.total_deallocations as f64
355 / analysis.summary.total_allocations as f64
356 * 100.0;
357 println!(" โก Memory efficiency: {:.1}%", efficiency);
358 }
359}
360
#[cfg(test)]
mod tests {
    use super::*;
    use std::fs;
    use tempfile::TempDir;

    // NOTE(review): these tests read and mutate the process-wide
    // TRACKING_ENABLED flag and the set-once OUTPUT_DIRECTORY static.
    // Rust's test harness runs tests in parallel by default, so tests that
    // assert on this shared state can interfere with one another — several
    // below deliberately ignore results (`let _ = ...`) for that reason.

    // Fresh temporary directory for a single test; removed on drop.
    fn create_test_dir() -> TempDir {
        tempfile::tempdir().expect("Failed to create temp directory")
    }

    // trace_all must create the output directory and raise the flag.
    #[test]
    fn test_trace_all_creates_directory() {
        let temp_dir = create_test_dir();
        let output_path = temp_dir.path().join("test_output");

        let result = trace_all(&output_path);
        assert!(result.is_ok());
        assert!(output_path.exists());
        assert!(TRACKING_ENABLED.load(Ordering::Relaxed));
    }

    // trace_all must wipe any pre-existing contents of the output directory.
    #[test]
    fn test_trace_all_cleans_existing_directory() {
        let temp_dir = create_test_dir();
        let output_path = temp_dir.path().join("test_output");

        // Seed the directory with a file that should be removed.
        fs::create_dir_all(&output_path).unwrap();
        let test_file = output_path.join("existing_file.txt");
        fs::write(&test_file, "test content").unwrap();
        assert!(test_file.exists());

        let result = trace_all(&output_path);
        assert!(result.is_ok());
        assert!(output_path.exists());
        assert!(!test_file.exists());
    }

    // stop_tracing after a (best-effort) trace_all should succeed.
    #[test]
    fn test_stop_tracing() {
        let temp_dir = create_test_dir();
        let output_path = temp_dir.path().join("test_output");

        let _ = trace_all(&output_path);

        let result = stop_tracing();
        assert!(result.is_ok());
    }

    // stop_tracing is a no-op (Ok) when tracking was never started.
    #[test]
    fn test_stop_tracing_without_start() {
        TRACKING_ENABLED.store(false, Ordering::Relaxed);
        let result = stop_tracing();
        assert!(result.is_ok());
    }

    // trace_thread should create the directory without clearing it.
    #[test]
    fn test_trace_thread() {
        let temp_dir = create_test_dir();
        let output_path = temp_dir.path().join("test_output");

        let result = trace_thread(&output_path);
        assert!(result.is_ok());
        assert!(output_path.exists());
    }

    // Smoke test only: states are read but not asserted, because parallel
    // tests can flip the global flag at any moment.
    #[test]
    fn test_is_tracking() {
        let temp_dir = create_test_dir();
        let output_path = temp_dir.path().join("test_output");

        let _initial_state = is_tracking();

        let _ = trace_all(&output_path);
        let _tracking_state = is_tracking();
        let _ = stop_tracing();
        let _final_state = is_tracking();

    }

    // active_threads mirrors the global flag: 1 while tracking, 0 after.
    // NOTE(review): these equality asserts can race with parallel tests
    // that also toggle TRACKING_ENABLED.
    #[test]
    fn test_memory_snapshot() {
        let temp_dir = create_test_dir();
        let output_path = temp_dir.path().join("test_output");

        let snapshot1 = memory_snapshot();
        assert!(snapshot1.active_threads <= 1);
        trace_all(&output_path).unwrap();
        let snapshot2 = memory_snapshot();
        assert_eq!(snapshot2.active_threads, 1);
        stop_tracing().unwrap();

        let snapshot3 = memory_snapshot();
        assert_eq!(snapshot3.active_threads, 0);
    }

    // quick_trace must return the closure's value even if tracing fails.
    #[test]
    fn test_quick_trace() {
        let result = quick_trace(|| {
            let _data = vec![0u8; 1024];
            42
        });
        assert_eq!(result, 42);
    }

    // Smoke test only: flag values are read but not asserted (see module
    // note on parallel-test interference).
    #[test]
    fn test_tracking_enabled_state() {
        let temp_dir = create_test_dir();
        let output_path = temp_dir.path().join("test_output");

        let _initial_state = TRACKING_ENABLED.load(Ordering::Relaxed);

        let _ = trace_all(&output_path);
        let _enabled_state = TRACKING_ENABLED.load(Ordering::Relaxed);
        let _ = stop_tracing();
        let _final_state = TRACKING_ENABLED.load(Ordering::Relaxed);

    }

    // Exercises the trace_all/stop_tracing pair against a pre-existing
    // directory; OUTPUT_DIRECTORY itself is set-once and not inspected here.
    #[test]
    fn test_output_directory_persistence() {
        let temp_dir = create_test_dir();
        let output_path = temp_dir.path().join("test_output");

        assert!(std::fs::create_dir_all(&output_path).is_ok());
        assert!(output_path.exists());

        let _ = trace_all(&output_path);
        let _ = stop_tracing();
    }

    // Pin the default sampling configuration values.
    #[test]
    fn test_sampling_config_creation() {
        let config = SamplingConfig::default();

        assert_eq!(config.large_allocation_rate, 1.0);
        assert_eq!(config.medium_allocation_rate, 0.1);
        assert_eq!(config.small_allocation_rate, 0.01);
        assert_eq!(config.large_threshold, 10 * 1024);
        assert_eq!(config.medium_threshold, 1024);
        assert_eq!(config.frequency_threshold, 10);
    }

    // Each preset must validate and expose its distinguishing rates.
    #[test]
    fn test_sampling_config_presets() {
        let high_precision = SamplingConfig::high_precision();
        assert!(high_precision.validate().is_ok());
        assert_eq!(high_precision.large_allocation_rate, 1.0);
        assert_eq!(high_precision.medium_allocation_rate, 0.5);

        let performance_optimized = SamplingConfig::performance_optimized();
        assert!(performance_optimized.validate().is_ok());
        assert_eq!(performance_optimized.small_allocation_rate, 0.001);

        let leak_detection = SamplingConfig::leak_detection();
        assert!(leak_detection.validate().is_ok());
        assert_eq!(leak_detection.medium_allocation_rate, 0.8);
    }

    // An empty path must not panic; whether it errors is platform-dependent,
    // so the result is intentionally ignored.
    #[test]
    fn test_error_handling_invalid_path() {
        let result = trace_all("");
        let _ = result;
    }

    // Structural sanity: placeholder figures are still non-negative.
    #[test]
    fn test_memory_snapshot_structure() {
        let snapshot = memory_snapshot();

        assert!(snapshot.current_mb >= 0.0);
        assert!(snapshot.peak_mb >= 0.0);
    }
}