//! memscope_rs/lockfree/api.rs

use tracing::info;
7
8use super::aggregator::LockfreeAggregator;
9use super::tracker::{finalize_thread_tracker, init_thread_tracker, SamplingConfig};
10use std::path::Path;
11use std::sync::atomic::{AtomicBool, Ordering};
12
13use super::comprehensive_export::export_comprehensive_analysis;
14use super::resource_integration::{
15 BottleneckType, ComprehensiveAnalysis, CorrelationMetrics, PerformanceInsights,
16};
17
/// Process-wide flag: set by `trace_all`, cleared by `stop_tracing`.
static TRACKING_ENABLED: AtomicBool = AtomicBool::new(false);
use std::sync::OnceLock;
/// Output directory recorded by `trace_all`. Write-once for the life of the
/// process: later `trace_all` calls cannot change it (see NOTE in `trace_all`).
static OUTPUT_DIRECTORY: OnceLock<std::path::PathBuf> = OnceLock::new();
22
23pub fn trace_all<P: AsRef<Path>>(output_dir: P) -> Result<(), Box<dyn std::error::Error>> {
43 let output_path = output_dir.as_ref().to_path_buf();
44
45 let _ = OUTPUT_DIRECTORY.set(output_path.clone());
47
48 if output_path.exists() {
50 std::fs::remove_dir_all(&output_path)?;
51 }
52 std::fs::create_dir_all(&output_path)?;
53
54 TRACKING_ENABLED.store(true, Ordering::SeqCst);
56
57 println!("๐ Lockfree tracking started: {}", output_path.display());
58
59 Ok(())
60}
61
62pub fn trace_thread<P: AsRef<Path>>(output_dir: P) -> Result<(), Box<dyn std::error::Error>> {
85 let output_path = output_dir.as_ref().to_path_buf();
86
87 if !output_path.exists() {
89 std::fs::create_dir_all(&output_path)?;
90 }
91
92 init_thread_tracker(&output_path, Some(SamplingConfig::demo()))?;
94
95 Ok(())
96}
97
98pub fn stop_tracing() -> Result<(), Box<dyn std::error::Error>> {
116 if !TRACKING_ENABLED.load(Ordering::SeqCst) {
117 return Ok(()); }
119
120 let _ = finalize_thread_tracker();
122
123 TRACKING_ENABLED.store(false, Ordering::SeqCst);
125
126 let output_dir = OUTPUT_DIRECTORY
128 .get()
129 .ok_or("Output directory not set")?
130 .clone();
131
132 generate_reports(&output_dir)?;
133
134 println!(
135 "๐ Tracking complete: {}/memory_report.html",
136 output_dir.display()
137 );
138
139 Ok(())
140}
141
/// Reports whether global lock-free tracking is currently active
/// (i.e. between a successful `trace_all` and the next `stop_tracing`).
pub fn is_tracking() -> bool {
    TRACKING_ENABLED.load(Ordering::SeqCst)
}
159
/// Point-in-time view of tracked memory statistics, as returned by
/// [`memory_snapshot`].
#[derive(Debug, Clone)]
pub struct MemorySnapshot {
    /// Current tracked memory usage in megabytes.
    pub current_mb: f64,
    /// Peak tracked memory usage in megabytes.
    pub peak_mb: f64,
    /// Total number of recorded allocations.
    pub allocations: u64,
    /// Total number of recorded deallocations.
    pub deallocations: u64,
    /// Number of threads currently participating in tracking.
    pub active_threads: usize,
}
177
178pub fn memory_snapshot() -> MemorySnapshot {
196 MemorySnapshot {
199 current_mb: 0.0,
200 peak_mb: 0.0,
201 allocations: 0,
202 deallocations: 0,
203 active_threads: if TRACKING_ENABLED.load(Ordering::SeqCst) {
204 1
205 } else {
206 0
207 },
208 }
209}
210
/// Run `$block` with lock-free tracing enabled against `$output_dir`,
/// stopping tracing (and generating reports) afterwards.
///
/// NOTE: both `trace_all` and `stop_tracing` are propagated with `?`, so this
/// macro can only be used inside a function whose error type accepts
/// `Box<dyn std::error::Error>` via `From`.
#[macro_export]
macro_rules! auto_trace {
    ($output_dir:expr, $block:block) => {{
        // Start tracing; bubble any setup error up to the caller.
        $crate::lockfree::api::trace_all($output_dir)?;
        // Run the user block inside a closure so `$block` stays an expression.
        let result = (|| $block)();
        // Stop and generate reports before handing back the block's result.
        $crate::lockfree::api::stop_tracing()?;
        result
    }};
}
242
243pub fn quick_trace<F, R>(f: F) -> R
265where
266 F: FnOnce() -> R,
267{
268 let temp_dir = std::env::temp_dir().join("memscope_lockfree_quick");
269
270 if trace_all(&temp_dir).is_err() {
272 return f(); }
274
275 let result = f();
277
278 if stop_tracing().is_ok() {
280 println!("๐ Quick trace completed - check {}", temp_dir.display());
281 }
282
283 result
284}
285
286fn generate_reports(output_dir: &Path) -> Result<(), Box<dyn std::error::Error>> {
297 let aggregator = LockfreeAggregator::new(output_dir.to_path_buf());
298 let analysis = aggregator.aggregate_all_threads()?;
299
300 let comprehensive_analysis = ComprehensiveAnalysis {
302 memory_analysis: analysis.clone(),
303 resource_timeline: Vec::new(), performance_insights: PerformanceInsights {
305 primary_bottleneck: BottleneckType::Balanced,
306 cpu_efficiency_score: 50.0,
307 memory_efficiency_score: 75.0,
308 io_efficiency_score: 60.0,
309 recommendations: vec![
310 "Consider using memory pools for frequent allocations".to_string()
311 ],
312 thread_performance_ranking: Vec::new(),
313 },
314 correlation_metrics: CorrelationMetrics {
315 memory_cpu_correlation: 0.4,
316 memory_gpu_correlation: 0.5,
317 memory_io_correlation: 0.3,
318 allocation_rate_vs_cpu_usage: 0.3,
319 deallocation_rate_vs_memory_pressure: 0.2,
320 },
321 };
322
323 export_comprehensive_analysis(&comprehensive_analysis, output_dir, "api_export")?;
324
325 let json_path = output_dir.join("memory_data.json");
327 aggregator.export_analysis(&analysis, &json_path)?;
328
329 cleanup_intermediate_files_api(output_dir)?;
331
332 print_analysis_summary(&analysis);
334
335 Ok(())
336}
337
338fn print_analysis_summary(analysis: &super::analysis::LockfreeAnalysis) {
343 println!("\n๐ Lockfree Memory Analysis:");
344 println!(" ๐งต Threads analyzed: {}", analysis.thread_stats.len());
345 println!(
346 " ๐ Peak memory: {:.1} MB",
347 analysis.summary.peak_memory_usage as f64 / (1024.0 * 1024.0)
348 );
349 println!(
350 " ๐ Total allocations: {}",
351 analysis.summary.total_allocations
352 );
353 println!(
354 " โฉ๏ธ Total deallocations: {}",
355 analysis.summary.total_deallocations
356 );
357
358 if analysis.summary.total_allocations > 0 {
359 let efficiency = analysis.summary.total_deallocations as f64
360 / analysis.summary.total_allocations as f64
361 * 100.0;
362 println!(" โก Memory efficiency: {:.1}%", efficiency);
363 }
364}
365
366fn cleanup_intermediate_files_api(output_dir: &Path) -> Result<(), Box<dyn std::error::Error>> {
371 let mut cleaned_count = 0;
372
373 if let Ok(entries) = std::fs::read_dir(output_dir) {
375 for entry in entries.flatten() {
376 let path = entry.path();
377 if let Some(file_name) = path.file_name() {
378 if let Some(name_str) = file_name.to_str() {
379 if (name_str.starts_with("memscope_thread_")
381 && (name_str.ends_with(".bin") || name_str.ends_with(".freq")))
382 || (name_str.starts_with("thread_") && name_str.ends_with(".bin"))
383 {
384 if std::fs::remove_file(&path).is_ok() {
386 cleaned_count += 1;
387 }
388 }
389 }
390 }
391 }
392 }
393
394 if cleaned_count > 0 {
395 info!("Cleaned {} intermediate tracking files", cleaned_count);
396 }
397
398 Ok(())
399}
400
#[cfg(test)]
mod tests {
    use super::*;
    use std::fs;
    use std::sync::Mutex;
    use tempfile::TempDir;

    /// Serializes tests that read or mutate the process-wide tracking state
    /// (`TRACKING_ENABLED`, `OUTPUT_DIRECTORY`). The default test harness
    /// runs tests in parallel, so without this lock assertions such as
    /// `active_threads == 1` race against other tests flipping the flag.
    static GLOBAL_STATE_LOCK: Mutex<()> = Mutex::new(());

    /// Acquire the state lock, recovering from poisoning — a panic in a
    /// sibling test does not invalidate the shared atomics.
    fn lock_state() -> std::sync::MutexGuard<'static, ()> {
        GLOBAL_STATE_LOCK.lock().unwrap_or_else(|e| e.into_inner())
    }

    /// Fresh temp directory per test; deleted when the guard drops.
    fn create_test_dir() -> TempDir {
        tempfile::tempdir().expect("Failed to create temp directory")
    }

    #[test]
    fn test_trace_all_creates_directory() {
        let _guard = lock_state();
        let temp_dir = create_test_dir();
        let output_path = temp_dir.path().join("test_output");

        let result = trace_all(&output_path);
        assert!(result.is_ok());
        assert!(output_path.exists());
        assert!(TRACKING_ENABLED.load(Ordering::Relaxed));
    }

    #[test]
    fn test_trace_all_cleans_existing_directory() {
        let _guard = lock_state();
        let temp_dir = create_test_dir();
        let output_path = temp_dir.path().join("test_output");

        fs::create_dir_all(&output_path).unwrap();
        let test_file = output_path.join("existing_file.txt");
        fs::write(&test_file, "test content").unwrap();
        assert!(test_file.exists());

        let result = trace_all(&output_path);
        assert!(result.is_ok());
        assert!(output_path.exists());
        // trace_all must wipe pre-existing contents.
        assert!(!test_file.exists());
    }

    #[test]
    fn test_stop_tracing() {
        let _guard = lock_state();
        let temp_dir = create_test_dir();
        let output_path = temp_dir.path().join("test_output");

        let _ = trace_all(&output_path);

        let result = stop_tracing();
        assert!(result.is_ok());
    }

    #[test]
    fn test_stop_tracing_without_start() {
        let _guard = lock_state();
        TRACKING_ENABLED.store(false, Ordering::Relaxed);
        // Must be a no-op success when tracking was never started.
        let result = stop_tracing();
        assert!(result.is_ok());
    }

    #[test]
    fn test_trace_thread() {
        // Thread-local tracker only — no shared state, no lock needed.
        let temp_dir = create_test_dir();
        let output_path = temp_dir.path().join("test_output");

        let result = trace_thread(&output_path);
        assert!(result.is_ok());
        assert!(output_path.exists());
    }

    #[test]
    fn test_is_tracking() {
        let _guard = lock_state();
        let temp_dir = create_test_dir();
        let output_path = temp_dir.path().join("test_output");

        let _initial_state = is_tracking();

        let _ = trace_all(&output_path);
        let _tracking_state = is_tracking();
        let _ = stop_tracing();
        let _final_state = is_tracking();
    }

    #[test]
    fn test_memory_snapshot() {
        let _guard = lock_state();
        let temp_dir = create_test_dir();
        let output_path = temp_dir.path().join("test_output");

        let snapshot1 = memory_snapshot();
        assert!(snapshot1.active_threads <= 1);

        trace_all(&output_path).unwrap();
        let snapshot2 = memory_snapshot();
        assert_eq!(snapshot2.active_threads, 1);

        stop_tracing().unwrap();
        let snapshot3 = memory_snapshot();
        assert_eq!(snapshot3.active_threads, 0);
    }

    #[test]
    fn test_quick_trace() {
        let _guard = lock_state();
        let result = quick_trace(|| {
            let _data = vec![0u8; 1024];
            42
        });
        assert_eq!(result, 42);
    }

    #[test]
    fn test_tracking_enabled_state() {
        let _guard = lock_state();
        let temp_dir = create_test_dir();
        let output_path = temp_dir.path().join("test_output");

        let _initial_state = TRACKING_ENABLED.load(Ordering::Relaxed);

        let _ = trace_all(&output_path);
        let _enabled_state = TRACKING_ENABLED.load(Ordering::Relaxed);
        let _ = stop_tracing();
        let _final_state = TRACKING_ENABLED.load(Ordering::Relaxed);
    }

    #[test]
    fn test_output_directory_persistence() {
        let _guard = lock_state();
        let temp_dir = create_test_dir();
        let output_path = temp_dir.path().join("test_output");

        assert!(std::fs::create_dir_all(&output_path).is_ok());
        assert!(output_path.exists());

        let _ = trace_all(&output_path);
        let _ = stop_tracing();
    }

    #[test]
    fn test_sampling_config_creation() {
        let config = SamplingConfig::default();

        assert_eq!(config.large_allocation_rate, 1.0);
        assert_eq!(config.medium_allocation_rate, 0.1);
        assert_eq!(config.small_allocation_rate, 0.01);
        assert_eq!(config.large_threshold, 10 * 1024);
        assert_eq!(config.medium_threshold, 1024);
        assert_eq!(config.frequency_threshold, 10);
    }

    #[test]
    fn test_sampling_config_presets() {
        let high_precision = SamplingConfig::high_precision();
        assert!(high_precision.validate().is_ok());
        assert_eq!(high_precision.large_allocation_rate, 1.0);
        assert_eq!(high_precision.medium_allocation_rate, 0.5);

        let performance_optimized = SamplingConfig::performance_optimized();
        assert!(performance_optimized.validate().is_ok());
        assert_eq!(performance_optimized.small_allocation_rate, 0.001);

        let leak_detection = SamplingConfig::leak_detection();
        assert!(leak_detection.validate().is_ok());
        assert_eq!(leak_detection.medium_allocation_rate, 0.8);
    }

    #[test]
    fn test_error_handling_invalid_path() {
        let _guard = lock_state();
        // Empty path behavior is platform-dependent; just exercise the call.
        let result = trace_all("");
        let _ = result;
    }

    #[test]
    fn test_memory_snapshot_structure() {
        let snapshot = memory_snapshot();

        assert!(snapshot.current_mb >= 0.0);
        assert!(snapshot.peak_mb >= 0.0);
    }
}