// memscope_rs/capture/backends/core_tracker.rs
use super::core_types::{
7 AllocationInfo, MemoryStats, ThreadRegistryStats, TrackingError, TrackingResult,
8};
9use dashmap::DashMap;
10use std::sync::atomic::{AtomicU64, AtomicUsize, Ordering};
11use std::sync::{Arc, OnceLock};
12use std::thread;
13
/// Strategy value: every thread shares one global tracker instance.
const STRATEGY_GLOBAL_SINGLETON: u64 = 0;
/// Strategy value: each thread gets its own tracker instance.
const STRATEGY_THREAD_LOCAL: u64 = 1;

/// Currently selected tracking strategy; defaults to the global singleton.
/// Written by `configure_tracking_strategy`, read by `get_tracker`.
pub static TRACKING_STRATEGY: AtomicU64 = AtomicU64::new(STRATEGY_GLOBAL_SINGLETON);

/// Lazily-initialized tracker shared by all threads under the
/// global-singleton strategy.
static GLOBAL_TRACKER: OnceLock<Arc<MemoryTracker>> = OnceLock::new();
20
thread_local! {
    /// Per-thread tracker used under the thread-local strategy. On first
    /// access from a thread it is created and registered in the process-wide
    /// registry so `collect_all_trackers_local` can enumerate it later.
    static THREAD_LOCAL_TRACKER: Arc<MemoryTracker> = {
        let tracker = Arc::new(MemoryTracker::new());
        register_current_thread_tracker_local(&tracker);
        tracker
    };
}
28
/// Process-wide registry mapping thread ids to their thread-local trackers.
/// Note: entries are never removed, so dead threads keep their trackers
/// alive here (see `ThreadRegistryStats::dead_references`).
static LOCAL_THREAD_REGISTRY: OnceLock<
    Arc<dashmap::DashMap<thread::ThreadId, Arc<MemoryTracker>>>,
> = OnceLock::new();
33
34fn get_local_registry() -> Arc<dashmap::DashMap<thread::ThreadId, Arc<MemoryTracker>>> {
35 LOCAL_THREAD_REGISTRY
36 .get_or_init(|| Arc::new(dashmap::DashMap::new()))
37 .clone()
38}
39
40fn register_current_thread_tracker_local(tracker: &Arc<MemoryTracker>) {
41 let thread_id = thread::current().id();
42 get_local_registry().insert(thread_id, tracker.clone());
43}
44
/// Lock-free memory tracker: live allocations are held in a concurrent map
/// and all statistics are maintained with relaxed atomics.
pub struct MemoryTracker {
    /// Currently-live allocations, keyed by pointer address.
    active_allocations: DashMap<usize, AllocationInfo>,

    /// Cumulative count of allocations ever recorded.
    total_allocations: AtomicU64,
    /// Cumulative bytes ever allocated.
    total_allocated: AtomicU64,
    /// Cumulative count of deallocations ever recorded.
    total_deallocations: AtomicU64,
    /// Cumulative bytes ever deallocated.
    total_deallocated: AtomicU64,
    /// High-water mark of simultaneously live allocations.
    peak_allocations: AtomicUsize,
    /// High-water mark of live bytes (allocated minus deallocated).
    peak_memory: AtomicU64,

    /// Non-zero when fast mode is enabled. Set from `MEMSCOPE_TEST_MODE` /
    /// `cfg(test)` at construction and toggled via `set_fast_mode`; the flag
    /// is only stored and queried here — interpretation is up to callers.
    fast_mode: AtomicU64,
}
69
70impl MemoryTracker {
71 pub fn new() -> Self {
73 let fast_mode = std::env::var("MEMSCOPE_TEST_MODE").is_ok() || cfg!(test);
74
75 Self {
76 active_allocations: DashMap::new(),
77 total_allocations: AtomicU64::new(0),
78 total_allocated: AtomicU64::new(0),
79 total_deallocations: AtomicU64::new(0),
80 total_deallocated: AtomicU64::new(0),
81 peak_allocations: AtomicUsize::new(0),
82 peak_memory: AtomicU64::new(0),
83 fast_mode: AtomicU64::new(fast_mode as u64),
84 }
85 }
86
87 pub fn track_allocation(&self, ptr: usize, size: usize) -> TrackingResult<()> {
93 let allocation = AllocationInfo::new(ptr, size);
94
95 self.active_allocations.insert(ptr, allocation);
97
98 self.total_allocations.fetch_add(1, Ordering::Relaxed);
100 self.total_allocated
101 .fetch_add(size as u64, Ordering::Relaxed);
102
103 loop {
105 let current_count = self.active_allocations.len();
106 let current_peak = self.peak_allocations.load(Ordering::Relaxed);
107 if current_count <= current_peak {
108 break;
109 }
110 if self
111 .peak_allocations
112 .compare_exchange_weak(
113 current_peak,
114 current_count,
115 Ordering::Relaxed,
116 Ordering::Relaxed,
117 )
118 .is_ok()
119 {
120 break;
121 }
122 }
123
124 loop {
126 let current_memory = self
127 .total_allocated
128 .load(Ordering::Relaxed)
129 .saturating_sub(self.total_deallocated.load(Ordering::Relaxed));
130 let current_peak_memory = self.peak_memory.load(Ordering::Relaxed);
131 if current_memory <= current_peak_memory {
132 break;
133 }
134 if self
135 .peak_memory
136 .compare_exchange_weak(
137 current_peak_memory,
138 current_memory,
139 Ordering::Relaxed,
140 Ordering::Relaxed,
141 )
142 .is_ok()
143 {
144 break;
145 }
146 }
147
148 Ok(())
149 }
150
151 pub fn track_deallocation(&self, ptr: usize) -> TrackingResult<bool> {
160 if let Some((_, allocation)) = self.active_allocations.remove(&ptr) {
162 self.total_deallocations.fetch_add(1, Ordering::Relaxed);
164 self.total_deallocated
165 .fetch_add(allocation.size as u64, Ordering::Relaxed);
166 Ok(true)
167 } else {
168 #[cfg(debug_assertions)]
171 eprintln!(
172 "[memscope] Warning: deallocation called for untracked pointer {:x}. \
173 This may indicate a double-free or memory not tracked by memscope.",
174 ptr
175 );
176 Ok(false)
177 }
178 }
179
180 pub fn get_allocation_size(&self, ptr: usize) -> Option<usize> {
189 self.active_allocations.get(&ptr).map(|a| a.size)
190 }
191
192 pub fn associate_var(
201 &self,
202 ptr: usize,
203 var_name: String,
204 type_name: String,
205 source_file: Option<&str>,
206 source_line: Option<u32>,
207 ) -> TrackingResult<()> {
208 if let Some(mut allocation) = self.active_allocations.get_mut(&ptr) {
209 allocation.var_name = Some(var_name);
210 allocation.type_name = Some(type_name);
211 if let (Some(file), Some(line)) = (source_file, source_line) {
212 allocation.set_source_location(file, line);
213 }
214 }
215
216 Ok(())
217 }
218
219 pub fn fast_track_allocation(
226 &self,
227 ptr: usize,
228 size: usize,
229 var_name: String,
230 ) -> TrackingResult<()> {
231 let mut allocation = AllocationInfo::new(ptr, size);
232 allocation.var_name = Some(var_name);
233
234 self.active_allocations.insert(ptr, allocation);
236
237 self.total_allocations.fetch_add(1, Ordering::Relaxed);
239 self.total_allocated
240 .fetch_add(size as u64, Ordering::Relaxed);
241
242 Ok(())
243 }
244
245 pub fn get_stats(&self) -> TrackingResult<MemoryStats> {
247 let active_count = self.active_allocations.len();
248 let total_allocated = self.total_allocated.load(Ordering::Relaxed);
249 let total_deallocated = self.total_deallocated.load(Ordering::Relaxed);
250 let active_memory = total_allocated.saturating_sub(total_deallocated);
251
252 Ok(MemoryStats {
253 total_allocations: self.total_allocations.load(Ordering::Relaxed),
254 total_allocated,
255 active_allocations: active_count,
256 active_memory,
257 peak_allocations: self.peak_allocations.load(Ordering::Relaxed),
258 peak_memory: self.peak_memory.load(Ordering::Relaxed),
259 total_deallocations: self.total_deallocations.load(Ordering::Relaxed),
260 total_deallocated,
261 leaked_allocations: 0,
262 leaked_memory: 0,
263 })
264 }
265
266 pub fn detect_leaks(&self) -> (usize, u64) {
272 let active_count = self.active_allocations.len();
273 let total_allocated = self.total_allocated.load(Ordering::Relaxed);
274 let total_deallocated = self.total_deallocated.load(Ordering::Relaxed);
275 let active_memory = total_allocated.saturating_sub(total_deallocated);
276 (active_count, active_memory)
277 }
278
279 pub fn get_active_allocations(&self) -> TrackingResult<Vec<AllocationInfo>> {
281 Ok(self
282 .active_allocations
283 .iter()
284 .map(|entry| entry.value().clone())
285 .collect())
286 }
287
288 pub fn get_memory_by_type(&self) -> TrackingResult<std::collections::HashMap<String, usize>> {
290 let mut type_sizes: std::collections::HashMap<String, usize> =
291 std::collections::HashMap::new();
292
293 for entry in self.active_allocations.iter() {
294 let alloc = entry.value();
295 let type_name = alloc
296 .type_name
297 .clone()
298 .unwrap_or_else(|| "unknown".to_string());
299 *type_sizes.entry(type_name).or_insert(0) += alloc.size;
300 }
301
302 Ok(type_sizes)
303 }
304
305 pub fn set_fast_mode(&self, enabled: bool) {
307 self.fast_mode.store(enabled as u64, Ordering::Relaxed);
308 }
309
310 pub fn is_fast_mode(&self) -> bool {
312 self.fast_mode.load(Ordering::Relaxed) != 0
313 }
314
315 pub fn enable_fast_mode(&self) {
317 self.fast_mode.store(1, Ordering::Relaxed);
318 }
319
320 pub fn ensure_memory_analysis_path<P: AsRef<std::path::Path>>(
322 &self,
323 path: P,
324 ) -> std::path::PathBuf {
325 let path = path.as_ref();
326 let base_dir = path
327 .parent()
328 .unwrap_or(std::path::Path::new("MemoryAnalysis"));
329
330 if let Err(e) = std::fs::create_dir_all(base_dir) {
331 tracing::warn!("Failed to create directory {:?}: {}", base_dir, e);
332 }
333
334 if base_dir == std::path::Path::new("") {
335 std::path::Path::new("MemoryAnalysis").join(path.file_name().unwrap_or_default())
336 } else {
337 path.to_path_buf()
338 }
339 }
340
341 pub fn ensure_memscope_path<P: AsRef<std::path::Path>>(&self, path: P) -> std::path::PathBuf {
343 let mut output_path = self.ensure_memory_analysis_path(path);
344
345 if output_path.extension().is_none()
346 || output_path.extension() != Some(std::ffi::OsStr::new("memscope"))
347 {
348 output_path.set_extension("memscope");
349 }
350
351 output_path
352 }
353
354 pub fn export_to_memscope<P: AsRef<std::path::Path>>(&self, path: P) -> TrackingResult<()> {
359 self.export_to_json(path)
360 }
361
362 pub fn export_to_json<P: AsRef<std::path::Path>>(&self, path: P) -> TrackingResult<()> {
364 let output_path = self.ensure_memory_analysis_path(&path);
365
366 let final_path = if output_path.is_dir() {
367 output_path.join("memory_analysis.json")
368 } else {
369 output_path
370 };
371
372 let allocations = self.get_active_allocations()?;
373
374 let json = serde_json::to_string_pretty(&allocations)
375 .map_err(|e| TrackingError::SerializationError(e.to_string()))?;
376
377 std::fs::write(&final_path, json).map_err(|e| TrackingError::ExportError(e.to_string()))?;
378
379 Ok(())
380 }
381}
382
impl Default for MemoryTracker {
    /// Equivalent to [`MemoryTracker::new`].
    fn default() -> Self {
        Self::new()
    }
}
388
impl Drop for MemoryTracker {
    // Emits diagnostics when a tracker is dropped: an export hint when
    // MEMSCOPE_VERBOSE is set, and a leak warning if allocations are still
    // live at drop time.
    fn drop(&mut self) {
        if std::env::var("MEMSCOPE_VERBOSE").is_ok() {
            tracing::info!(
                "💡 Tip: Use tracker.export_to_json() before drop to save analysis results"
            );
        }

        // Anything still tracked here was never deallocated — a potential
        // leak from the tracked program's point of view.
        let active_count = self.active_allocations.len();
        if active_count > 0 {
            tracing::warn!(
                "Dropping MemoryTracker with {} active allocations (potential memory leaks)",
                active_count
            );
        }

        // Explicit clear; the map would be dropped anyway, this just releases
        // the entries before the remaining fields are torn down.
        self.active_allocations.clear();
    }
}
409
410pub fn configure_tracking_strategy(is_concurrent: bool) {
415 let strategy = if is_concurrent {
416 STRATEGY_THREAD_LOCAL
417 } else {
418 STRATEGY_GLOBAL_SINGLETON
419 };
420
421 TRACKING_STRATEGY.store(strategy, Ordering::Relaxed);
422
423 tracing::info!(
424 "Configured tracking strategy: {}",
425 if is_concurrent {
426 "thread-local"
427 } else {
428 "global-singleton"
429 }
430 );
431}
432
433pub fn get_tracker() -> Arc<MemoryTracker> {
439 match TRACKING_STRATEGY.load(Ordering::Relaxed) {
440 STRATEGY_GLOBAL_SINGLETON => GLOBAL_TRACKER
441 .get_or_init(|| Arc::new(MemoryTracker::new()))
442 .clone(),
443 STRATEGY_THREAD_LOCAL => THREAD_LOCAL_TRACKER.with(|tracker| tracker.clone()),
444 _ => {
445 tracing::warn!("Unknown tracking strategy, falling back to global singleton");
446 GLOBAL_TRACKER
447 .get_or_init(|| Arc::new(MemoryTracker::new()))
448 .clone()
449 }
450 }
451}
452
453pub fn collect_all_trackers_local() -> Vec<Arc<MemoryTracker>> {
455 get_local_registry()
456 .iter()
457 .map(|entry| entry.value().clone())
458 .collect()
459}
460
461pub fn get_registry_stats_local() -> ThreadRegistryStats {
463 let registry = get_local_registry();
464 let total_threads = registry.len();
465
466 ThreadRegistryStats {
467 total_threads_registered: total_threads,
468 active_threads: total_threads,
469 dead_references: 0,
470 }
471}
472
473pub fn has_active_trackers_local() -> bool {
475 !get_local_registry().is_empty()
476}
477
#[cfg(test)]
mod tests {
    use super::*;

    /// Serializes the tests that mutate the process-global
    /// `TRACKING_STRATEGY`. `cargo test` runs tests on multiple threads by
    /// default, so without this lock the singleton and thread-local strategy
    /// tests race and become flaky. Poisoning is ignored: a panic in one
    /// test should not fail the other.
    static STRATEGY_LOCK: std::sync::Mutex<()> = std::sync::Mutex::new(());

    #[test]
    fn test_memory_tracker_creation() {
        let tracker = MemoryTracker::new();
        // `cfg!(test)` forces fast mode on in unit-test builds.
        assert!(tracker.is_fast_mode());
    }

    #[test]
    fn test_track_allocation() {
        let tracker = MemoryTracker::new();
        let result = tracker.track_allocation(0x1000, 1024);
        assert!(result.is_ok());

        let stats = tracker.get_stats().unwrap();
        assert_eq!(stats.total_allocations, 1);
        assert_eq!(stats.active_allocations, 1);
    }

    #[test]
    fn test_track_deallocation() {
        let tracker = MemoryTracker::new();
        tracker.track_allocation(0x1000, 1024).unwrap();
        tracker.track_deallocation(0x1000).unwrap();

        let stats = tracker.get_stats().unwrap();
        assert_eq!(stats.total_deallocations, 1);
        assert_eq!(stats.active_allocations, 0);
    }

    #[test]
    fn test_associate_var() {
        let tracker = MemoryTracker::new();
        tracker.track_allocation(0x1000, 1024).unwrap();
        tracker
            .associate_var(
                0x1000,
                "test_var".to_string(),
                "String".to_string(),
                None,
                None,
            )
            .unwrap();

        let allocations = tracker.get_active_allocations().unwrap();
        assert_eq!(allocations[0].var_name, Some("test_var".to_string()));
        assert_eq!(allocations[0].type_name, Some("String".to_string()));
    }

    #[test]
    fn test_fast_track_allocation() {
        let tracker = MemoryTracker::new();
        tracker
            .fast_track_allocation(0x1000, 1024, "test_var".to_string())
            .unwrap();

        let allocations = tracker.get_active_allocations().unwrap();
        assert_eq!(allocations[0].var_name, Some("test_var".to_string()));
    }

    #[test]
    fn test_peak_tracking() {
        let tracker = MemoryTracker::new();

        tracker.track_allocation(0x1000, 1024).unwrap();
        tracker.track_allocation(0x2000, 2048).unwrap();
        tracker.track_allocation(0x3000, 4096).unwrap();

        let stats = tracker.get_stats().unwrap();
        assert_eq!(stats.peak_allocations, 3);
        assert_eq!(stats.peak_memory, 7168);
    }

    #[test]
    fn test_fast_mode() {
        let tracker = MemoryTracker::new();
        tracker.set_fast_mode(true);
        assert!(tracker.is_fast_mode());

        tracker.set_fast_mode(false);
        assert!(!tracker.is_fast_mode());

        tracker.enable_fast_mode();
        assert!(tracker.is_fast_mode());
    }

    #[test]
    fn test_export_to_json() {
        let tracker = MemoryTracker::new();
        tracker.track_allocation(0x1000, 1024).unwrap();

        let temp_dir = tempfile::TempDir::new().expect("Failed to create temp dir");
        let file_path = temp_dir.path().join("test_export.json");
        let result = tracker.export_to_json(&file_path);
        assert!(result.is_ok());
        assert!(file_path.exists());
    }

    #[test]
    fn test_global_tracker_singleton() {
        // Hold the lock so the thread-local test cannot flip the strategy
        // between our two get_tracker() calls.
        let _guard = STRATEGY_LOCK.lock().unwrap_or_else(|e| e.into_inner());
        configure_tracking_strategy(false);

        let tracker1 = get_tracker();
        let tracker2 = get_tracker();

        assert!(Arc::ptr_eq(&tracker1, &tracker2));
    }

    #[test]
    fn test_thread_local_tracker() {
        let _guard = STRATEGY_LOCK.lock().unwrap_or_else(|e| e.into_inner());
        configure_tracking_strategy(true);

        let tracker1 = get_tracker();
        let trackers = collect_all_trackers_local();

        assert!(!trackers.is_empty());
        assert!(trackers.iter().any(|t| Arc::ptr_eq(t, &tracker1)));

        // Restore the default so later get_tracker() callers are unaffected.
        configure_tracking_strategy(false);
    }
}
599}