// memscope_rs/capture/backends/core_tracker.rs

1//! Core memory tracker implementation (self-contained, no old system dependencies)
2//!
3//! This module provides high-performance memory tracking using DashMap
4//! and atomic operations, completely independent of the old system.
5
6use super::core_types::{
7    AllocationInfo, MemoryStats, ThreadRegistryStats, TrackingError, TrackingResult,
8};
9use dashmap::DashMap;
10use std::sync::atomic::{AtomicU64, AtomicUsize, Ordering};
11use std::sync::{Arc, OnceLock};
12use std::thread;
13use tracing::warn;
14
/// Strategy id: a single process-wide tracker shared by all threads.
const STRATEGY_GLOBAL_SINGLETON: u64 = 0;
/// Strategy id: one tracker per thread, registered in the local thread registry.
const STRATEGY_THREAD_LOCAL: u64 = 1;

/// Currently selected tracking strategy.
/// Written by `configure_tracking_strategy`, read by `get_tracker`.
pub static TRACKING_STRATEGY: AtomicU64 = AtomicU64::new(STRATEGY_GLOBAL_SINGLETON);

/// Lazily-initialized process-wide tracker used in global-singleton mode.
static GLOBAL_TRACKER: OnceLock<Arc<MemoryTracker>> = OnceLock::new();
21
thread_local! {
    // Per-thread tracker used in thread-local mode. On first access from a
    // thread it is created and registered in LOCAL_THREAD_REGISTRY so that
    // collect_all_trackers_local() can aggregate across threads.
    static THREAD_LOCAL_TRACKER: Arc<MemoryTracker> = {
        let tracker = Arc::new(MemoryTracker::new());
        register_current_thread_tracker_local(&tracker);
        tracker
    };
}
29
/// Registry mapping each thread id to that thread's tracker.
///
/// NOTE(review): entries are never removed when a thread exits, so the
/// registry keeps trackers of finished threads alive — confirm whether that
/// retention is intentional for post-mortem aggregation.
static LOCAL_THREAD_REGISTRY: OnceLock<
    Arc<dashmap::DashMap<thread::ThreadId, Arc<MemoryTracker>>>,
> = OnceLock::new();
34
35fn get_local_registry() -> Arc<dashmap::DashMap<thread::ThreadId, Arc<MemoryTracker>>> {
36    LOCAL_THREAD_REGISTRY
37        .get_or_init(|| Arc::new(dashmap::DashMap::new()))
38        .clone()
39}
40
41fn register_current_thread_tracker_local(tracker: &Arc<MemoryTracker>) {
42    let thread_id = thread::current().id();
43    get_local_registry().insert(thread_id, tracker.clone());
44}
45
/// Core memory tracking functionality.
///
/// The MemoryTracker maintains records of all memory allocations and deallocations,
/// provides statistics, and supports exporting data in various formats.
///
/// # Performance Characteristics
/// - **DashMap**: Lock-free concurrent access for allocations
/// - **Atomic counters**: Lock-free statistics updates
/// - **Thread-local**: Zero-contention for per-thread tracking
pub struct MemoryTracker {
    /// Active allocations keyed by pointer address (DashMap for lock-free concurrent access)
    active_allocations: DashMap<usize, AllocationInfo>,

    /// Atomic statistics (lock-free updates)
    /// Cumulative count of allocations ever tracked.
    total_allocations: AtomicU64,
    /// Cumulative bytes ever allocated.
    total_allocated: AtomicU64,
    /// Cumulative count of deallocations ever tracked.
    total_deallocations: AtomicU64,
    /// Cumulative bytes ever deallocated.
    total_deallocated: AtomicU64,
    /// High-water mark of concurrently active allocations.
    peak_allocations: AtomicUsize,
    /// High-water mark of live bytes (allocated minus deallocated).
    peak_memory: AtomicU64,

    /// Fast mode flag for testing (reduces overhead); stored as 0/1 in a u64.
    fast_mode: AtomicU64,
}
70
71impl MemoryTracker {
72    /// Create a new memory tracker.
73    pub fn new() -> Self {
74        let fast_mode = std::env::var("MEMSCOPE_TEST_MODE").is_ok() || cfg!(test);
75
76        Self {
77            active_allocations: DashMap::new(),
78            total_allocations: AtomicU64::new(0),
79            total_allocated: AtomicU64::new(0),
80            total_deallocations: AtomicU64::new(0),
81            total_deallocated: AtomicU64::new(0),
82            peak_allocations: AtomicUsize::new(0),
83            peak_memory: AtomicU64::new(0),
84            fast_mode: AtomicU64::new(fast_mode as u64),
85        }
86    }
87
88    /// Track a memory allocation.
89    ///
90    /// # Arguments
91    /// * `ptr` - Memory pointer address
92    /// * `size` - Allocation size in bytes
93    pub fn track_allocation(&self, ptr: usize, size: usize) -> TrackingResult<()> {
94        let allocation = AllocationInfo::new(ptr, size);
95
96        // Insert into DashMap (lock-free)
97        self.active_allocations.insert(ptr, allocation);
98
99        // Update atomic statistics (lock-free)
100        self.total_allocations.fetch_add(1, Ordering::Relaxed);
101        self.total_allocated
102            .fetch_add(size as u64, Ordering::Relaxed);
103
104        // Update peak allocations using CAS loop to avoid TOCTOU race
105        loop {
106            let current_count = self.active_allocations.len();
107            let current_peak = self.peak_allocations.load(Ordering::Relaxed);
108            if current_count <= current_peak {
109                break;
110            }
111            if self
112                .peak_allocations
113                .compare_exchange_weak(
114                    current_peak,
115                    current_count,
116                    Ordering::Relaxed,
117                    Ordering::Relaxed,
118                )
119                .is_ok()
120            {
121                break;
122            }
123        }
124
125        // Update peak memory using CAS loop
126        loop {
127            let current_memory = self
128                .total_allocated
129                .load(Ordering::Relaxed)
130                .saturating_sub(self.total_deallocated.load(Ordering::Relaxed));
131            let current_peak_memory = self.peak_memory.load(Ordering::Relaxed);
132            if current_memory <= current_peak_memory {
133                break;
134            }
135            if self
136                .peak_memory
137                .compare_exchange_weak(
138                    current_peak_memory,
139                    current_memory,
140                    Ordering::Relaxed,
141                    Ordering::Relaxed,
142                )
143                .is_ok()
144            {
145                break;
146            }
147        }
148
149        Ok(())
150    }
151
152    /// Track a memory deallocation.
153    ///
154    /// # Arguments
155    /// * `ptr` - Memory pointer address
156    ///
157    /// # Returns
158    /// * `Ok(true)` if the allocation was found and removed
159    /// * `Ok(false)` if the pointer was not tracked (possible double-free or untracked allocation)
160    pub fn track_deallocation(&self, ptr: usize) -> TrackingResult<bool> {
161        // Remove from DashMap (lock-free)
162        if let Some((_, allocation)) = self.active_allocations.remove(&ptr) {
163            // Update atomic statistics (lock-free)
164            self.total_deallocations.fetch_add(1, Ordering::Relaxed);
165            self.total_deallocated
166                .fetch_add(allocation.size as u64, Ordering::Relaxed);
167            Ok(true)
168        } else {
169            // Pointer not found - could be double-free or untracked allocation
170            // Log warning in debug mode
171            #[cfg(debug_assertions)]
172            warn!(
173                "deallocation called for untracked pointer {:x}. \
174                 This may indicate a double-free or memory not tracked by memscope.",
175                ptr
176            );
177            Ok(false)
178        }
179    }
180
181    /// Get the size of an active allocation.
182    ///
183    /// # Arguments
184    /// * `ptr` - Memory pointer address
185    ///
186    /// # Returns
187    /// * `Some(size)` if the allocation exists
188    /// * `None` if the pointer is not tracked
189    pub fn get_allocation_size(&self, ptr: usize) -> Option<usize> {
190        self.active_allocations.get(&ptr).map(|a| a.size)
191    }
192
193    /// Associate a variable name and type with an allocation.
194    ///
195    /// # Arguments
196    /// * `ptr` - Memory pointer address
197    /// * `var_name` - Variable name
198    /// * `type_name` - Type name
199    /// * `source_file` - Source file (optional)
200    /// * `source_line` - Source line (optional)
201    pub fn associate_var(
202        &self,
203        ptr: usize,
204        var_name: String,
205        type_name: String,
206        source_file: Option<&str>,
207        source_line: Option<u32>,
208    ) -> TrackingResult<()> {
209        if let Some(mut allocation) = self.active_allocations.get_mut(&ptr) {
210            allocation.var_name = Some(var_name);
211            allocation.type_name = Some(type_name);
212            if let (Some(file), Some(line)) = (source_file, source_line) {
213                allocation.set_source_location(file, line);
214            }
215        }
216
217        Ok(())
218    }
219
220    /// Fast track allocation with variable name.
221    ///
222    /// # Arguments
223    /// * `ptr` - Memory pointer address
224    /// * `size` - Allocation size in bytes
225    /// * `var_name` - Variable name
226    pub fn fast_track_allocation(
227        &self,
228        ptr: usize,
229        size: usize,
230        var_name: String,
231    ) -> TrackingResult<()> {
232        let mut allocation = AllocationInfo::new(ptr, size);
233        allocation.var_name = Some(var_name);
234
235        // Insert into DashMap (lock-free)
236        self.active_allocations.insert(ptr, allocation);
237
238        // Update atomic statistics (lock-free)
239        self.total_allocations.fetch_add(1, Ordering::Relaxed);
240        self.total_allocated
241            .fetch_add(size as u64, Ordering::Relaxed);
242
243        Ok(())
244    }
245
246    /// Get current memory statistics.
247    pub fn get_stats(&self) -> TrackingResult<MemoryStats> {
248        let active_count = self.active_allocations.len();
249        let total_allocated = self.total_allocated.load(Ordering::Relaxed);
250        let total_deallocated = self.total_deallocated.load(Ordering::Relaxed);
251        let active_memory = total_allocated.saturating_sub(total_deallocated);
252
253        Ok(MemoryStats {
254            total_allocations: self.total_allocations.load(Ordering::Relaxed),
255            total_allocated,
256            active_allocations: active_count,
257            active_memory,
258            peak_allocations: self.peak_allocations.load(Ordering::Relaxed),
259            peak_memory: self.peak_memory.load(Ordering::Relaxed),
260            total_deallocations: self.total_deallocations.load(Ordering::Relaxed),
261            total_deallocated,
262            leaked_allocations: 0,
263            leaked_memory: 0,
264        })
265    }
266
267    /// Detect memory leaks at program shutdown.
268    ///
269    /// This should be called when the program is shutting down to detect
270    /// allocations that were never freed. Returns the count and total size
271    /// of allocations that are still active.
272    pub fn detect_leaks(&self) -> (usize, u64) {
273        let active_count = self.active_allocations.len();
274        let total_allocated = self.total_allocated.load(Ordering::Relaxed);
275        let total_deallocated = self.total_deallocated.load(Ordering::Relaxed);
276        let active_memory = total_allocated.saturating_sub(total_deallocated);
277        (active_count, active_memory)
278    }
279
280    /// Get all currently active allocations.
281    pub fn get_active_allocations(&self) -> TrackingResult<Vec<AllocationInfo>> {
282        Ok(self
283            .active_allocations
284            .iter()
285            .map(|entry| entry.value().clone())
286            .collect())
287    }
288
289    /// Get memory grouped by type.
290    pub fn get_memory_by_type(&self) -> TrackingResult<std::collections::HashMap<String, usize>> {
291        let mut type_sizes: std::collections::HashMap<String, usize> =
292            std::collections::HashMap::new();
293
294        for entry in self.active_allocations.iter() {
295            let alloc = entry.value();
296            let type_name = alloc
297                .type_name
298                .clone()
299                .unwrap_or_else(|| "unknown".to_string());
300            *type_sizes.entry(type_name).or_insert(0) += alloc.size;
301        }
302
303        Ok(type_sizes)
304    }
305
306    /// Enable or disable fast mode.
307    pub fn set_fast_mode(&self, enabled: bool) {
308        self.fast_mode.store(enabled as u64, Ordering::Relaxed);
309    }
310
311    /// Check if fast mode is enabled.
312    pub fn is_fast_mode(&self) -> bool {
313        self.fast_mode.load(Ordering::Relaxed) != 0
314    }
315
316    /// Enable fast mode for testing.
317    pub fn enable_fast_mode(&self) {
318        self.fast_mode.store(1, Ordering::Relaxed);
319    }
320
321    /// Ensure memory analysis path exists and return the full path.
322    pub fn ensure_memory_analysis_path<P: AsRef<std::path::Path>>(
323        &self,
324        path: P,
325    ) -> std::path::PathBuf {
326        let path = path.as_ref();
327        let base_dir = path
328            .parent()
329            .unwrap_or(std::path::Path::new("MemoryAnalysis"));
330
331        if let Err(e) = std::fs::create_dir_all(base_dir) {
332            tracing::warn!("Failed to create directory {:?}: {}", base_dir, e);
333        }
334
335        if base_dir == std::path::Path::new("") {
336            std::path::Path::new("MemoryAnalysis").join(path.file_name().unwrap_or_default())
337        } else {
338            path.to_path_buf()
339        }
340    }
341
342    /// Ensure path uses .memscope extension and is in MemoryAnalysis directory.
343    pub fn ensure_memscope_path<P: AsRef<std::path::Path>>(&self, path: P) -> std::path::PathBuf {
344        let mut output_path = self.ensure_memory_analysis_path(path);
345
346        if output_path.extension().is_none()
347            || output_path.extension() != Some(std::ffi::OsStr::new("memscope"))
348        {
349            output_path.set_extension("memscope");
350        }
351
352        output_path
353    }
354
355    /// Export memory tracking data to .memscope file format (JSON content).
356    ///
357    /// This method exports memory tracking data with a .memscope file extension.
358    /// The content is serialized as JSON for human readability and interoperability.
359    pub fn export_to_memscope<P: AsRef<std::path::Path>>(&self, path: P) -> TrackingResult<()> {
360        self.export_to_json(path)
361    }
362
363    /// Export memory tracking data to JSON format.
364    pub fn export_to_json<P: AsRef<std::path::Path>>(&self, path: P) -> TrackingResult<()> {
365        let output_path = self.ensure_memory_analysis_path(&path);
366
367        let final_path = if output_path.is_dir() {
368            output_path.join("memory_analysis.json")
369        } else {
370            output_path
371        };
372
373        let allocations = self.get_active_allocations()?;
374
375        let json = serde_json::to_string_pretty(&allocations)
376            .map_err(|e| TrackingError::SerializationError(e.to_string()))?;
377
378        std::fs::write(&final_path, json).map_err(|e| TrackingError::ExportError(e.to_string()))?;
379
380        Ok(())
381    }
382}
383
384impl Default for MemoryTracker {
385    fn default() -> Self {
386        Self::new()
387    }
388}
389
390impl Drop for MemoryTracker {
391    fn drop(&mut self) {
392        if std::env::var("MEMSCOPE_VERBOSE").is_ok() {
393            tracing::info!(
394                "💡 Tip: Use tracker.export_to_json() before drop to save analysis results"
395            );
396        }
397
398        let active_count = self.active_allocations.len();
399        if active_count > 0 {
400            tracing::warn!(
401                "Dropping MemoryTracker with {} active allocations (potential memory leaks)",
402                active_count
403            );
404        }
405
406        // Clear active_allocations to release memory
407        self.active_allocations.clear();
408    }
409}
410
411/// Configure tracking strategy for the application.
412///
413/// # Arguments
414/// * `is_concurrent` - true for multi-threaded/async applications, false for single-threaded
415pub fn configure_tracking_strategy(is_concurrent: bool) {
416    let strategy = if is_concurrent {
417        STRATEGY_THREAD_LOCAL
418    } else {
419        STRATEGY_GLOBAL_SINGLETON
420    };
421
422    TRACKING_STRATEGY.store(strategy, Ordering::Relaxed);
423
424    tracing::info!(
425        "Configured tracking strategy: {}",
426        if is_concurrent {
427            "thread-local"
428        } else {
429            "global-singleton"
430        }
431    );
432}
433
434/// Get the appropriate memory tracker based on the current strategy.
435///
436/// # Returns
437/// * In single-threaded mode: returns the global singleton tracker
438/// * In concurrent mode: returns the current thread's local tracker
439pub fn get_tracker() -> Arc<MemoryTracker> {
440    match TRACKING_STRATEGY.load(Ordering::Relaxed) {
441        STRATEGY_GLOBAL_SINGLETON => GLOBAL_TRACKER
442            .get_or_init(|| Arc::new(MemoryTracker::new()))
443            .clone(),
444        STRATEGY_THREAD_LOCAL => THREAD_LOCAL_TRACKER.with(|tracker| tracker.clone()),
445        _ => {
446            tracing::warn!("Unknown tracking strategy, falling back to global singleton");
447            GLOBAL_TRACKER
448                .get_or_init(|| Arc::new(MemoryTracker::new()))
449                .clone()
450        }
451    }
452}
453
454/// Collect all thread-local trackers.
455pub fn collect_all_trackers_local() -> Vec<Arc<MemoryTracker>> {
456    get_local_registry()
457        .iter()
458        .map(|entry| entry.value().clone())
459        .collect()
460}
461
462/// Get registry statistics.
463pub fn get_registry_stats_local() -> ThreadRegistryStats {
464    let registry = get_local_registry();
465    let total_threads = registry.len();
466
467    ThreadRegistryStats {
468        total_threads_registered: total_threads,
469        active_threads: total_threads,
470        dead_references: 0,
471    }
472}
473
474/// Check if there are active trackers.
475pub fn has_active_trackers_local() -> bool {
476    !get_local_registry().is_empty()
477}
478
479#[cfg(test)]
480mod tests {
481    use super::*;
482    use std::sync::Arc;
483    use std::thread;
484
    /// Objective: Verify MemoryTracker creation with default values
    /// Invariants: New tracker should have zero allocations and fast_mode enabled in test
    #[test]
    fn test_memory_tracker_creation() {
        let tracker = MemoryTracker::new();
        // `MemoryTracker::new` turns fast mode on under `cfg!(test)`.
        assert!(
            tracker.is_fast_mode(),
            "Fast mode should be enabled in test mode"
        );

        let stats = tracker.get_stats().expect("Should get stats");
        assert_eq!(
            stats.total_allocations, 0,
            "Initial total allocations should be 0"
        );
        assert_eq!(
            stats.active_allocations, 0,
            "Initial active allocations should be 0"
        );
        assert_eq!(
            stats.peak_allocations, 0,
            "Initial peak allocations should be 0"
        );
    }

    /// Objective: Verify Default trait implementation
    /// Invariants: Default should create same as new()
    #[test]
    fn test_memory_tracker_default() {
        let tracker = MemoryTracker::default();
        let stats = tracker.get_stats().expect("Should get stats");
        assert_eq!(
            stats.total_allocations, 0,
            "Default tracker should have 0 allocations"
        );
    }

    /// Objective: Verify track_allocation updates statistics correctly
    /// Invariants: Should increment total_allocations and active_allocations
    #[test]
    fn test_track_allocation() {
        let tracker = MemoryTracker::new();
        let result = tracker.track_allocation(0x1000, 1024);
        assert!(result.is_ok(), "track_allocation should succeed");

        let stats = tracker.get_stats().expect("Should get stats");
        assert_eq!(stats.total_allocations, 1, "Total allocations should be 1");
        assert_eq!(
            stats.active_allocations, 1,
            "Active allocations should be 1"
        );
        assert_eq!(
            stats.total_allocated, 1024,
            "Total allocated should be 1024"
        );
    }

    /// Objective: Verify track_deallocation removes allocation correctly
    /// Invariants: Should decrement active_allocations and increment total_deallocations
    #[test]
    fn test_track_deallocation() {
        let tracker = MemoryTracker::new();
        tracker.track_allocation(0x1000, 1024).unwrap();
        let result = tracker.track_deallocation(0x1000);
        assert!(result.is_ok(), "track_deallocation should succeed");
        assert!(
            result.unwrap(),
            "track_deallocation should return true for tracked pointer"
        );

        let stats = tracker.get_stats().expect("Should get stats");
        assert_eq!(
            stats.total_deallocations, 1,
            "Total deallocations should be 1"
        );
        assert_eq!(
            stats.active_allocations, 0,
            "Active allocations should be 0"
        );
        assert_eq!(
            stats.total_deallocated, 1024,
            "Total deallocated should be 1024"
        );
    }

    /// Objective: Verify deallocation of untracked pointer returns false
    /// Invariants: Should return Ok(false) for untracked pointer
    #[test]
    fn test_deallocation_untracked_pointer() {
        let tracker = MemoryTracker::new();
        // 0xdead was never tracked, so this exercises the not-found branch.
        let result = tracker.track_deallocation(0xdead);
        assert!(result.is_ok(), "Should not error on untracked pointer");
        assert!(
            !result.unwrap(),
            "Should return false for untracked pointer"
        );
    }

    /// Objective: Verify get_allocation_size returns correct size
    /// Invariants: Should return Some(size) for tracked pointer
    #[test]
    fn test_get_allocation_size() {
        let tracker = MemoryTracker::new();
        tracker.track_allocation(0x1000, 2048).unwrap();

        let size = tracker.get_allocation_size(0x1000);
        assert_eq!(size, Some(2048), "Should return correct allocation size");
    }

    /// Objective: Verify get_allocation_size returns None for untracked pointer
    /// Invariants: Should return None for untracked pointer
    #[test]
    fn test_get_allocation_size_untracked() {
        let tracker = MemoryTracker::new();

        let size = tracker.get_allocation_size(0xdead);
        assert!(size.is_none(), "Should return None for untracked pointer");
    }
603
    /// Objective: Verify associate_var updates allocation metadata
    /// Invariants: Should set var_name and type_name correctly
    #[test]
    fn test_associate_var() {
        let tracker = MemoryTracker::new();
        tracker.track_allocation(0x1000, 1024).unwrap();
        tracker
            .associate_var(
                0x1000,
                "test_var".to_string(),
                "String".to_string(),
                Some("test.rs"),
                Some(42),
            )
            .unwrap();

        // Only one allocation is tracked, so index 0 is the one just annotated.
        let allocations = tracker.get_active_allocations().unwrap();
        assert_eq!(
            allocations[0].var_name,
            Some("test_var".to_string()),
            "Variable name should be set"
        );
        assert_eq!(
            allocations[0].type_name,
            Some("String".to_string()),
            "Type name should be set"
        );
    }

    /// Objective: Verify associate_var for non-existent pointer
    /// Invariants: Should succeed silently without error
    #[test]
    fn test_associate_var_nonexistent() {
        let tracker = MemoryTracker::new();
        let result = tracker.associate_var(
            0xdead,
            "test_var".to_string(),
            "String".to_string(),
            None,
            None,
        );
        assert!(
            result.is_ok(),
            "Should succeed silently for non-existent pointer"
        );
    }

    /// Objective: Verify fast_track_allocation with variable name
    /// Invariants: Should track allocation with var_name in one call
    #[test]
    fn test_fast_track_allocation() {
        let tracker = MemoryTracker::new();
        tracker
            .fast_track_allocation(0x1000, 1024, "test_var".to_string())
            .unwrap();

        let allocations = tracker.get_active_allocations().unwrap();
        assert_eq!(
            allocations[0].var_name,
            Some("test_var".to_string()),
            "Variable name should be set"
        );
        assert_eq!(allocations[0].size, 1024, "Size should be correct");
    }

    /// Objective: Verify peak allocations tracking
    /// Invariants: Peak should track maximum concurrent allocations
    #[test]
    fn test_peak_tracking() {
        let tracker = MemoryTracker::new();

        tracker.track_allocation(0x1000, 1024).unwrap();
        tracker.track_allocation(0x2000, 2048).unwrap();
        tracker.track_allocation(0x3000, 4096).unwrap();

        // 1024 + 2048 + 4096 = 7168 bytes live at the peak.
        let stats = tracker.get_stats().unwrap();
        assert_eq!(stats.peak_allocations, 3, "Peak allocations should be 3");
        assert_eq!(stats.peak_memory, 7168, "Peak memory should be 7168");

        // Peaks are monotonic: deallocation lowers active, never peak.
        tracker.track_deallocation(0x1000).unwrap();
        let stats = tracker.get_stats().unwrap();
        assert_eq!(
            stats.peak_allocations, 3,
            "Peak should remain 3 after deallocation"
        );
        assert_eq!(
            stats.active_allocations, 2,
            "Active should be 2 after deallocation"
        );
    }

    /// Objective: Verify fast mode toggle
    /// Invariants: Fast mode should be toggleable
    #[test]
    fn test_fast_mode() {
        let tracker = MemoryTracker::new();
        tracker.set_fast_mode(true);
        assert!(tracker.is_fast_mode(), "Fast mode should be enabled");

        tracker.set_fast_mode(false);
        assert!(!tracker.is_fast_mode(), "Fast mode should be disabled");

        tracker.enable_fast_mode();
        assert!(tracker.is_fast_mode(), "Fast mode should be enabled again");
    }

    /// Objective: Verify detect_leaks returns correct counts
    /// Invariants: Should return count and size of active allocations
    #[test]
    fn test_detect_leaks() {
        let tracker = MemoryTracker::new();
        tracker.track_allocation(0x1000, 1024).unwrap();
        tracker.track_allocation(0x2000, 2048).unwrap();

        let (count, size) = tracker.detect_leaks();
        assert_eq!(count, 2, "Should detect 2 leaks");
        assert_eq!(size, 3072, "Total leak size should be 3072");

        tracker.track_deallocation(0x1000).unwrap();
        let (count, _) = tracker.detect_leaks();
        assert_eq!(count, 1, "Should detect 1 leak after deallocation");
    }
726
    /// Objective: Verify get_memory_by_type groups allocations
    /// Invariants: Should group allocations by type name
    #[test]
    fn test_get_memory_by_type() {
        let tracker = MemoryTracker::new();
        tracker.track_allocation(0x1000, 1024).unwrap();
        tracker.track_allocation(0x2000, 2048).unwrap();

        tracker
            .associate_var(0x1000, "v1".to_string(), "String".to_string(), None, None)
            .unwrap();
        tracker
            .associate_var(0x2000, "v2".to_string(), "String".to_string(), None, None)
            .unwrap();

        // Both allocations share the "String" type, so sizes are summed.
        let by_type = tracker.get_memory_by_type().unwrap();
        assert_eq!(
            by_type.get("String"),
            Some(&3072),
            "String type should have 3072 bytes"
        );
    }

    /// Objective: Verify get_memory_by_type with unknown types
    /// Invariants: Unknown types should be grouped as "unknown"
    #[test]
    fn test_get_memory_by_type_unknown() {
        let tracker = MemoryTracker::new();
        // No associate_var call, so the allocation has no type name.
        tracker.track_allocation(0x1000, 1024).unwrap();

        let by_type = tracker.get_memory_by_type().unwrap();
        assert_eq!(
            by_type.get("unknown"),
            Some(&1024),
            "Unknown type should have 1024 bytes"
        );
    }

    /// Objective: Verify export_to_json creates valid file
    /// Invariants: Should create file with valid JSON content
    #[test]
    fn test_export_to_json() {
        let tracker = MemoryTracker::new();
        tracker.track_allocation(0x1000, 1024).unwrap();

        let temp_dir = tempfile::TempDir::new().expect("Failed to create temp dir");
        let file_path = temp_dir.path().join("test_export.json");
        let result = tracker.export_to_json(&file_path);
        assert!(result.is_ok(), "Export should succeed");
        assert!(file_path.exists(), "Export file should exist");

        let content = std::fs::read_to_string(&file_path).expect("Should read file");
        assert!(!content.is_empty(), "JSON content should not be empty");
        assert!(content.contains("size"), "JSON should contain size field");
    }

    /// Objective: Verify export_to_memscope creates file
    /// Invariants: Should create file with valid content
    #[test]
    fn test_export_to_memscope() {
        let tracker = MemoryTracker::new();
        tracker.track_allocation(0x1000, 1024).unwrap();

        let temp_dir = tempfile::TempDir::new().expect("Failed to create temp dir");
        let file_path = temp_dir.path().join("test_export.memscope");
        let result = tracker.export_to_memscope(&file_path);
        assert!(result.is_ok(), "Export should succeed: {:?}", result);

        assert!(
            file_path.exists(),
            "Export file should exist at {:?}",
            file_path
        );

        let content = std::fs::read_to_string(&file_path).expect("Should read file");
        assert!(!content.is_empty(), "Export content should not be empty");
    }

    /// Objective: Verify ensure_memory_analysis_path creates directory
    /// Invariants: Should create parent directory if needed
    #[test]
    fn test_ensure_memory_analysis_path() {
        let tracker = MemoryTracker::new();
        let temp_dir = tempfile::TempDir::new().expect("Failed to create temp dir");
        let nested_path = temp_dir.path().join("nested").join("dir").join("file.json");

        let result = tracker.ensure_memory_analysis_path(&nested_path);
        assert!(
            result.parent().unwrap().exists(),
            "Parent directory should be created"
        );
    }
819
    /// Objective: Verify global tracker singleton behavior
    /// Invariants: Should return same instance when using global strategy
    #[test]
    fn test_global_tracker_singleton() {
        let tracker1 = GLOBAL_TRACKER
            .get_or_init(|| Arc::new(MemoryTracker::new()))
            .clone();
        let tracker2 = GLOBAL_TRACKER
            .get_or_init(|| Arc::new(MemoryTracker::new()))
            .clone();

        assert!(
            Arc::ptr_eq(&tracker1, &tracker2),
            "Should return same instance from GLOBAL_TRACKER"
        );
    }

    /// Objective: Verify thread-local tracker behavior
    /// Invariants: Should register thread-local tracker
    #[test]
    fn test_thread_local_tracker() {
        // NOTE(review): mutates process-global TRACKING_STRATEGY; tests run in
        // parallel by default, so tests sharing it may interfere — confirm
        // isolation is acceptable here.
        configure_tracking_strategy(true);

        let tracker1 = get_tracker();
        let trackers = collect_all_trackers_local();

        assert!(!trackers.is_empty(), "Should have at least one tracker");
        assert!(
            trackers.iter().any(|t| Arc::ptr_eq(t, &tracker1)),
            "Current thread's tracker should be in registry"
        );
    }

    /// Objective: Verify registry statistics
    /// Invariants: Should return correct thread count
    #[test]
    fn test_registry_stats() {
        // NOTE(review): mutates process-global TRACKING_STRATEGY (see above).
        configure_tracking_strategy(true);
        get_tracker();

        let stats = get_registry_stats_local();
        assert!(
            stats.total_threads_registered >= 1,
            "Should have at least one thread registered"
        );
        assert_eq!(stats.dead_references, 0, "Should have no dead references");
    }

    /// Objective: Verify has_active_trackers_local
    /// Invariants: Should return true when trackers exist
    #[test]
    fn test_has_active_trackers() {
        // NOTE(review): mutates process-global TRACKING_STRATEGY (see above).
        configure_tracking_strategy(true);
        get_tracker();

        assert!(has_active_trackers_local(), "Should have active trackers");
    }

    /// Objective: Verify concurrent allocation tracking
    /// Invariants: Should handle concurrent allocations correctly
    #[test]
    fn test_concurrent_allocations() {
        let tracker = Arc::new(MemoryTracker::new());
        let mut handles = vec![];

        // Each thread tracks one allocation at a distinct, non-overlapping address.
        for i in 0..10 {
            let tracker_clone = tracker.clone();
            let handle = thread::spawn(move || {
                let ptr = 0x1000 + i * 0x100;
                tracker_clone.track_allocation(ptr, 1024).unwrap();
            });
            handles.push(handle);
        }

        for handle in handles {
            handle.join().unwrap();
        }

        let stats = tracker.get_stats().unwrap();
        assert_eq!(
            stats.total_allocations, 10,
            "Should have 10 allocations from 10 threads"
        );
    }
904
905    /// Objective: Verify concurrent allocation and deallocation
906    /// Invariants: Should maintain consistency under concurrent operations
907    #[test]
908    fn test_concurrent_alloc_dealloc() {
909        let tracker = Arc::new(MemoryTracker::new());
910        let mut handles = vec![];
911
912        for i in 0..5 {
913            let tracker_clone = tracker.clone();
914            let handle = thread::spawn(move || {
915                let ptr = 0x1000 + i * 0x100;
916                tracker_clone.track_allocation(ptr, 1024).unwrap();
917                tracker_clone.track_deallocation(ptr).unwrap();
918            });
919            handles.push(handle);
920        }
921
922        for handle in handles {
923            handle.join().unwrap();
924        }
925
926        let stats = tracker.get_stats().unwrap();
927        assert_eq!(
928            stats.active_allocations, 0,
929            "All allocations should be deallocated"
930        );
931        assert_eq!(
932            stats.total_allocations, 5,
933            "Should have 5 total allocations"
934        );
935        assert_eq!(
936            stats.total_deallocations, 5,
937            "Should have 5 total deallocations"
938        );
939    }
940
941    /// Objective: Verify zero-size allocation handling
942    /// Invariants: Should handle zero-size allocation without error
943    #[test]
944    fn test_zero_size_allocation() {
945        let tracker = MemoryTracker::new();
946        let result = tracker.track_allocation(0x1000, 0);
947        assert!(result.is_ok(), "Zero-size allocation should succeed");
948
949        let stats = tracker.get_stats().unwrap();
950        assert_eq!(
951            stats.total_allocations, 1,
952            "Should count zero-size allocation"
953        );
954    }
955
956    /// Objective: Verify large allocation handling
957    /// Invariants: Should handle large allocations correctly
958    #[test]
959    fn test_large_allocation() {
960        let tracker = MemoryTracker::new();
961        let large_size = 1024 * 1024 * 1024;
962        let result = tracker.track_allocation(0x1000, large_size);
963        assert!(result.is_ok(), "Large allocation should succeed");
964
965        let stats = tracker.get_stats().unwrap();
966        assert_eq!(
967            stats.total_allocated as usize, large_size,
968            "Should track large allocation size"
969        );
970    }
971
972    /// Objective: Verify multiple allocations at same address
973    /// Invariants: Later allocation should overwrite earlier one
974    #[test]
975    fn test_duplicate_address_allocation() {
976        let tracker = MemoryTracker::new();
977        tracker.track_allocation(0x1000, 1024).unwrap();
978        tracker.track_allocation(0x1000, 2048).unwrap();
979
980        let stats = tracker.get_stats().unwrap();
981        assert_eq!(
982            stats.active_allocations, 1,
983            "Should have 1 active allocation"
984        );
985        assert_eq!(stats.total_allocations, 2, "Should count both allocations");
986
987        let size = tracker.get_allocation_size(0x1000);
988        assert_eq!(size, Some(2048), "Should have later allocation size");
989    }
990
991    /// Objective: Verify Drop implementation logs warnings
992    /// Invariants: Should not panic when dropping with active allocations
993    #[test]
994    fn test_drop_with_active_allocations() {
995        let tracker = MemoryTracker::new();
996        tracker.track_allocation(0x1000, 1024).unwrap();
997
998        drop(tracker);
999    }
1000
1001    /// Objective: Verify get_active_allocations returns all allocations
1002    /// Invariants: Should return all active allocations
1003    #[test]
1004    fn test_get_active_allocations() {
1005        let tracker = MemoryTracker::new();
1006        tracker.track_allocation(0x1000, 1024).unwrap();
1007        tracker.track_allocation(0x2000, 2048).unwrap();
1008        tracker.track_deallocation(0x1000).unwrap();
1009
1010        let allocations = tracker.get_active_allocations().unwrap();
1011        assert_eq!(allocations.len(), 1, "Should have 1 active allocation");
1012        assert_eq!(
1013            allocations[0].ptr, 0x2000,
1014            "Remaining allocation should be at 0x2000"
1015        );
1016    }
1017
1018    /// Objective: Verify configure_tracking_strategy logging
1019    /// Invariants: Should accept both strategies
1020    #[test]
1021    fn test_configure_tracking_strategy() {
1022        configure_tracking_strategy(false);
1023        assert_eq!(
1024            TRACKING_STRATEGY.load(Ordering::Relaxed),
1025            STRATEGY_GLOBAL_SINGLETON,
1026            "Should set global singleton strategy"
1027        );
1028
1029        configure_tracking_strategy(true);
1030        assert_eq!(
1031            TRACKING_STRATEGY.load(Ordering::Relaxed),
1032            STRATEGY_THREAD_LOCAL,
1033            "Should set thread-local strategy"
1034        );
1035    }
1036}