memscope_rs/core/tracker/memory_tracker.rs

//! Core memory tracking functionality.
//!
//! This module contains the main MemoryTracker struct and its basic methods
//! for creating, configuring, and managing the memory tracking system.

use crate::core::bounded_memory_stats::{
    AllocationHistoryManager, BoundedMemoryStats, BoundedStatsConfig,
};
use crate::core::ownership_history::{HistoryConfig, OwnershipHistoryRecorder};
use crate::core::safe_operations::SafeLock;
use crate::core::types::{
    AllocationInfo, DropChainNode, DropChainPerformanceMetrics, EnhancedPotentialLeak,
    LeakEvidence, LeakEvidenceType, LeakImpact, LeakRiskLevel, LeakType, MemoryStats,
    ResourceLeakAnalysis, TrackingError::LockError, TrackingResult,
};

use std::collections::HashMap;
use std::sync::atomic::{AtomicU8, Ordering};
use std::sync::{Arc, Mutex, OnceLock};

/// Binary export mode enumeration for selecting export strategy
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum BinaryExportMode {
    /// Export only user-defined variables (strict filtering).
    /// Results in smaller binary files (a few KB) with faster processing.
    UserOnly,
    /// Export all allocations including system allocations (loose filtering).
    /// Results in larger binary files (hundreds of KB) with complete data.
    Full,
}

impl Default for BinaryExportMode {
    /// Default to UserOnly mode for backward compatibility
    fn default() -> Self {
        BinaryExportMode::UserOnly
    }
}

/// Tracking strategy constants for dual-mode architecture
const STRATEGY_GLOBAL_SINGLETON: u8 = 0;
const STRATEGY_THREAD_LOCAL: u8 = 1;

/// Global tracking strategy configuration
static TRACKING_STRATEGY: AtomicU8 = AtomicU8::new(STRATEGY_GLOBAL_SINGLETON);

/// Global memory tracker instance (for single-threaded mode)
static GLOBAL_TRACKER: OnceLock<Arc<MemoryTracker>> = OnceLock::new();

// Thread-local memory tracker instances (for concurrent mode)
thread_local! {
    static THREAD_LOCAL_TRACKER: Arc<MemoryTracker> = {
        let tracker = Arc::new(MemoryTracker::new());
        // Auto-register this thread's tracker when first accessed
        crate::core::thread_registry::register_current_thread_tracker(&tracker);
        tracker
    };
}

/// Configure tracking strategy for the application.
///
/// This function should be called at program startup to set the appropriate
/// tracking strategy based on whether the application is concurrent or not.
///
/// # Arguments
/// * `is_concurrent` - true for multi-threaded/async applications, false for single-threaded applications
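///
/// # Example
///
/// A minimal startup sketch (illustrative only; where this is called depends on
/// your application):
/// ```text
/// // Multi-threaded or async program: use per-thread trackers.
/// configure_tracking_strategy(true);
///
/// // Single-threaded program: keep the global singleton (the default).
/// configure_tracking_strategy(false);
/// ```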
pub fn configure_tracking_strategy(is_concurrent: bool) {
    let strategy = if is_concurrent {
        STRATEGY_THREAD_LOCAL
    } else {
        STRATEGY_GLOBAL_SINGLETON
    };

    TRACKING_STRATEGY.store(strategy, Ordering::Relaxed);

    tracing::info!(
        "Configured tracking strategy: {}",
        if is_concurrent {
            "thread-local"
        } else {
            "global-singleton"
        }
    );
}

/// Get the appropriate memory tracker based on current strategy.
///
/// This function implements the dual-mode dispatch:
/// - In single-threaded mode: returns the global singleton tracker
/// - In concurrent mode: returns the current thread's local tracker
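///
/// # Example
///
/// Illustrative usage; which tracker you receive depends on the configured strategy:
/// ```text
/// let tracker = get_tracker();
/// let stats = tracker.get_stats()?;
/// ```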
pub fn get_tracker() -> Arc<MemoryTracker> {
    match TRACKING_STRATEGY.load(Ordering::Relaxed) {
        STRATEGY_GLOBAL_SINGLETON => GLOBAL_TRACKER
            .get_or_init(|| Arc::new(MemoryTracker::new()))
            .clone(),
        STRATEGY_THREAD_LOCAL => THREAD_LOCAL_TRACKER.with(|tracker| tracker.clone()),
        _ => {
            // Fallback to global singleton for unknown strategy
            tracing::warn!("Unknown tracking strategy, falling back to global singleton");
            GLOBAL_TRACKER
                .get_or_init(|| Arc::new(MemoryTracker::new()))
                .clone()
        }
    }
}

/// Get the global memory tracker instance (legacy compatibility).
///
/// This function is preserved for backward compatibility but now delegates to get_tracker().
/// New code should use get_tracker() directly for dual-mode support.
#[deprecated(note = "Use get_tracker() instead for dual-mode support")]
pub fn get_global_tracker() -> Arc<MemoryTracker> {
    get_tracker()
}

/// Core memory tracking functionality.
///
/// The MemoryTracker maintains records of all memory allocations and deallocations,
/// provides statistics, and supports exporting data in various formats.
pub struct MemoryTracker {
    /// Active allocations (ptr -> allocation info)
    pub(crate) active_allocations: Mutex<HashMap<usize, AllocationInfo>>,
    /// Bounded memory statistics (prevents infinite growth)
    pub(crate) bounded_stats: Mutex<BoundedMemoryStats>,
    /// Separate allocation history manager (bounded)
    pub(crate) history_manager: Mutex<AllocationHistoryManager>,
    /// Ownership history recorder for detailed lifecycle tracking
    pub(crate) ownership_history: Mutex<OwnershipHistoryRecorder>,
    /// Legacy stats for compatibility (derived from bounded_stats)
    pub(crate) stats: Mutex<MemoryStats>,
    /// Fast mode flag for testing (reduces overhead)
    pub(crate) fast_mode: std::sync::atomic::AtomicBool,
}

impl MemoryTracker {
    /// Create a new memory tracker.
    pub fn new() -> Self {
        let fast_mode =
            std::env::var("MEMSCOPE_TEST_MODE").is_ok() || cfg!(test) || cfg!(feature = "test");

        // Configure bounded stats based on environment
        let config = if fast_mode {
            // Smaller limits for testing
            BoundedStatsConfig {
                max_recent_allocations: 1_000,
                max_historical_summaries: 100,
                enable_auto_cleanup: true,
                cleanup_threshold: 0.8,
            }
        } else {
            // Production limits
            BoundedStatsConfig::default()
        };

        // Configure ownership history based on mode
        let history_config = if fast_mode {
            HistoryConfig {
                max_events_per_allocation: 10,
                track_borrowing: false,
                track_cloning: true,
                track_ownership_transfers: false,
            }
        } else {
            HistoryConfig::default()
        };

        Self {
            active_allocations: Mutex::new(HashMap::new()),
            bounded_stats: Mutex::new(BoundedMemoryStats::with_config(config.clone())),
            history_manager: Mutex::new(AllocationHistoryManager::with_config(config)),
            ownership_history: Mutex::new(OwnershipHistoryRecorder::with_config(history_config)),
            stats: Mutex::new(MemoryStats::default()),
            fast_mode: std::sync::atomic::AtomicBool::new(fast_mode),
        }
    }

    /// Get current memory statistics with advanced analysis.
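    ///
    /// # Example
    ///
    /// Illustrative usage (field names as defined on `MemoryStats`):
    /// ```text
    /// let stats = get_tracker().get_stats()?;
    /// println!(
    ///     "{} active allocations using {} bytes (peak {} bytes)",
    ///     stats.active_allocations, stats.active_memory, stats.peak_memory
    /// );
    /// ```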
    pub fn get_stats(&self) -> TrackingResult<MemoryStats> {
        // Get bounded stats using safe operations
        let bounded_stats = self
            .bounded_stats
            .safe_lock()
            .map(|stats| stats.clone())
            .unwrap_or_else(|_| crate::core::bounded_memory_stats::BoundedMemoryStats::default());

        // Get history for compatibility using safe operations
        let _history = self
            .history_manager
            .safe_lock()
            .map(|manager| manager.get_history_vec())
            .unwrap_or_else(|_| Vec::new());

        // Convert bounded stats to legacy MemoryStats for compatibility
        let legacy_stats = MemoryStats {
            total_allocations: bounded_stats.total_allocations,
            total_allocated: bounded_stats.total_allocated,
            active_allocations: bounded_stats.active_allocations,
            active_memory: bounded_stats.active_memory,
            peak_allocations: bounded_stats.peak_allocations,
            peak_memory: bounded_stats.peak_memory,
            total_deallocations: bounded_stats.total_deallocations,
            total_deallocated: bounded_stats.total_deallocated,
            leaked_allocations: bounded_stats.leaked_allocations,
            leaked_memory: bounded_stats.leaked_memory,
            fragmentation_analysis: bounded_stats.fragmentation_analysis.clone(),
            lifecycle_stats: bounded_stats.lifecycle_stats.clone(),
            system_library_stats: bounded_stats.system_library_stats.clone(),
            concurrency_analysis: bounded_stats.concurrency_analysis.clone(),
            // Use bounded allocations instead of infinite growth
            allocations: bounded_stats.get_all_allocations(),
        };

        // Update the legacy stats cache using safe operations
        if let Ok(mut stats) = self.stats.safe_lock() {
            *stats = legacy_stats.clone();
        }

        Ok(legacy_stats)
    }

    /// Get all currently active allocations.
    pub fn get_active_allocations(&self) -> TrackingResult<Vec<AllocationInfo>> {
        self.active_allocations
            .safe_lock()
            .map(|active| active.values().cloned().collect())
            .map_err(|e| LockError(format!("Failed to get active allocations: {e}")))
    }

    /// Get the complete allocation history.
    pub fn get_allocation_history(&self) -> TrackingResult<Vec<AllocationInfo>> {
        self.history_manager
            .safe_lock()
            .map(|manager| manager.get_history_vec())
            .map_err(|e| LockError(format!("Failed to get allocation history: {e}")))
    }

    /// Enable or disable fast mode.
    pub fn set_fast_mode(&self, enabled: bool) {
        self.fast_mode
            .store(enabled, std::sync::atomic::Ordering::Relaxed);
    }

    /// Check if fast mode is enabled.
    pub fn is_fast_mode(&self) -> bool {
        self.fast_mode.load(std::sync::atomic::Ordering::Relaxed)
    }

    /// Enable fast mode for testing
    pub fn enable_fast_mode(&self) {
        self.fast_mode
            .store(true, std::sync::atomic::Ordering::Relaxed);
    }

    /// Export memory analysis visualization to SVG file.
    /// All output files are automatically placed in the MemoryAnalysis/ directory.
    ///
    /// # Arguments
    /// * `path` - Output filename for the memory analysis SVG file (recommended: "program_name_memory_analysis.svg")
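    ///
    /// # Example
    ///
    /// Illustrative call (the file name is arbitrary):
    /// ```text
    /// let tracker = get_tracker();
    /// tracker.export_memory_analysis("my_program_memory_analysis.svg")?;
    /// // Creates: MemoryAnalysis/my_program_memory_analysis.svg
    /// ```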
    pub fn export_memory_analysis<P: AsRef<std::path::Path>>(&self, path: P) -> TrackingResult<()> {
        let output_path = self.ensure_memory_analysis_path(path);
        crate::export::visualization::export_memory_analysis(self, output_path)
    }

    /// Ensure the memory analysis path exists and return the full path
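    ///
    /// # Example
    ///
    /// Illustrative mapping (mirrors the unit test below):
    /// ```text
    /// let path = tracker.ensure_memory_analysis_path("test.svg");
    /// // Returns: MemoryAnalysis/test.svg
    /// ```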
    pub fn ensure_memory_analysis_path<P: AsRef<std::path::Path>>(
        &self,
        path: P,
    ) -> std::path::PathBuf {
        let path = path.as_ref();
        let memory_analysis_dir = std::path::Path::new("MemoryAnalysis");

        // Create directory if it doesn't exist
        if let Err(e) = std::fs::create_dir_all(memory_analysis_dir) {
            tracing::warn!("Failed to create MemoryAnalysis directory: {}", e);
        }

        memory_analysis_dir.join(path)
    }

    /// Export memory tracking data to binary format (.memscope file).
    /// All output files are automatically placed in the MemoryAnalysis/ directory.
    /// This method exports user-defined variables only (default behavior for compatibility).
    ///
    /// # Arguments
    /// * `path` - Base filename for the binary export (extension .memscope will be added automatically)
    ///
    /// # Example
    /// ```text
    /// let tracker = get_tracker();
    /// tracker.export_to_binary("my_program")?;
    /// // Creates: MemoryAnalysis/my_program.memscope
    /// ```
    pub fn export_to_binary<P: AsRef<std::path::Path>>(&self, path: P) -> TrackingResult<()> {
        // Maintain compatibility by defaulting to user-only export
        self.export_user_binary(path)
    }

    /// Export memory tracking data to binary format with specified mode.
    /// All output files are automatically placed in the MemoryAnalysis/ directory.
    /// This method provides flexible export options for different use cases.
    ///
    /// # Arguments
    /// * `path` - Base filename for the binary export (extension .memscope will be added automatically)
    /// * `mode` - Export mode (UserOnly for small files, Full for complete data)
    ///
    /// # Example
    /// ```text
    /// let tracker = get_tracker();
    ///
    /// // Export only user variables (small, fast)
    /// tracker.export_to_binary_with_mode("my_program_user", BinaryExportMode::UserOnly)?;
    ///
    /// // Export all data (large, complete)
    /// tracker.export_to_binary_with_mode("my_program_full", BinaryExportMode::Full)?;
    /// ```
    pub fn export_to_binary_with_mode<P: AsRef<std::path::Path>>(
        &self,
        path: P,
        mode: BinaryExportMode,
    ) -> TrackingResult<()> {
        match mode {
            BinaryExportMode::UserOnly => {
                tracing::info!("Using strict filtering for user-only binary export");
                self.export_user_binary(path)
            }
            BinaryExportMode::Full => {
                tracing::info!("Using loose filtering for full binary export");
                self.export_full_binary(path)
            }
        }
    }

    /// Export only user-defined variables to binary format (.memscope file).
    /// This method filters allocations to include only those with variable names,
    /// resulting in smaller binary files and faster JSON conversion.
    /// The binary file will contain only user-defined variables, not system allocations.
    ///
    /// # Arguments
    /// * `path` - Base filename for the binary export (extension .memscope will be added automatically)
    ///
    /// # Example
    /// ```text
    /// let tracker = get_tracker();
    /// tracker.export_user_binary("my_program_user")?;
    /// // Creates: MemoryAnalysis/my_program_user.memscope (user variables only)
    /// ```
    pub fn export_user_binary<P: AsRef<std::path::Path>>(&self, path: P) -> TrackingResult<()> {
        let output_path = self.ensure_memscope_path(path);

        tracing::info!("Starting user binary export to: {}", output_path.display());

        let all_allocations = self.get_active_allocations()?;

        // Filter to user-defined variables only - this creates smaller binary files
        // and matches the current JSON output behavior
        let user_allocations: Vec<_> = all_allocations
            .into_iter()
            .filter(|allocation| allocation.var_name.is_some())
            .collect();

        tracing::info!(
            "Filtered {} user allocations for export (excluding system allocations)",
            user_allocations.len()
        );

        crate::export::binary::export_to_binary_with_mode(
            &user_allocations,
            output_path,
            crate::export::binary::format::BinaryExportMode::UserOnly,
            &crate::export::binary::BinaryExportConfig::default(),
        )
        .map_err(|e| crate::core::types::TrackingError::ExportError(e.to_string()))?;

        tracing::info!("User binary export completed successfully");
        Ok(())
    }

    /// Export all allocations (user + system) to binary format (.memscope file).
    /// This method includes all tracked allocations with null field elimination
    /// for optimal storage efficiency. Uses optimized processing for large datasets.
    ///
    /// # Arguments
    /// * `path` - Base filename for the binary export (extension .memscope will be added automatically)
    ///
    /// # Example
    /// ```text
    /// let tracker = get_tracker();
    /// tracker.export_full_binary("my_program_full")?;
    /// // Creates: MemoryAnalysis/my_program_full.memscope
    /// ```
    pub fn export_full_binary<P: AsRef<std::path::Path>>(&self, path: P) -> TrackingResult<()> {
        let output_path = self.ensure_memscope_path(path);

        tracing::info!("Starting full binary export to: {}", output_path.display());

        let all_allocations = self.get_active_allocations()?;

        tracing::info!(
            "Exporting {} total allocations (user + system)",
            all_allocations.len()
        );

        // Export all allocations with enhanced header for full-binary mode
        // This ensures complete data integrity without ambiguous null values
        crate::export::binary::export_to_binary_with_mode(
            &all_allocations,
            output_path,
            crate::export::binary::format::BinaryExportMode::Full,
            &crate::export::binary::BinaryExportConfig::default(),
        )
        .map_err(|e| crate::core::types::TrackingError::ExportError(e.to_string()))?;

        tracing::info!("Full binary export completed successfully");
        Ok(())
    }

    /// Ensure path uses .memscope extension and is in MemoryAnalysis directory
    fn ensure_memscope_path<P: AsRef<std::path::Path>>(&self, path: P) -> std::path::PathBuf {
        let mut output_path = self.ensure_memory_analysis_path(path);

        // Ensure .memscope extension (also covers the case where no extension is present)
        if output_path.extension() != Some(std::ffi::OsStr::new("memscope")) {
            output_path.set_extension("memscope");
        }

        output_path
    }

    /// Convert binary file to standard JSON format (4 separate files)
    ///
    /// This method reads a .memscope binary file and generates the standard
    /// 4-file JSON output format used by export_to_json.
    ///
    /// # Arguments
    ///
    /// * `binary_path` - Path to input .memscope file
    /// * `base_name` - Base name for output files (will create 4 files with different suffixes)
    ///
    /// # Examples
    ///
    /// ```text
    /// MemoryTracker::parse_binary_to_standard_json("data.memscope", "project_name")?;
    /// ```
    pub fn parse_binary_to_standard_json<P: AsRef<std::path::Path>>(
        binary_path: P,
        base_name: &str,
    ) -> TrackingResult<()> {
        crate::export::binary::BinaryParser::to_standard_json_files(binary_path, base_name)
            .map_err(|e| crate::core::types::TrackingError::ExportError(e.to_string()))
    }

    /// Convert binary file to single JSON format (legacy compatibility)
    ///
    /// # Examples
    ///
    /// ```text
    /// MemoryTracker::parse_binary_to_json("data.memscope", "data.json")?;
    /// ```
    pub fn parse_binary_to_json<P: AsRef<std::path::Path>>(
        binary_path: P,
        json_path: P,
    ) -> TrackingResult<()> {
        crate::export::binary::parse_binary_to_json(binary_path, json_path)
            .map_err(|e| crate::core::types::TrackingError::ExportError(e.to_string()))
    }

    /// Convert binary file to HTML format
    ///
    /// This method reads a .memscope binary file and generates an HTML report
    /// with memory allocation analysis and visualization.
    ///
    /// # Arguments
    ///
    /// * `binary_path` - Path to input .memscope file
    /// * `html_path` - Path for output HTML file
    ///
    /// # Examples
    ///
    /// ```text
    /// MemoryTracker::parse_binary_to_html("data.memscope", "report.html")?;
    /// ```
    pub fn parse_binary_to_html<P: AsRef<std::path::Path>>(
        binary_path: P,
        html_path: P,
    ) -> TrackingResult<()> {
        crate::export::binary::parse_binary_to_html(binary_path, html_path)
            .map_err(|e| crate::core::types::TrackingError::ExportError(e.to_string()))
    }

    /// Alias for parse_binary_to_html for backward compatibility
    pub fn export_binary_to_html<P: AsRef<std::path::Path>>(
        binary_path: P,
        html_path: P,
    ) -> TrackingResult<()> {
        Self::parse_binary_to_html(binary_path, html_path)
    }

    /// Export interactive lifecycle timeline showing variable lifecycles and relationships.
    /// This creates an advanced timeline with variable birth, life, death, and cross-section interactivity.
    /// All output files are automatically placed in the MemoryAnalysis/ directory.
    ///
    /// # Arguments
    /// * `path` - Output filename for the lifecycle timeline SVG file (recommended: "program_name_lifecycle.svg")
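    ///
    /// # Example
    ///
    /// Illustrative call (the file name is arbitrary):
    /// ```text
    /// let tracker = get_tracker();
    /// tracker.export_lifecycle_timeline("my_program_lifecycle.svg")?;
    /// // Creates: MemoryAnalysis/my_program_lifecycle.svg
    /// ```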
    pub fn export_lifecycle_timeline<P: AsRef<std::path::Path>>(
        &self,
        path: P,
    ) -> TrackingResult<()> {
        let output_path = self.ensure_memory_analysis_path(path);
        crate::export::visualization::export_lifecycle_timeline(self, output_path)
    }

    /// Analyze drop chain for an object being deallocated
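    ///
    /// # Example
    ///
    /// A sketch of inspecting a result (the pointer value is hypothetical; the
    /// unit tests below make a similar call):
    /// ```text
    /// if let Some(analysis) = tracker.analyze_drop_chain(0x1000, "Vec<i32>") {
    ///     println!(
    ///         "{} objects dropped in {} ns",
    ///         analysis.performance_metrics.total_objects, analysis.total_duration_ns
    ///     );
    /// }
    /// ```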
    pub fn analyze_drop_chain(
        &self,
        ptr: usize,
        type_name: &str,
    ) -> Option<crate::core::types::DropChainAnalysis> {
        let start_time = std::time::SystemTime::now()
            .duration_since(std::time::UNIX_EPOCH)
            .unwrap_or_default()
            .as_nanos() as u64;

        // Create root drop chain node
        let root_node = self.create_drop_chain_node(ptr, type_name, start_time);

        // Analyze ownership hierarchy
        let ownership_hierarchy = self.analyze_ownership_hierarchy(ptr, type_name);

        // Build complete drop sequence
        let drop_sequence = self.build_drop_sequence(ptr, type_name, &ownership_hierarchy);

        // Calculate performance metrics
        let performance_metrics = self.calculate_drop_chain_performance(&drop_sequence);

        // Detect resource leaks
        let leak_detection = self.detect_resource_leaks(ptr, type_name, &ownership_hierarchy);

        let end_time = std::time::SystemTime::now()
            .duration_since(std::time::UNIX_EPOCH)
            .unwrap_or_default()
            .as_nanos() as u64;

        Some(crate::core::types::DropChainAnalysis {
            root_object: root_node,
            drop_sequence,
            // Saturating subtraction avoids underflow if the system clock steps backwards
            total_duration_ns: end_time.saturating_sub(start_time),
            performance_metrics,
            ownership_hierarchy,
            leak_detection,
        })
    }

    /// Create a drop chain node for an object
    fn create_drop_chain_node(
        &self,
        ptr: usize,
        type_name: &str,
        timestamp: u64,
    ) -> crate::core::types::DropChainNode {
        let drop_impl_type = self.determine_drop_implementation_type(type_name);
        let cleanup_actions = self.analyze_cleanup_actions(type_name);
        let performance_characteristics = self.analyze_drop_performance_characteristics(type_name);

        // Estimate drop duration based on type
        let drop_duration_ns = self.estimate_drop_duration(type_name);

        // Find child objects that will be dropped
        let children = self.find_child_objects_for_drop(ptr, type_name);

        crate::core::types::DropChainNode {
            object_id: ptr,
            type_name: type_name.to_string(),
            drop_timestamp: timestamp,
            drop_duration_ns,
            children,
            drop_impl_type,
            cleanup_actions,
            performance_characteristics,
        }
    }

    /// Determine the type of Drop implementation for a type
    fn determine_drop_implementation_type(
        &self,
        type_name: &str,
    ) -> crate::core::types::DropImplementationType {
        use crate::core::types::DropImplementationType;

        if type_name.starts_with("Box<")
            || type_name.starts_with("Rc<")
            || type_name.starts_with("Arc<")
        {
            DropImplementationType::SmartPointer
        } else if type_name.starts_with("Vec<")
            || type_name.starts_with("HashMap<")
            || type_name.starts_with("BTreeMap<")
            || type_name.starts_with("HashSet<")
        {
            DropImplementationType::Collection
        } else if type_name.contains("File")
            || type_name.contains("Socket")
            || type_name.contains("Handle")
            || type_name.contains("Stream")
        {
            DropImplementationType::ResourceHandle
        } else if self.is_copy_type(type_name) {
            DropImplementationType::NoOp
        } else if self.has_custom_drop_impl(type_name) {
            DropImplementationType::Custom
        } else {
            DropImplementationType::Automatic
        }
    }

    /// Check if a type is Copy (no-op drop)
    fn is_copy_type(&self, type_name: &str) -> bool {
        matches!(
            type_name,
            "i8" | "i16"
                | "i32"
                | "i64"
                | "i128"
                | "isize"
                | "u8"
                | "u16"
                | "u32"
                | "u64"
                | "u128"
                | "usize"
                | "f32"
                | "f64"
                | "bool"
                | "char"
                | "&str"
                | "&[u8]"
        ) || type_name.starts_with("&")
            || type_name.starts_with("*")
    }

    /// Check if a type has a custom Drop implementation
    fn has_custom_drop_impl(&self, type_name: &str) -> bool {
        // In a real implementation, this would check for Drop trait implementations
        // For now, use heuristics based on common patterns
        type_name.contains("Guard")
            || type_name.contains("Lock")
            || type_name.contains("Mutex")
            || type_name.contains("RwLock")
            || type_name.contains("Channel")
            || type_name.contains("Receiver")
            || type_name.contains("Sender")
    }

    /// Analyze cleanup actions for a type
    fn analyze_cleanup_actions(&self, type_name: &str) -> Vec<crate::core::types::CleanupAction> {
        use crate::core::types::{CleanupAction, CleanupActionType};

        let mut actions = Vec::new();
        let timestamp = std::time::SystemTime::now()
            .duration_since(std::time::UNIX_EPOCH)
            .unwrap_or_default()
            .as_nanos() as u64;

        if type_name.starts_with("Box<") || type_name.starts_with("Vec<") {
            actions.push(CleanupAction {
                action_type: CleanupActionType::MemoryDeallocation,
                timestamp,
                duration_ns: 100, // Estimated
                resource_description: format!("Heap memory for {type_name}"),
                success: true,
            });
        }

        if type_name.contains("File") {
            actions.push(CleanupAction {
                action_type: CleanupActionType::FileHandleClosure,
                timestamp,
                duration_ns: 1000, // File operations are slower
                resource_description: "File handle closure".to_string(),
                success: true,
            });
        }

        if type_name.contains("Socket") || type_name.contains("TcpStream") {
            actions.push(CleanupAction {
                action_type: CleanupActionType::NetworkConnectionClosure,
                timestamp,
                duration_ns: 5000, // Network operations can be slow
                resource_description: "Network connection closure".to_string(),
                success: true,
            });
        }

        if type_name.contains("Mutex") || type_name.contains("RwLock") {
            actions.push(CleanupAction {
                action_type: CleanupActionType::LockRelease,
                timestamp,
                duration_ns: 50, // Lock operations are fast
                resource_description: format!("Lock release for {type_name}"),
                success: true,
            });
        }

        if type_name.starts_with("Rc<") || type_name.starts_with("Arc<") {
            actions.push(CleanupAction {
                action_type: CleanupActionType::ReferenceCountDecrement,
                timestamp,
                duration_ns: 20, // Atomic operations are very fast
                resource_description: "Reference count decrement".to_string(),
                success: true,
            });
        }

        actions
    }

    /// Analyze drop performance characteristics
    fn analyze_drop_performance_characteristics(
        &self,
        type_name: &str,
    ) -> crate::core::types::DropPerformanceCharacteristics {
        use crate::core::types::{DropPerformanceCharacteristics, ImpactLevel};

        let (execution_time_ns, cpu_usage, memory_ops, io_ops, syscalls, impact) =
            if type_name.starts_with("Vec<") || type_name.starts_with("HashMap<") {
                (1000, 5.0, 10, 0, 1, ImpactLevel::Low)
            } else if type_name.contains("File") || type_name.contains("Socket") {
                (10000, 2.0, 1, 5, 3, ImpactLevel::Medium)
            } else if type_name.contains("Mutex") || type_name.contains("RwLock") {
                (100, 1.0, 0, 0, 1, ImpactLevel::Low)
            } else if self.has_custom_drop_impl(type_name) {
                (5000, 10.0, 5, 2, 2, ImpactLevel::Medium)
            } else {
                (50, 0.5, 1, 0, 0, ImpactLevel::Low)
            };

        DropPerformanceCharacteristics {
            execution_time_ns,
            cpu_usage_percent: cpu_usage,
            memory_operations: memory_ops,
            io_operations: io_ops,
            system_calls: syscalls,
            impact_level: impact,
        }
    }

    /// Estimate drop duration for a type
    fn estimate_drop_duration(&self, type_name: &str) -> u64 {
        if type_name.starts_with("Vec<") {
            // Vec drop time depends on element count and element drop time
            1000 // Base estimate in nanoseconds
        } else if type_name.starts_with("HashMap<") {
            2000 // HashMap is more complex
        } else if type_name.contains("File") {
            10000 // File operations are slow
        } else if type_name.contains("Socket") {
            15000 // Network operations are slower
        } else if type_name.starts_with("Box<") {
            500 // Simple heap deallocation
        } else if self.has_custom_drop_impl(type_name) {
            5000 // Custom drop implementations vary
        } else {
            100 // Simple automatic drop
        }
    }

    /// Find child objects that will be dropped as part of this object's drop
    fn find_child_objects_for_drop(
        &self,
        ptr: usize,
        type_name: &str,
    ) -> Vec<crate::core::types::DropChainNode> {
        let mut children = Vec::new();
        let timestamp = std::time::SystemTime::now()
            .duration_since(std::time::UNIX_EPOCH)
            .unwrap_or_default()
            .as_nanos() as u64;

        // For collections, simulate child elements
        if type_name.starts_with("Vec<") {
            // Extract element type
            if let Some(element_type) = self.extract_generic_type(type_name, "Vec") {
                // Simulate a few child elements
                for i in 0..3 {
                    let child_ptr = ptr + (i * 8); // Simulate element addresses
                    children.push(self.create_drop_chain_node(
                        child_ptr,
                        &element_type,
                        timestamp + i as u64 * 100,
                    ));
                }
            }
        }

        children
    }

    /// Analyze ownership hierarchy for an object
    fn analyze_ownership_hierarchy(
        &self,
        ptr: usize,
        type_name: &str,
    ) -> crate::core::types::OwnershipHierarchy {
        use crate::core::types::{OwnershipHierarchy, OwnershipNode};

        // Create root ownership node
        let ownership_type = self.determine_ownership_type(type_name);
        let root_node = OwnershipNode {
            object_id: ptr,
            type_name: type_name.to_string(),
            ownership_type,
            owned_objects: self.find_owned_objects(ptr, type_name),
            reference_count: self.get_reference_count_for_type(type_name),
            weak_reference_count: self.get_weak_reference_count_for_type(type_name),
        };

        OwnershipHierarchy {
            root_owners: vec![root_node],
            max_depth: self.calculate_ownership_depth(ptr, type_name),
            total_objects: self.count_owned_objects(ptr, type_name),
            transfer_events: self.collect_ownership_transfer_events(ptr),
            weak_references: self.collect_weak_references(ptr),
            circular_references: self.detect_circular_references(ptr),
        }
    }

    /// Build complete drop sequence
    fn build_drop_sequence(
        &self,
        ptr: usize,
        type_name: &str,
        _hierarchy: &crate::core::types::OwnershipHierarchy,
    ) -> Vec<crate::core::types::DropChainNode> {
        let mut sequence = Vec::new();
        let timestamp = std::time::SystemTime::now()
            .duration_since(std::time::UNIX_EPOCH)
            .unwrap_or_default()
            .as_nanos() as u64;

        // Add root object
        sequence.push(self.create_drop_chain_node(ptr, type_name, timestamp));

        // Add child objects based on type
        if type_name.starts_with("Vec<") {
            if let Some(element_type) = self.extract_generic_type(type_name, "Vec") {
                for i in 0..3 {
                    let child_ptr = ptr + (i * 8);
                    sequence.push(self.create_drop_chain_node(
                        child_ptr,
                        &element_type,
                        timestamp + (i as u64 + 1) * 100,
                    ));
                }
            }
        }

        sequence
    }

    /// Calculate drop chain performance metrics
    fn calculate_drop_chain_performance(
        &self,
        drop_sequence: &[DropChainNode],
    ) -> DropChainPerformanceMetrics {
        let total_objects = drop_sequence.len();
        let max_depth = self.calculate_drop_chain_depth(drop_sequence);
        let total_time: u64 = drop_sequence.iter().map(|node| node.drop_duration_ns).sum();
        let avg_drop_time = if total_objects > 0 {
            total_time as f64 / total_objects as f64
        } else {
            0.0
        };
        let slowest_drop = drop_sequence
            .iter()
            .map(|node| node.drop_duration_ns)
            .max()
            .unwrap_or(0);

        // Calculate efficiency score based on performance characteristics
        let efficiency_score = self.calculate_drop_efficiency_score(drop_sequence);

        // Identify bottlenecks
        let bottlenecks = self.identify_drop_bottlenecks(drop_sequence);

        DropChainPerformanceMetrics {
            total_objects,
            max_depth,
            avg_drop_time_ns: avg_drop_time,
            slowest_drop_ns: slowest_drop,
            efficiency_score,
            bottlenecks,
        }
    }

    /// Detect potential resource leaks
    fn detect_resource_leaks(
        &self,
        ptr: usize,
        type_name: &str,
        _hierarchy: &crate::core::types::OwnershipHierarchy,
    ) -> ResourceLeakAnalysis {
        let mut potential_leaks = Vec::new();

        // Check for common leak patterns
        if type_name.contains("Rc<") && self.has_potential_cycle(ptr) {
            let evidence = vec![LeakEvidence {
                evidence_type: LeakEvidenceType::CircularReference,
                description: "Potential circular reference in Rc structure".to_string(),
                strength: 75.0,
                timestamp: std::time::SystemTime::now()
                    .duration_since(std::time::UNIX_EPOCH)
                    .unwrap_or_default()
                    .as_nanos() as u64,
            }];

            potential_leaks.push(EnhancedPotentialLeak {
                object_id: ptr,
                leak_type: LeakType::ReferenceCycle,
                risk_level: LeakRiskLevel::High,
                evidence,
                estimated_impact: LeakImpact {
                    memory_bytes: self.estimate_type_size(type_name),
                    performance_impact_percent: 5.0,
                    resource_count: 1,
                    time_to_critical_hours: Some(24.0),
                },
            });
        }

        if type_name.contains("File") && !self.has_explicit_close(ptr) {
            let evidence = vec![LeakEvidence {
                evidence_type: LeakEvidenceType::ResourceNotClosed,
                description: "File handle may not be explicitly closed".to_string(),
                strength: 60.0,
                timestamp: std::time::SystemTime::now()
                    .duration_since(std::time::UNIX_EPOCH)
                    .unwrap_or_default()
                    .as_nanos() as u64,
            }];

            potential_leaks.push(EnhancedPotentialLeak {
                object_id: ptr,
                leak_type: LeakType::FileHandle,
                risk_level: LeakRiskLevel::Medium,
                evidence,
                estimated_impact: LeakImpact {
                    memory_bytes: 1024, // File handle overhead
                    performance_impact_percent: 2.0,
                    resource_count: 1,
                    time_to_critical_hours: Some(72.0),
                },
            });
        }

        ResourceLeakAnalysis {
            potential_leaks,
            detection_confidence: 0.7, // 70% confidence in simplified implementation
            usage_patterns: Vec::new(), // Would be populated in full implementation
            prevention_recommendations: self.generate_leak_prevention_recommendations(type_name),
        }
    }

    /// Check if an object has potential circular references
    fn has_potential_cycle(&self, _ptr: usize) -> bool {
        // Simplified heuristic - in real implementation would do graph traversal
        rand::random::<f64>() < 0.1 // 10% chance for demonstration
    }

    // Private helper methods for drop chain analysis

    /// Extract generic type from a generic type name
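    ///
    /// # Example
    ///
    /// Illustrative behavior of this helper:
    /// ```text
    /// extract_generic_type("Vec<String>", "Vec") // => Some("String")
    /// extract_generic_type("String", "String")   // => None (no generic parameter)
    /// ```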
    fn extract_generic_type(&self, type_name: &str, _container: &str) -> Option<String> {
        if let Some(start) = type_name.find('<') {
            if let Some(end) = type_name.rfind('>') {
                let inner = &type_name[start + 1..end];
                Some(inner.to_string())
            } else {
                None
            }
        } else {
            None
        }
    }

    /// Determine ownership type for a type
    fn determine_ownership_type(&self, type_name: &str) -> crate::core::types::OwnershipType {
        use crate::core::types::OwnershipType;

        if type_name.starts_with("Box<") {
            OwnershipType::Unique
        } else if type_name.starts_with("Rc<") {
            OwnershipType::SharedSingleThreaded
        } else if type_name.starts_with("Arc<") {
            OwnershipType::SharedMultiThreaded
        } else if type_name.starts_with("&") {
            OwnershipType::Borrowed
        } else if type_name.contains("Weak") {
            OwnershipType::Weak
        } else if type_name.starts_with("*") {
            OwnershipType::Raw
        } else {
            OwnershipType::Unique
        }
    }

    /// Find objects owned by this object
    fn find_owned_objects(
        &self,
        _ptr: usize,
        type_name: &str,
    ) -> Vec<crate::core::types::OwnershipNode> {
        use crate::core::types::{OwnershipNode, OwnershipType};

        let mut owned = Vec::new();

        // For collections, simulate owned elements
        if type_name.starts_with("Vec<") {
            if let Some(element_type) = self.extract_generic_type(type_name, "Vec") {
                // Simulate a few owned elements
                for i in 0..2 {
                    owned.push(OwnershipNode {
                        object_id: 1000 + i,
                        type_name: element_type.clone(),
                        ownership_type: OwnershipType::Unique,
                        owned_objects: Vec::new(),
                        reference_count: None,
                        weak_reference_count: None,
                    });
                }
            }
        }

        owned
    }

    /// Get reference count for reference-counted types
    fn get_reference_count_for_type(&self, type_name: &str) -> Option<usize> {
        if type_name.starts_with("Rc<") || type_name.starts_with("Arc<") {
            Some(1) // Simplified - in real implementation would track actual counts
        } else {
            None
        }
    }

    /// Get weak reference count for reference-counted types
    fn get_weak_reference_count_for_type(&self, type_name: &str) -> Option<usize> {
        if type_name.starts_with("Rc<") || type_name.starts_with("Arc<") {
            Some(0) // Simplified
        } else {
            None
        }
    }

    /// Calculate ownership hierarchy depth
    fn calculate_ownership_depth(&self, _ptr: usize, type_name: &str) -> usize {
        if type_name.starts_with("Vec<")
            || type_name.starts_with("HashMap<")
            || type_name.starts_with("Box<")
        {
            2 // Collection/Box + elements/boxed value
        } else {
            1 // Simple object
        }
    }

    /// Count total objects in ownership hierarchy
    fn count_owned_objects(&self, _ptr: usize, type_name: &str) -> usize {
        if type_name.starts_with("Vec<") {
            5 // Simulate 5 elements
        } else if type_name.starts_with("HashMap<") {
            8 // Simulate 8 key-value pairs
        } else if type_name.starts_with("Box<") {
            2 // Box + boxed value
        } else {
            1
        }
    }

    /// Collect ownership transfer events
    fn collect_ownership_transfer_events(
        &self,
        _ptr: usize,
    ) -> Vec<crate::core::types::OwnershipTransferEvent> {
        // In a real implementation, this would track actual transfer events
        // For now, return empty vector
        Vec::new()
    }

    /// Collect weak references
    fn collect_weak_references(&self, _ptr: usize) -> Vec<crate::core::types::WeakReferenceInfo> {
        // In a real implementation, this would track actual weak references
        Vec::new()
    }

    /// Detect circular references
    fn detect_circular_references(
        &self,
        _ptr: usize,
    ) -> Vec<crate::core::types::CircularReferenceInfo> {
        // In a real implementation, this would perform cycle detection
        Vec::new()
    }

    /// Calculate drop chain depth
    fn calculate_drop_chain_depth(
        &self,
        drop_sequence: &[crate::core::types::DropChainNode],
    ) -> usize {
        drop_sequence
            .iter()
            .map(|node| self.calculate_node_depth(node, 0))
            .max()
            .unwrap_or(0)
    }

    /// Calculate depth of a single node
    #[allow(clippy::only_used_in_recursion)]
    fn calculate_node_depth(
        &self,
        node: &crate::core::types::DropChainNode,
        current_depth: usize,
    ) -> usize {
        if node.children.is_empty() {
            current_depth + 1
        } else {
            node.children
                .iter()
                .map(|child| self.calculate_node_depth(child, current_depth + 1))
                .max()
                .unwrap_or(current_depth + 1)
        }
    }

    /// Calculate drop efficiency score
    fn calculate_drop_efficiency_score(
        &self,
        drop_sequence: &[crate::core::types::DropChainNode],
    ) -> f64 {
        if drop_sequence.is_empty() {
            return 100.0;
        }

        let total_time: u64 = drop_sequence.iter().map(|node| node.drop_duration_ns).sum();
        let object_count = drop_sequence.len() as u64;

        // Efficiency is inversely related to average drop time
        // Good efficiency: < 1000ns per object = 100 points
        // Poor efficiency: > 10000ns per object = 0 points
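        // Worked example (illustrative): 10 objects totalling 40_000ns average
        // 4_000ns each, scoring 100 - ((4000 - 1000) / 9000) * 100 ≈ 66.7.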
1149        let avg_time_per_object = total_time / object_count;
1150        let efficiency = if avg_time_per_object < 1000 {
1151            100.0
1152        } else if avg_time_per_object > 10000 {
1153            0.0
1154        } else {
1155            100.0 - ((avg_time_per_object - 1000) as f64 / 9000.0) * 100.0
1156        };
1157
1158        efficiency.clamp(0.0, 100.0)
1159    }
1160
1161    /// Identify drop performance bottlenecks
1162    fn identify_drop_bottlenecks(
1163        &self,
1164        drop_sequence: &[crate::core::types::DropChainNode],
1165    ) -> Vec<crate::core::types::DropPerformanceBottleneck> {
1166        use crate::core::types::{DropBottleneckType, DropPerformanceBottleneck, ImpactLevel};
1167
1168        let mut bottlenecks = Vec::new();
1169        let avg_time = if !drop_sequence.is_empty() {
1170            drop_sequence
1171                .iter()
1172                .map(|node| node.drop_duration_ns)
1173                .sum::<u64>()
1174                / drop_sequence.len() as u64
1175        } else {
1176            0
1177        };
1178
1179        for node in drop_sequence {
1180            // Identify slow drops
1181            if node.drop_duration_ns > avg_time * 3 {
1182                let severity = if node.drop_duration_ns > 50000 {
1183                    ImpactLevel::High
1184                } else if node.drop_duration_ns > 10000 {
1185                    ImpactLevel::Medium
1186                } else {
1187                    ImpactLevel::Low
1188                };
1189
1190                let bottleneck_type = if node.type_name.contains("File")
1191                    || node.type_name.contains("Socket")
1192                {
1193                    DropBottleneckType::ResourceHandleDelay
1194                } else if node.type_name.starts_with("Vec<")
1195                    || node.type_name.starts_with("HashMap<")
1196                {
1197                    DropBottleneckType::LargeCollectionCleanup
1198                } else if node.drop_impl_type == crate::core::types::DropImplementationType::Custom
1199                {
1200                    DropBottleneckType::SlowCustomDrop
1201                } else {
1202                    DropBottleneckType::DeepOwnershipHierarchy
1203                };
1204
1205                bottlenecks.push(DropPerformanceBottleneck {
1206                    object_id: node.object_id,
1207                    bottleneck_type: bottleneck_type.clone(),
1208                    severity,
1209                    description: format!(
1210                        "Drop of {} took {}ns, significantly above average of {}ns",
1211                        node.type_name, node.drop_duration_ns, avg_time
1212                    ),
1213                    optimization_suggestion: self
1214                        .get_drop_optimization_suggestion(&bottleneck_type),
1215                });
1216            }
1217        }
1218
1219        bottlenecks
1220    }
1221
1222    /// Get optimization suggestion for a drop bottleneck type
1223    fn get_drop_optimization_suggestion(
1224        &self,
1225        bottleneck_type: &crate::core::types::DropBottleneckType,
1226    ) -> String {
1227        use crate::core::types::DropBottleneckType;
1228
1229        match bottleneck_type {
1230            DropBottleneckType::SlowCustomDrop => {
1231                "Consider optimizing custom Drop implementation or using async cleanup".to_string()
1232            }
1233            DropBottleneckType::DeepOwnershipHierarchy => {
1234                "Consider flattening ownership hierarchy or using weak references".to_string()
1235            }
1236            DropBottleneckType::LargeCollectionCleanup => {
1237                "Consider using Vec::clear() before drop or implementing custom cleanup".to_string()
1238            }
1239            DropBottleneckType::ResourceHandleDelay => {
1240                "Consider async resource cleanup or connection pooling".to_string()
1241            }
1242            DropBottleneckType::LockContention => {
1243                "Consider reducing lock scope or using lock-free data structures".to_string()
1244            }
1245            DropBottleneckType::MemoryFragmentation => {
1246                "Consider using memory pools or custom allocators".to_string()
1247            }
1248        }
1249    }
1250
1251    /// Generate leak prevention recommendations
1252    fn generate_leak_prevention_recommendations(
1253        &self,
1254        type_name: &str,
1255    ) -> Vec<crate::core::types::LeakPreventionRecommendation> {
1256        use crate::core::types::{LeakPreventionRecommendation, LeakPreventionType, Priority};
1257
1258        let mut recommendations = Vec::new();
1259
1260        if type_name.contains("Rc<") {
1261            recommendations.push(LeakPreventionRecommendation {
1262                recommendation_type: LeakPreventionType::UseWeakReferences,
1263                priority: Priority::High,
1264                description: "Use Weak references to break potential cycles in Rc structures"
1265                    .to_string(),
1266                implementation_guidance: "Replace some Rc references with Weak where appropriate"
1267                    .to_string(),
1268                expected_effectiveness: 0.9,
1269            });
1270        }
1271
1272        if type_name.contains("File") || type_name.contains("Socket") {
1273            recommendations.push(LeakPreventionRecommendation {
1274                recommendation_type: LeakPreventionType::UseRAII,
1275                priority: Priority::High,
1276                description: "Ensure proper RAII patterns for resource cleanup".to_string(),
1277                implementation_guidance: "Use Drop trait or scoped guards for automatic cleanup"
1278                    .to_string(),
1279                expected_effectiveness: 0.95,
1280            });
1281        }
1282
1283        recommendations.push(LeakPreventionRecommendation {
1284            recommendation_type: LeakPreventionType::ResourceMonitoring,
1285            priority: Priority::Medium,
1286            description: "Implement resource usage monitoring".to_string(),
1287            implementation_guidance: "Add metrics and alerts for resource usage patterns"
1288                .to_string(),
1289            expected_effectiveness: 0.7,
1290        });
1291
1292        recommendations
1293    }
1294
1295    /// Check if a resource has explicit close handling
1296    fn has_explicit_close(&self, _ptr: usize) -> bool {
1297        // Simplified heuristic - in real implementation would track close calls
1298        rand::random::<f64>() < 0.8 // 80% chance of proper closure
1299    }
1300}
1301
1302impl Default for MemoryTracker {
1303    fn default() -> Self {
1304        Self::new()
1305    }
1306}
1307
1308impl Drop for MemoryTracker {
1309    fn drop(&mut self) {
1310        // Optional verbose tip for users
1311        if std::env::var("MEMSCOPE_VERBOSE").is_ok() {
1312            tracing::info!("💡 Tip: Use tracker.export_to_json() or tracker.export_interactive_dashboard() before drop to save analysis results");
1313        }
1314
1315        // Clean up any remaining allocations
1316        if let Ok(mut active) = self.active_allocations.lock() {
1317            active.clear();
1318        }
1319    }
1320}
1321
1322#[cfg(test)]
1323mod tests {
1324    use super::*;
1325    use std::sync::Arc;
1326
1327    #[test]
1328    fn test_memory_tracker_creation() {
1329        let tracker = MemoryTracker::new();
1330
1331        // Test that tracker is created with default values
1332        assert!(
1333            !tracker.is_fast_mode() || std::env::var("MEMSCOPE_TEST_MODE").is_ok() || cfg!(test)
1334        );
1335
1336        // Test that we can get stats without errors
1337        let stats_result = tracker.get_stats();
1338        assert!(stats_result.is_ok());
1339    }

    #[test]
    fn test_fast_mode_toggle() {
        let tracker = MemoryTracker::new();

        // Test enabling fast mode
        tracker.set_fast_mode(true);
        assert!(tracker.is_fast_mode());

        // Test disabling fast mode
        tracker.set_fast_mode(false);
        assert!(!tracker.is_fast_mode());

        // Test enable_fast_mode method
        tracker.enable_fast_mode();
        assert!(tracker.is_fast_mode());
    }

    #[test]
    fn test_get_active_allocations() {
        let tracker = MemoryTracker::new();
        tracker.enable_fast_mode();

        // Initially should be empty
        let allocations = tracker.get_active_allocations();
        assert!(allocations.is_ok());
        assert_eq!(allocations.unwrap().len(), 0);
    }

    #[test]
    fn test_get_allocation_history() {
        let tracker = MemoryTracker::new();
        tracker.enable_fast_mode();

        // Initially should be empty
        let history = tracker.get_allocation_history();
        assert!(history.is_ok());
        assert_eq!(history.unwrap().len(), 0);
    }

    #[test]
    fn test_memory_analysis_path_creation() {
        let tracker = MemoryTracker::new();

        let path = tracker.ensure_memory_analysis_path("test.svg");
        assert!(path.to_string_lossy().contains("MemoryAnalysis"));
        assert!(path.to_string_lossy().ends_with("test.svg"));
    }

    #[test]
    fn test_memscope_path_creation() {
        let tracker = MemoryTracker::new();

        let path = tracker.ensure_memscope_path("test");
        assert!(path.to_string_lossy().contains("MemoryAnalysis"));
        assert!(path.to_string_lossy().ends_with(".memscope"));

        let path_with_ext = tracker.ensure_memscope_path("test.memscope");
        assert!(path_with_ext.to_string_lossy().ends_with(".memscope"));
    }

    #[test]
    fn test_binary_export_mode_default() {
        let mode = BinaryExportMode::default();
        assert_eq!(mode, BinaryExportMode::UserOnly);
    }

    #[test]
    fn test_binary_export_mode_variants() {
        // Test that enum variants are different
        assert_ne!(
            std::mem::discriminant(&BinaryExportMode::UserOnly),
            std::mem::discriminant(&BinaryExportMode::Full)
        );
    }

    #[test]
    fn test_global_tracker_singleton() {
        let tracker1 = get_tracker();
        let tracker2 = get_tracker();

        // Should be the same instance (Arc comparison)
        assert!(Arc::ptr_eq(&tracker1, &tracker2));
    }

    #[test]
    fn test_drop_chain_analysis_basic() {
        let tracker = MemoryTracker::new();
        tracker.enable_fast_mode();

        let analysis = tracker.analyze_drop_chain(0x1000, "Vec<i32>");
        assert!(analysis.is_some());

        let analysis = analysis.unwrap();
        assert_eq!(analysis.root_object.object_id, 0x1000);
        assert_eq!(analysis.root_object.type_name, "Vec<i32>");
        assert!(analysis.total_duration_ns > 0);
    }

    #[test]
    fn test_drop_implementation_type_detection() {
        let tracker = MemoryTracker::new();

        // Test smart pointer detection
        let box_type = tracker.determine_drop_implementation_type("Box<i32>");
        assert_eq!(
            box_type,
            crate::core::types::DropImplementationType::SmartPointer
        );

        // Test collection detection
        let vec_type = tracker.determine_drop_implementation_type("Vec<i32>");
        assert_eq!(
            vec_type,
            crate::core::types::DropImplementationType::Collection
        );

        // Test resource handle detection
        let file_type = tracker.determine_drop_implementation_type("File");
        assert_eq!(
            file_type,
            crate::core::types::DropImplementationType::ResourceHandle
        );

        // Test copy type detection
        let int_type = tracker.determine_drop_implementation_type("i32");
        assert_eq!(int_type, crate::core::types::DropImplementationType::NoOp);
    }

    #[test]
    fn test_copy_type_detection() {
        let tracker = MemoryTracker::new();

        assert!(tracker.is_copy_type("i32"));
        assert!(tracker.is_copy_type("u64"));
        assert!(tracker.is_copy_type("f32"));
        assert!(tracker.is_copy_type("bool"));
        assert!(tracker.is_copy_type("char"));
        assert!(tracker.is_copy_type("&str"));

        assert!(!tracker.is_copy_type("String"));
        assert!(!tracker.is_copy_type("Vec<i32>"));
        assert!(!tracker.is_copy_type("HashMap<String, i32>"));
    }

    #[test]
    fn test_custom_drop_detection() {
        let tracker = MemoryTracker::new();

        assert!(tracker.has_custom_drop_impl("MutexGuard"));
        assert!(tracker.has_custom_drop_impl("RwLockWriteGuard"));
        assert!(tracker.has_custom_drop_impl("Receiver<i32>"));
        assert!(tracker.has_custom_drop_impl("Sender<String>"));

        assert!(!tracker.has_custom_drop_impl("i32"));
        assert!(!tracker.has_custom_drop_impl("String"));
    }

    #[test]
    fn test_drop_duration_estimation() {
        let tracker = MemoryTracker::new();

        // Vec should have a nonzero but bounded estimated drop time
        let vec_duration = tracker.estimate_drop_duration("Vec<i32>");
        assert!(vec_duration > 0);
        assert!(vec_duration < 10000); // generous upper bound for the estimate

        // File operations should be slower
        let file_duration = tracker.estimate_drop_duration("File");
        assert!(file_duration > vec_duration);

        // Simple types should be fastest
        let simple_duration = tracker.estimate_drop_duration("i32");
        assert!(simple_duration < vec_duration);
    }

    #[test]
    fn test_ownership_type_determination() {
        let tracker = MemoryTracker::new();

        assert_eq!(
            tracker.determine_ownership_type("Box<i32>"),
            crate::core::types::OwnershipType::Unique
        );
        assert_eq!(
            tracker.determine_ownership_type("Rc<i32>"),
            crate::core::types::OwnershipType::SharedSingleThreaded
        );
        assert_eq!(
            tracker.determine_ownership_type("Arc<i32>"),
            crate::core::types::OwnershipType::SharedMultiThreaded
        );
        assert_eq!(
            tracker.determine_ownership_type("&i32"),
            crate::core::types::OwnershipType::Borrowed
        );
        assert_eq!(
            tracker.determine_ownership_type("Weak<i32>"),
            crate::core::types::OwnershipType::Weak
        );
    }

    #[test]
    fn test_ownership_depth_calculation() {
        let tracker = MemoryTracker::new();

        // Collections should have depth > 1
        assert!(tracker.calculate_ownership_depth(0x1000, "Vec<i32>") > 1);
        assert!(tracker.calculate_ownership_depth(0x1000, "HashMap<String, i32>") > 1);
        assert!(tracker.calculate_ownership_depth(0x1000, "Box<i32>") > 1);

        // Simple types should have depth 1
        assert_eq!(tracker.calculate_ownership_depth(0x1000, "i32"), 1);
    }

    #[test]
    fn test_owned_objects_counting() {
        let tracker = MemoryTracker::new();

        // Collections should own multiple objects
        assert!(tracker.count_owned_objects(0x1000, "Vec<i32>") > 1);
        assert!(tracker.count_owned_objects(0x1000, "HashMap<String, i32>") > 1);

        // Simple types should own just themselves
        assert_eq!(tracker.count_owned_objects(0x1000, "i32"), 1);
    }

    #[test]
    fn test_reference_count_detection() {
        let tracker = MemoryTracker::new();

        // Reference counted types should return Some
        assert!(tracker.get_reference_count_for_type("Rc<i32>").is_some());
        assert!(tracker.get_reference_count_for_type("Arc<i32>").is_some());

        // Non-reference counted types should return None
        assert!(tracker.get_reference_count_for_type("Box<i32>").is_none());
        assert!(tracker.get_reference_count_for_type("i32").is_none());
    }

    #[test]
    fn test_generic_type_extraction() {
        let tracker = MemoryTracker::new();

        assert_eq!(
            tracker.extract_generic_type("Vec<i32>", "Vec"),
            Some("i32".to_string())
        );
        assert_eq!(
            tracker.extract_generic_type("HashMap<String, i32>", "HashMap"),
            Some("String, i32".to_string())
        );
        assert_eq!(
            tracker.extract_generic_type("Box<Vec<String>>", "Box"),
            Some("Vec<String>".to_string())
        );

        // Non-generic types should return None
        assert_eq!(tracker.extract_generic_type("i32", ""), None);
    }
}