// memscope_rs/core/tracker/allocation_tracking.rs

1//! Memory allocation tracking implementation with bounded memory stats.
2//!
3//! This module contains the core allocation and deallocation tracking logic
4//! for the MemoryTracker, using BoundedMemoryStats to prevent infinite growth.
5
6use super::memory_tracker::MemoryTracker;
7use crate::core::ownership_history::OwnershipEventType;
8use crate::core::types::{AllocationInfo, TrackingResult};
9
10impl MemoryTracker {
11    /// Fast track allocation for testing (minimal overhead)
12    pub fn fast_track_allocation(
13        &self,
14        ptr: usize,
15        size: usize,
16        var_name: String,
17    ) -> TrackingResult<()> {
18        if !self.is_fast_mode() {
19            return self.create_synthetic_allocation(ptr, size, var_name, "unknown".to_string(), 0);
20        }
21
22        // In fast mode, create minimal allocation info but still track it
23        let mut allocation = AllocationInfo::new(ptr, size);
24        allocation.var_name = Some(var_name);
25        allocation.type_name = Some("fast_tracked".to_string());
26
27        // Apply Task 4 enhancement: calculate lifetime
28        self.calculate_and_analyze_lifetime(&mut allocation);
29
30        // Use blocking locks in fast mode for accurate tracking
31        match (self.active_allocations.lock(), self.bounded_stats.lock()) {
32            (Ok(mut active), Ok(mut bounded_stats)) => {
33                active.insert(ptr, allocation.clone());
34                bounded_stats.add_allocation(&allocation);
35                Ok(())
36            }
37            _ => {
38                // Fallback: still track the allocation even if locks fail
39                tracing::warn!("Failed to acquire locks in fast_track_allocation");
40                Ok(())
41            }
42        }
43    }
44
45    /// Track a new memory allocation using bounded stats.
46    pub fn track_allocation(&self, ptr: usize, size: usize) -> TrackingResult<()> {
47        // CRITICAL FIX: Skip advanced tracking for global allocator calls
48        // Only do basic tracking for system allocations, save advanced features for user variables
49        let is_user_variable = false; // This is a system allocation from global allocator
50
51        // Create allocation info first (no locks needed)
52        let mut allocation = AllocationInfo::new(ptr, size);
53
54        // Apply Task 4 enhancement: calculate lifetime (only for user variables)
55        if is_user_variable {
56            self.calculate_and_analyze_lifetime(&mut allocation);
57        }
58
59        // In test mode or when explicitly requested, use blocking locks for accuracy
60        let use_blocking_locks = self.is_fast_mode()
61            || std::env::var("MEMSCOPE_ACCURATE_TRACKING").is_ok()
62            || cfg!(test);
63
64        if use_blocking_locks {
65            // Use blocking locks to ensure all allocations are tracked in tests
66            let mut active = self.active_allocations.lock().map_err(|_| {
67                crate::core::types::TrackingError::LockError(
68                    "Failed to acquire active_allocations lock".to_string(),
69                )
70            })?;
71
72            let mut bounded_stats = self.bounded_stats.lock().map_err(|_| {
73                crate::core::types::TrackingError::LockError(
74                    "Failed to acquire bounded_stats lock".to_string(),
75                )
76            })?;
77
78            // Insert allocation into active tracking
79            active.insert(ptr, allocation.clone());
80
81            // Update bounded statistics (automatically handles bounds)
82            bounded_stats.add_allocation(&allocation);
83
84            // Release locks before adding to history
85            drop(bounded_stats);
86            drop(active);
87
88            // Add to bounded history manager (automatically handles bounds)
89            if !self.is_fast_mode() && std::env::var("MEMSCOPE_FULL_HISTORY").is_ok() {
90                if let Ok(mut history_manager) = self.history_manager.try_lock() {
91                    history_manager.add_allocation(allocation);
92                }
93            }
94
95            Ok(())
96        } else {
97            // Production mode: use try_lock with improved retry logic
98            self.track_allocation_with_retry(ptr, size, allocation)
99        }
100    }
101
102    /// Track a memory allocation with enhanced context information
103    pub fn track_allocation_with_context(
104        &self,
105        ptr: usize,
106        size: usize,
107        inferred_var_name: String,
108        inferred_type_name: String,
109    ) -> TrackingResult<()> {
110        // Create allocation info with enhanced context
111        let mut allocation = AllocationInfo::new(ptr, size);
112
113        // Set the inferred names - this gives system allocations meaningful names
114        allocation.var_name = Some(inferred_var_name);
115        allocation.type_name = Some(inferred_type_name);
116
117        // Apply Task 4 enhancement: calculate lifetime
118        self.calculate_and_analyze_lifetime(&mut allocation);
119
120        // Use the same locking strategy as regular track_allocation
121        let use_blocking_locks = self.is_fast_mode()
122            || std::env::var("MEMSCOPE_ACCURATE_TRACKING").is_ok()
123            || cfg!(test);
124
125        if use_blocking_locks {
126            // Use blocking locks to ensure all allocations are tracked in tests
127            let mut active = self.active_allocations.lock().map_err(|_| {
128                crate::core::types::TrackingError::LockError(
129                    "Failed to acquire active_allocations lock".to_string(),
130                )
131            })?;
132
133            let mut bounded_stats = self.bounded_stats.lock().map_err(|_| {
134                crate::core::types::TrackingError::LockError(
135                    "Failed to acquire bounded_stats lock".to_string(),
136                )
137            })?;
138
139            // Insert allocation into active tracking
140            active.insert(ptr, allocation.clone());
141
142            // Update bounded statistics (automatically handles bounds)
143            bounded_stats.add_allocation(&allocation);
144
145            // Release locks before adding to history
146            drop(bounded_stats);
147            drop(active);
148
149            // Add to bounded history manager (automatically handles bounds)
150            if !self.is_fast_mode() && std::env::var("MEMSCOPE_FULL_HISTORY").is_ok() {
151                if let Ok(mut history_manager) = self.history_manager.try_lock() {
152                    history_manager.add_allocation(allocation);
153                }
154            }
155
156            Ok(())
157        } else {
158            // Production mode: use try_lock with improved retry logic
159            self.track_allocation_with_context_retry(ptr, size, allocation)
160        }
161    }
162
163    /// Track a memory deallocation using bounded stats.
164    pub fn track_deallocation(&self, ptr: usize) -> TrackingResult<()> {
165        let dealloc_timestamp = std::time::SystemTime::now()
166            .duration_since(std::time::UNIX_EPOCH)
167            .unwrap_or_default()
168            .as_nanos() as u64;
169
170        // In test mode or when explicitly requested, use blocking locks for accuracy
171        let use_blocking_locks = self.is_fast_mode()
172            || std::env::var("MEMSCOPE_ACCURATE_TRACKING").is_ok()
173            || cfg!(test);
174
175        if use_blocking_locks {
176            self.track_deallocation_blocking(ptr, dealloc_timestamp)
177        } else {
178            // Production mode: use try_lock with improved retry logic
179            self.track_deallocation_with_retry(ptr, dealloc_timestamp)
180        }
181    }
182
183    // Private helper methods
184
185    /// Track allocation with retry logic for production mode
186    fn track_allocation_with_retry(
187        &self,
188        ptr: usize,
189        _size: usize,
190        allocation: AllocationInfo,
191    ) -> TrackingResult<()> {
192        let mut retry_count = 0;
193        const MAX_RETRIES: u32 = 10;
194
195        while retry_count < MAX_RETRIES {
196            match (
197                self.active_allocations.try_lock(),
198                self.bounded_stats.try_lock(),
199            ) {
200                (Ok(mut active), Ok(mut bounded_stats)) => {
201                    // Insert allocation into active tracking
202                    active.insert(ptr, allocation.clone());
203
204                    // Update bounded statistics (automatically handles bounds)
205                    bounded_stats.add_allocation(&allocation);
206
207                    return Ok(());
208                }
209                _ => {
210                    retry_count += 1;
211                    if retry_count < MAX_RETRIES {
212                        std::thread::yield_now();
213                    }
214                }
215            }
216        }
217
218        // If all retries failed, return error
219        Err(crate::core::types::TrackingError::LockError(
220            "Failed to acquire locks after retries".to_string(),
221        ))
222    }
223
224    /// Track allocation with context retry logic for production mode
225    fn track_allocation_with_context_retry(
226        &self,
227        ptr: usize,
228        _size: usize,
229        allocation: AllocationInfo,
230    ) -> TrackingResult<()> {
231        let mut retry_count = 0;
232        const MAX_RETRIES: u32 = 10;
233
234        while retry_count < MAX_RETRIES {
235            match (
236                self.active_allocations.try_lock(),
237                self.bounded_stats.try_lock(),
238            ) {
239                (Ok(mut active), Ok(mut bounded_stats)) => {
240                    // Insert allocation into active tracking
241                    active.insert(ptr, allocation.clone());
242
243                    // Update bounded statistics (automatically handles bounds)
244                    bounded_stats.add_allocation(&allocation);
245
246                    // Try to add to history manager if possible
247                    if let Ok(mut history_manager) = self.history_manager.try_lock() {
248                        history_manager.add_allocation(allocation);
249                    }
250
251                    return Ok(());
252                }
253                _ => {
254                    retry_count += 1;
255                    if retry_count < MAX_RETRIES {
256                        std::thread::yield_now();
257                    }
258                }
259            }
260        }
261
262        // If all retries failed, return error
263        Err(crate::core::types::TrackingError::LockError(
264            "Failed to acquire locks after retries".to_string(),
265        ))
266    }
267
268    /// Track deallocation with blocking locks
269    fn track_deallocation_blocking(
270        &self,
271        ptr: usize,
272        dealloc_timestamp: u64,
273    ) -> TrackingResult<()> {
274        let mut active = self.active_allocations.lock().map_err(|_| {
275            crate::core::types::TrackingError::LockError(
276                "Failed to acquire active_allocations lock".to_string(),
277            )
278        })?;
279
280        let mut bounded_stats = self.bounded_stats.lock().map_err(|_| {
281            crate::core::types::TrackingError::LockError(
282                "Failed to acquire bounded_stats lock".to_string(),
283            )
284        })?;
285
286        if let Some(mut allocation) = active.remove(&ptr) {
287            // Set deallocation timestamp
288            allocation.timestamp_dealloc = Some(dealloc_timestamp);
289
290            // Apply Task 4 enhancement: calculate lifetime for deallocated allocation
291            self.calculate_and_analyze_lifetime(&mut allocation);
292
293            // Update bounded statistics
294            bounded_stats.record_deallocation(ptr, allocation.size);
295
296            // Release locks before updating history
297            drop(bounded_stats);
298            drop(active);
299
300            // Update allocation history with deallocation timestamp
301            if let Ok(mut history_manager) = self.history_manager.try_lock() {
302                history_manager.add_allocation(allocation);
303            }
304        }
305        Ok(())
306    }
307
308    /// Track deallocation with retry logic for production mode
309    fn track_deallocation_with_retry(
310        &self,
311        ptr: usize,
312        dealloc_timestamp: u64,
313    ) -> TrackingResult<()> {
314        let mut retry_count = 0;
315        const MAX_RETRIES: u32 = 10;
316
317        while retry_count < MAX_RETRIES {
318            match (
319                self.active_allocations.try_lock(),
320                self.bounded_stats.try_lock(),
321            ) {
322                (Ok(mut active), Ok(mut bounded_stats)) => {
323                    if let Some(mut allocation) = active.remove(&ptr) {
324                        // Set deallocation timestamp
325                        allocation.timestamp_dealloc = Some(dealloc_timestamp);
326
327                        // Apply Task 4 enhancement: calculate lifetime for deallocated allocation
328                        self.calculate_and_analyze_lifetime(&mut allocation);
329
330                        // Update bounded statistics
331                        bounded_stats.record_deallocation(ptr, allocation.size);
332
333                        // Release locks before updating history
334                        drop(bounded_stats);
335                        drop(active);
336
337                        // Update allocation history with deallocation timestamp
338                        if let Ok(mut history_manager) = self.history_manager.try_lock() {
339                            history_manager.add_allocation(allocation);
340                        }
341                    }
342                    return Ok(());
343                }
344                _ => {
345                    retry_count += 1;
346                    if retry_count < MAX_RETRIES {
347                        std::thread::yield_now();
348                    }
349                }
350            }
351        }
352
353        // If all retries failed, return error
354        Err(crate::core::types::TrackingError::LockError(
355            "Failed to acquire locks after retries".to_string(),
356        ))
357    }
358
359    /// Enhanced lifetime calculation and analysis for Task 4
360    /// This method fills the lifetime_ms field with precise calculations and adds lifecycle analysis
361    fn calculate_and_analyze_lifetime(&self, allocation: &mut AllocationInfo) {
362        // 1. Calculate precise lifetime based on timestamps
363        if allocation.lifetime_ms.is_none() {
364            if let Some(dealloc_time) = allocation.timestamp_dealloc {
365                // For deallocated objects, calculate exact lifetime
366                let lifetime_ns = dealloc_time.saturating_sub(allocation.timestamp_alloc);
367                let lifetime_ms = lifetime_ns / 1_000_000; // Convert to milliseconds
368                tracing::debug!(
369                    "Deallocated allocation lifetime: {}ns -> {}ms",
370                    lifetime_ns,
371                    lifetime_ms
372                );
373                allocation.lifetime_ms = Some(lifetime_ms);
374            } else {
375                // For active allocations, calculate current lifetime
376                let current_time = std::time::SystemTime::now()
377                    .duration_since(std::time::UNIX_EPOCH)
378                    .unwrap_or_default()
379                    .as_nanos() as u64;
380                let lifetime_ns = current_time.saturating_sub(allocation.timestamp_alloc);
381                let lifetime_ms = lifetime_ns / 1_000_000; // Convert to milliseconds
382                tracing::debug!(
383                    "Active allocation lifetime: {}ns -> {}ms",
384                    lifetime_ns,
385                    lifetime_ms
386                );
387                allocation.lifetime_ms = Some(lifetime_ms);
388            }
389        }
390
391        // 2. Perform lifecycle analysis and efficiency evaluation
392        if let Some(lifetime_ms) = allocation.lifetime_ms {
393            self.analyze_lifecycle_efficiency(allocation, lifetime_ms);
394        }
395    }
396
    /// Analyze lifecycle efficiency (placeholder implementation).
    ///
    /// Intentionally a no-op: `calculate_and_analyze_lifetime` invokes this
    /// hook with the computed lifetime so a real analysis can be plugged in
    /// later without changing any caller.
    fn analyze_lifecycle_efficiency(&self, _allocation: &mut AllocationInfo, _lifetime_ms: u64) {
        // This would contain the actual lifecycle analysis logic
        // For now, it's a placeholder to maintain compatibility
    }
402
403    /// Create synthetic allocation with proper var_name and type_name
404    pub fn create_synthetic_allocation(
405        &self,
406        ptr: usize,
407        size: usize,
408        var_name: String,
409        type_name: String,
410        _creation_time: u64,
411    ) -> TrackingResult<()> {
412        let mut allocation = AllocationInfo::new(ptr, size);
413        allocation.var_name = Some(var_name.clone());
414        allocation.type_name = Some(type_name.clone());
415
416        // Apply improve.md field enhancements based on type
417        allocation.enhance_with_type_info(&type_name);
418
419        // Store the allocation and update stats
420        match self.active_allocations.try_lock() {
421            Ok(mut active) => {
422                active.insert(ptr, allocation.clone());
423                drop(active); // Release active lock before acquiring bounded_stats lock
424
425                // CRITICAL FIX: Update bounded stats for synthetic allocations
426                if let Ok(mut bounded_stats) = self.bounded_stats.try_lock() {
427                    bounded_stats.add_allocation(&allocation);
428                }
429
430                tracing::debug!(
431                    "Created synthetic allocation for '{}' ({}): ptr=0x{:x}, size={}",
432                    var_name,
433                    type_name,
434                    ptr,
435                    size
436                );
437                Ok(())
438            }
439            Err(_) => {
440                tracing::debug!(
441                    "Could not acquire lock for synthetic allocation: {}",
442                    var_name
443                );
444                Ok(())
445            }
446        }
447    }
448
449    /// Associate a variable name and type with an allocation.
450    pub fn associate_var(
451        &self,
452        ptr: usize,
453        var_name: String,
454        type_name: String,
455    ) -> TrackingResult<()> {
456        // In test mode or when explicitly requested, use blocking locks for accuracy
457        let use_blocking_locks = self.is_fast_mode()
458            || std::env::var("MEMSCOPE_ACCURATE_TRACKING").is_ok()
459            || cfg!(test);
460
461        if use_blocking_locks {
462            // Use blocking locks to ensure all associations are tracked in tests
463            let mut active = self.active_allocations.lock().map_err(|_| {
464                crate::core::types::TrackingError::LockError(
465                    "Failed to acquire active_allocations lock".to_string(),
466                )
467            })?;
468
469            if let Some(allocation) = active.get_mut(&ptr) {
470                let old_var_name_is_none = allocation.var_name.is_none();
471
472                allocation.var_name = Some(var_name.clone());
473                allocation.type_name = Some(type_name.clone());
474
475                // Apply improve.md field enhancements based on type
476                allocation.enhance_with_type_info(&type_name);
477
478                // CRITICAL FIX: Update bounded_stats after associating var_name
479                // Clone the allocation to pass to bounded_stats
480                let allocation_clone = allocation.clone();
481                drop(active); // Release active lock before acquiring bounded_stats lock
482
483                if let Ok(mut bounded_stats) = self.bounded_stats.lock() {
484                    bounded_stats
485                        .update_active_allocation_status(&allocation_clone, old_var_name_is_none);
486                }
487
488                tracing::debug!(
489                    "Associated variable '{}' with existing allocation at {:x}",
490                    var_name,
491                    ptr
492                );
493            } else {
494                // For smart pointers and other complex types, create a synthetic allocation entry
495                let mut synthetic_allocation = AllocationInfo::new(ptr, 0);
496                synthetic_allocation.var_name = Some(var_name.clone());
497                synthetic_allocation.type_name = Some(type_name.clone());
498
499                // Estimate size based on type
500                let estimated_size = self.estimate_type_size(&type_name);
501                synthetic_allocation.size = estimated_size;
502
503                // Apply improve.md field enhancements based on type
504                synthetic_allocation.enhance_with_type_info(&type_name);
505
506                // Add to active allocations for tracking
507                active.insert(ptr, synthetic_allocation.clone());
508
509                // Release active lock before acquiring bounded_stats lock
510                drop(active);
511
512                let mut bounded_stats = self.bounded_stats.lock().map_err(|_| {
513                    crate::core::types::TrackingError::LockError(
514                        "Failed to acquire bounded_stats lock".to_string(),
515                    )
516                })?;
517                bounded_stats.add_allocation(&synthetic_allocation);
518
519                tracing::debug!(
520                    "Created synthetic allocation for variable '{}' at {:x} (estimated size: {})",
521                    var_name,
522                    ptr,
523                    estimated_size
524                );
525            }
526            Ok(())
527        } else {
528            // Production mode: use try_lock with retry logic
529            self.associate_var_with_retry(ptr, var_name, type_name)
530        }
531    }
532
533    /// Associate variable with retry logic for production mode
534    fn associate_var_with_retry(
535        &self,
536        ptr: usize,
537        var_name: String,
538        type_name: String,
539    ) -> TrackingResult<()> {
540        let mut retry_count = 0;
541        const MAX_RETRIES: u32 = 10;
542
543        while retry_count < MAX_RETRIES {
544            match self.active_allocations.try_lock() {
545                Ok(mut active) => {
546                    if let Some(allocation) = active.get_mut(&ptr) {
547                        allocation.var_name = Some(var_name.clone());
548                        allocation.type_name = Some(type_name.clone());
549
550                        // Apply improve.md field enhancements based on type
551                        allocation.enhance_with_type_info(&type_name);
552
553                        tracing::debug!(
554                            "Associated variable '{}' with existing allocation at {:x}",
555                            var_name,
556                            ptr
557                        );
558                        return Ok(());
559                    } else {
560                        // For smart pointers and other complex types, create a synthetic allocation entry
561                        let mut synthetic_allocation = AllocationInfo::new(ptr, 0);
562                        synthetic_allocation.var_name = Some(var_name.clone());
563                        synthetic_allocation.type_name = Some(type_name.clone());
564
565                        // Estimate size based on type
566                        let estimated_size = self.estimate_type_size(&type_name);
567                        synthetic_allocation.size = estimated_size;
568
569                        // Apply improve.md field enhancements based on type
570                        synthetic_allocation.enhance_with_type_info(&type_name);
571
572                        // Add to active allocations for tracking
573                        active.insert(ptr, synthetic_allocation.clone());
574
575                        // Release active lock before acquiring bounded_stats lock
576                        drop(active);
577
578                        if let Ok(mut bounded_stats) = self.bounded_stats.try_lock() {
579                            bounded_stats.add_allocation(&synthetic_allocation);
580                        }
581
582                        tracing::debug!("Created synthetic allocation for variable '{}' at {:x} (estimated size: {})", 
583                                       var_name, ptr, estimated_size);
584                        return Ok(());
585                    }
586                }
587                Err(_) => {
588                    retry_count += 1;
589                    if retry_count < MAX_RETRIES {
590                        std::thread::yield_now();
591                    }
592                }
593            }
594        }
595
596        // If all retries failed, return error
597        Err(crate::core::types::TrackingError::LockError(
598            "Failed to acquire locks after retries".to_string(),
599        ))
600    }
601
602    /// Enhance allocation with improve.md required fields
603    fn _enhance_allocation_with_improve_md_fields(
604        mut allocation: AllocationInfo,
605    ) -> AllocationInfo {
606        // Simulate borrowing information based on type patterns
607        if let Some(ref type_name) = allocation.type_name {
608            // Detect reference counting types (Rc, Arc)
609            if type_name.contains("Rc<") || type_name.contains("Arc<") {
610                allocation.clone_info = Some(crate::core::types::CloneInfo {
611                    clone_count: 2,  // Simulate that Rc/Arc types are typically cloned
612                    is_clone: false, // This is the original
613                    original_ptr: None,
614                });
615                allocation.ownership_history_available = true;
616            }
617
618            // Detect collections that are commonly borrowed
619            if type_name.contains("Vec<")
620                || type_name.contains("String")
621                || type_name.contains("HashMap")
622            {
623                allocation.borrow_info = Some(crate::core::types::BorrowInfo {
624                    immutable_borrows: 3, // Simulate common borrowing patterns
625                    mutable_borrows: 1,
626                    max_concurrent_borrows: 2,
627                    last_borrow_timestamp: Some(allocation.timestamp_alloc + 1000000),
628                });
629                allocation.ownership_history_available = true;
630            }
631
632            // Detect Box types
633            if type_name.contains("Box<") {
634                allocation.borrow_info = Some(crate::core::types::BorrowInfo {
635                    immutable_borrows: 1,
636                    mutable_borrows: 0,
637                    max_concurrent_borrows: 1,
638                    last_borrow_timestamp: Some(allocation.timestamp_alloc + 500000),
639                });
640                allocation.ownership_history_available = true;
641            }
642        }
643
644        // Calculate lifetime_ms for active allocations
645        if allocation.timestamp_dealloc.is_none() {
646            // For active allocations, calculate elapsed time
647            let current_time = std::time::SystemTime::now()
648                .duration_since(std::time::UNIX_EPOCH)
649                .unwrap_or_default()
650                .as_nanos() as u64;
651            let elapsed_ns = current_time.saturating_sub(allocation.timestamp_alloc);
652            allocation.lifetime_ms = Some(elapsed_ns / 1_000_000); // Convert to milliseconds
653        }
654
655        allocation
656    }
657
658    /// Track smart pointer clone relationship
659    pub fn track_smart_pointer_clone(
660        &self,
661        clone_ptr: usize,
662        source_ptr: usize,
663        _data_ptr: usize,
664        _new_ref_count: usize,
665        _weak_count: usize,
666    ) -> TrackingResult<()> {
667        match self.active_allocations.try_lock() {
668            Ok(mut active) => {
669                // Update source pointer's clone list
670                if let Some(source_alloc) = active.get_mut(&source_ptr) {
671                    if let Some(ref mut smart_info) = source_alloc.smart_pointer_info {
672                        smart_info.record_clone(clone_ptr, source_ptr);
673                    }
674                }
675
676                // Update clone pointer's source reference
677                if let Some(clone_alloc) = active.get_mut(&clone_ptr) {
678                    if let Some(ref mut smart_info) = clone_alloc.smart_pointer_info {
679                        smart_info.cloned_from = Some(source_ptr);
680                    }
681                }
682
683                tracing::debug!(
684                    "🔗 Tracked clone relationship: 0x{:x} -> 0x{:x}",
685                    source_ptr,
686                    clone_ptr
687                );
688
689                Ok(())
690            }
691            Err(_) => {
692                // Skip if we can't get the lock
693                Ok(())
694            }
695        }
696    }
697
698    /// Update reference count for a smart pointer
699    pub fn update_smart_pointer_ref_count(
700        &self,
701        ptr: usize,
702        strong_count: usize,
703        weak_count: usize,
704    ) -> TrackingResult<()> {
705        match self.active_allocations.try_lock() {
706            Ok(mut active) => {
707                if let Some(allocation) = active.get_mut(&ptr) {
708                    if let Some(ref mut smart_info) = allocation.smart_pointer_info {
709                        smart_info.update_ref_count(strong_count, weak_count);
710
711                        tracing::debug!(
712                            "📊 Updated ref count for 0x{:x}: strong={}, weak={}",
713                            ptr,
714                            strong_count,
715                            weak_count
716                        );
717                    }
718                }
719                Ok(())
720            }
721            Err(_) => Ok(()),
722        }
723    }
724
725    /// Create a specialized synthetic allocation for smart pointers
726    #[allow(clippy::too_many_arguments)]
727    pub fn create_smart_pointer_allocation(
728        &self,
729        ptr: usize,
730        size: usize,
731        var_name: String,
732        type_name: String,
733        creation_time: u64,
734        ref_count: usize,
735        data_ptr: usize,
736    ) -> TrackingResult<()> {
737        let mut allocation = AllocationInfo::new(ptr, size);
738        allocation.var_name = Some(var_name.clone());
739        allocation.type_name = Some(type_name.clone());
740        allocation.timestamp_alloc = creation_time;
741
742        // Determine smart pointer type
743        let pointer_type = if type_name.contains("std::rc::Rc") {
744            crate::core::types::SmartPointerType::Rc
745        } else if type_name.contains("std::sync::Arc") {
746            crate::core::types::SmartPointerType::Arc
747        } else if type_name.contains("std::rc::Weak") {
748            crate::core::types::SmartPointerType::RcWeak
749        } else if type_name.contains("std::sync::Weak") {
750            crate::core::types::SmartPointerType::ArcWeak
751        } else if type_name.contains("Box") {
752            crate::core::types::SmartPointerType::Box
753        } else {
754            crate::core::types::SmartPointerType::Rc // Default fallback
755        };
756
757        // Create smart pointer info
758        let smart_pointer_info = if matches!(
759            pointer_type,
760            crate::core::types::SmartPointerType::RcWeak
761                | crate::core::types::SmartPointerType::ArcWeak
762        ) {
763            crate::core::types::SmartPointerInfo::new_weak(data_ptr, pointer_type, ref_count)
764        } else {
765            crate::core::types::SmartPointerInfo::new_rc_arc(data_ptr, pointer_type, ref_count, 0)
766        };
767
768        allocation.smart_pointer_info = Some(smart_pointer_info);
769
770        // Enhance allocation with detailed analysis
771        self.enhance_allocation_info(&mut allocation);
772
773        // Use try_lock to avoid blocking
774        match (
775            self.active_allocations.try_lock(),
776            self.bounded_stats.try_lock(),
777        ) {
778            (Ok(mut active), Ok(mut bounded_stats)) => {
779                // Add to active allocations
780                active.insert(ptr, allocation.clone());
781
782                // Update bounded statistics
783                bounded_stats.add_allocation(&allocation);
784
785                // Release locks before updating history
786                drop(bounded_stats);
787                drop(active);
788
789                // Add to allocation history (only if needed for analysis and not in fast mode)
790                if !self.is_fast_mode() && std::env::var("MEMSCOPE_FULL_HISTORY").is_ok() {
791                    if let Ok(mut history_manager) = self.history_manager.try_lock() {
792                        history_manager.add_allocation(allocation);
793                    }
794                }
795
796                tracing::debug!(
797                    "🎯 Created smart pointer allocation for '{}' ({}): ptr=0x{:x}, size={}, ref_count={}, data_ptr=0x{:x}",
798                    var_name,
799                    type_name,
800                    ptr,
801                    size,
802                    ref_count,
803                    data_ptr
804                );
805
806                Ok(())
807            }
808            _ => {
809                // Use a brief retry strategy instead of immediate failure
810                for attempt in 0..3 {
811                    std::thread::sleep(std::time::Duration::from_nanos(100 * (attempt + 1)));
812                    if let (Ok(mut active), Ok(mut bounded_stats)) = (
813                        self.active_allocations.try_lock(),
814                        self.bounded_stats.try_lock(),
815                    ) {
816                        active.insert(ptr, allocation.clone());
817                        bounded_stats.add_allocation(&allocation);
818                        drop(bounded_stats);
819                        drop(active);
820
821                        // Add to allocation history (only if needed for analysis)
822                        if std::env::var("MEMSCOPE_FULL_HISTORY").is_ok() {
823                            if let Ok(mut history_manager) = self.history_manager.try_lock() {
824                                history_manager.add_allocation(allocation.clone());
825                            }
826                        }
827
828                        tracing::debug!(
829                            "🎯 Created smart pointer allocation for '{}' ({}): ptr=0x{:x}, size={}, ref_count={}, data_ptr=0x{:x} (attempt {})",
830                            var_name,
831                            type_name,
832                            ptr,
833                            size,
834                            ref_count,
835                            data_ptr,
836                            attempt + 1
837                        );
838                        return Ok(());
839                    }
840                }
841
842                // Only debug log after all retries failed
843                tracing::debug!(
844                    "⚠️ Failed to create smart pointer allocation for '{}' after retries",
845                    var_name
846                );
847                Ok(())
848            }
849        }
850    }
851
852    /// Track a memory deallocation with precise lifetime information.
853    pub fn track_deallocation_with_lifetime(
854        &self,
855        ptr: usize,
856        lifetime_ms: u64,
857    ) -> TrackingResult<()> {
858        let dealloc_timestamp = std::time::SystemTime::now()
859            .duration_since(std::time::UNIX_EPOCH)
860            .unwrap_or_default()
861            .as_nanos() as u64;
862
863        // Use try_lock to avoid blocking during high deallocation activity
864        match (
865            self.active_allocations.try_lock(),
866            self.bounded_stats.try_lock(),
867        ) {
868            (Ok(mut active), Ok(mut bounded_stats)) => {
869                if let Some(mut allocation) = active.remove(&ptr) {
870                    // Set deallocation timestamp and lifetime
871                    allocation.timestamp_dealloc = Some(dealloc_timestamp);
872                    allocation.lifetime_ms = Some(lifetime_ms);
873
874                    // Update bounded statistics
875                    bounded_stats.record_deallocation(ptr, allocation.size);
876
877                    // Release locks before updating history
878                    drop(bounded_stats);
879                    drop(active);
880
881                    // Update allocation history with deallocation timestamp AND lifetime
882                    if let Ok(mut history_manager) = self.history_manager.try_lock() {
883                        history_manager.add_allocation(allocation);
884                    }
885
886                    Ok(())
887                } else {
888                    Ok(()) // Allocation not found, but don't error
889                }
890            }
891            _ => Ok(()), // Lock contention, skip to avoid deadlock
892        }
893    }
894
    /// Track the deallocation of a smart pointer with enhanced metadata.
    ///
    /// Currently a thin wrapper over [`Self::track_deallocation_with_lifetime`];
    /// the final reference count is accepted for API compatibility but not
    /// recorded (hence the underscore-prefixed parameter).
    pub fn track_smart_pointer_deallocation(
        &self,
        ptr: usize,
        lifetime_ms: u64,
        _final_ref_count: usize,
    ) -> TrackingResult<()> {
        self.track_deallocation_with_lifetime(ptr, lifetime_ms)
    }
904
    /// Enhance allocation info (placeholder implementation).
    ///
    /// Intentionally a no-op today: callers invoke it so the call sites stay
    /// stable once detailed enrichment (e.g. type/lifetime analysis) lands.
    fn enhance_allocation_info(&self, _allocation: &mut AllocationInfo) {
        // This would contain the actual enhancement logic
        // For now, it's a placeholder to maintain compatibility
    }
910
911    /// Record an ownership event for detailed lifecycle tracking
912    pub fn record_ownership_event(&self, ptr: usize, event_type: OwnershipEventType) {
913        if let Ok(mut ownership_history) = self.ownership_history.try_lock() {
914            ownership_history.record_event(ptr, event_type, 0);
915        }
916    }
917
918    /// Get ownership summary for an allocation
919    pub fn get_ownership_summary(
920        &self,
921        ptr: usize,
922    ) -> Option<crate::core::ownership_history::OwnershipSummary> {
923        if let Ok(ownership_history) = self.ownership_history.try_lock() {
924            ownership_history.get_summary(ptr).cloned()
925        } else {
926            None
927        }
928    }
929
930    /// Export ownership history to JSON
931    pub fn export_ownership_history(&self) -> Result<String, String> {
932        if let Ok(ownership_history) = self.ownership_history.try_lock() {
933            ownership_history
934                .export_to_json()
935                .map_err(|e| e.to_string())
936        } else {
937            Err("Failed to acquire ownership history lock".to_string())
938        }
939    }
940}
941
#[cfg(test)]
mod tests {
    use crate::core::ownership_history::OwnershipEventType;
    use crate::core::tracker::memory_tracker::MemoryTracker;
    use std::sync::Arc;

    /// Every test starts from a fresh, independent tracker instance.
    fn create_test_tracker() -> MemoryTracker {
        MemoryTracker::new()
    }

    #[test]
    fn test_fast_track_allocation() {
        let tracker = create_test_tracker();

        let result = tracker.fast_track_allocation(0x1000, 64, "test_var".to_string());
        assert!(result.is_ok());

        // Verify allocation was tracked
        let allocations = tracker.get_active_allocations().unwrap();
        let allocation = allocations.iter().find(|a| a.ptr == 0x1000).unwrap();
        assert_eq!(allocation.size, 64);
        assert_eq!(allocation.var_name, Some("test_var".to_string()));
        assert_eq!(allocation.type_name, Some("fast_tracked".to_string()));
    }

    #[test]
    fn test_fast_track_allocation_multiple() {
        let tracker = create_test_tracker();

        // Track multiple allocations
        for i in 0..5 {
            let ptr = 0x1000 + i * 0x100;
            let size = 64 + i * 32;
            let var_name = format!("var_{}", i);

            let result = tracker.fast_track_allocation(ptr, size, var_name.clone());
            assert!(result.is_ok());
        }

        // Verify all allocations were tracked
        let allocations = tracker.get_active_allocations().unwrap();
        assert_eq!(allocations.len(), 5);

        for i in 0..5 {
            let ptr = 0x1000 + i * 0x100;
            let allocation = allocations.iter().find(|a| a.ptr == ptr).unwrap();
            assert_eq!(allocation.size, 64 + i * 32);
            assert_eq!(allocation.var_name, Some(format!("var_{}", i)));
        }
    }

    #[test]
    fn test_track_allocation() {
        let tracker = create_test_tracker();

        let result = tracker.track_allocation(0x2000, 128);
        assert!(result.is_ok());

        // Verify allocation was tracked
        let allocations = tracker.get_active_allocations().unwrap();
        let allocation = allocations.iter().find(|a| a.ptr == 0x2000).unwrap();
        assert_eq!(allocation.size, 128);
        assert_eq!(allocation.ptr, 0x2000);
    }

    #[test]
    fn test_track_allocation_with_context() {
        let tracker = create_test_tracker();

        let result = tracker.track_allocation_with_context(
            0x3000,
            256,
            "context_var".to_string(),
            "String".to_string(),
        );
        assert!(result.is_ok());

        // Verify allocation was tracked with context
        let allocations = tracker.get_active_allocations().unwrap();
        let allocation = allocations.iter().find(|a| a.ptr == 0x3000).unwrap();
        assert_eq!(allocation.size, 256);
        assert_eq!(allocation.var_name, Some("context_var".to_string()));
        assert_eq!(allocation.type_name, Some("String".to_string()));
        assert!(allocation.lifetime_ms.is_some());
    }

    #[test]
    fn test_track_deallocation() {
        let tracker = create_test_tracker();

        // First track an allocation
        tracker.track_allocation(0x4000, 512).unwrap();

        // Verify it's active
        let allocations = tracker.get_active_allocations().unwrap();
        assert!(allocations.iter().any(|a| a.ptr == 0x4000));

        // Now deallocate it
        let result = tracker.track_deallocation(0x4000);
        assert!(result.is_ok());

        // Verify it's no longer active
        let allocations = tracker.get_active_allocations().unwrap();
        assert!(!allocations.iter().any(|a| a.ptr == 0x4000));
    }

    #[test]
    fn test_track_deallocation_nonexistent() {
        let tracker = create_test_tracker();

        // Try to deallocate a non-existent allocation
        let result = tracker.track_deallocation(0x9999);
        assert!(result.is_ok()); // Should not error
    }

    #[test]
    fn test_create_synthetic_allocation() {
        let tracker = create_test_tracker();

        let result = tracker.create_synthetic_allocation(
            0x5000,
            1024,
            "synthetic_var".to_string(),
            "Vec<u8>".to_string(),
            1234567890,
        );
        assert!(result.is_ok());

        // Verify synthetic allocation was created
        let allocations = tracker.get_active_allocations().unwrap();
        let allocation = allocations.iter().find(|a| a.ptr == 0x5000).unwrap();
        assert_eq!(allocation.size, 1024);
        assert_eq!(allocation.var_name, Some("synthetic_var".to_string()));
        assert_eq!(allocation.type_name, Some("Vec<u8>".to_string()));
    }

    #[test]
    fn test_associate_var_existing_allocation() {
        let tracker = create_test_tracker();

        // First track an allocation without context
        tracker.track_allocation(0x6000, 128).unwrap();

        // Then associate a variable with it
        let result = tracker.associate_var(
            0x6000,
            "associated_var".to_string(),
            "HashMap<String, i32>".to_string(),
        );
        assert!(result.is_ok());

        // Verify association was successful
        let allocations = tracker.get_active_allocations().unwrap();
        let allocation = allocations.iter().find(|a| a.ptr == 0x6000).unwrap();
        assert_eq!(allocation.var_name, Some("associated_var".to_string()));
        assert_eq!(
            allocation.type_name,
            Some("HashMap<String, i32>".to_string())
        );
    }

    #[test]
    fn test_associate_var_new_allocation() {
        let tracker = create_test_tracker();

        // Associate a variable with a non-existent allocation (creates synthetic)
        let result =
            tracker.associate_var(0x7000, "new_var".to_string(), "Box<String>".to_string());
        assert!(result.is_ok());

        // Verify synthetic allocation was created
        let allocations = tracker.get_active_allocations().unwrap();
        let allocation = allocations.iter().find(|a| a.ptr == 0x7000).unwrap();
        assert_eq!(allocation.var_name, Some("new_var".to_string()));
        assert_eq!(allocation.type_name, Some("Box<String>".to_string()));
        assert!(allocation.size > 0); // Should have estimated size
    }

    #[test]
    fn test_track_smart_pointer_clone() {
        let tracker = create_test_tracker();

        // Create source allocation with smart pointer info
        tracker
            .create_smart_pointer_allocation(
                0x8000,
                24,
                "source_rc".to_string(),
                "std::rc::Rc<String>".to_string(),
                1234567890,
                1,
                0x8100,
            )
            .unwrap();

        // Create clone allocation
        tracker
            .create_smart_pointer_allocation(
                0x8200,
                24,
                "clone_rc".to_string(),
                "std::rc::Rc<String>".to_string(),
                1234567900,
                2,
                0x8100, // Same data pointer
            )
            .unwrap();

        // Track the clone relationship
        let result = tracker.track_smart_pointer_clone(0x8200, 0x8000, 0x8100, 2, 0);
        assert!(result.is_ok());

        // Verify clone relationship was tracked
        let allocations = tracker.get_active_allocations().unwrap();
        let source_alloc = allocations.iter().find(|a| a.ptr == 0x8000).unwrap();
        let clone_alloc = allocations.iter().find(|a| a.ptr == 0x8200).unwrap();

        assert!(source_alloc.smart_pointer_info.is_some());
        assert!(clone_alloc.smart_pointer_info.is_some());
    }

    #[test]
    fn test_update_smart_pointer_ref_count() {
        let tracker = create_test_tracker();

        // Create smart pointer allocation
        tracker
            .create_smart_pointer_allocation(
                0x9000,
                24,
                "ref_counted".to_string(),
                "std::rc::Rc<i32>".to_string(),
                1234567890,
                1,
                0x9100,
            )
            .unwrap();

        // Update reference count
        let result = tracker.update_smart_pointer_ref_count(0x9000, 3, 1);
        assert!(result.is_ok());

        // Verify reference count was updated
        let allocations = tracker.get_active_allocations().unwrap();
        let allocation = allocations.iter().find(|a| a.ptr == 0x9000).unwrap();

        if let Some(ref smart_info) = allocation.smart_pointer_info {
            if let Some(latest) = smart_info.latest_ref_counts() {
                assert_eq!(latest.strong_count, 3);
                assert_eq!(latest.weak_count, 1);
            }
        } else {
            panic!("Smart pointer info should be present");
        }
    }

    #[test]
    fn test_create_smart_pointer_allocation_rc() {
        let tracker = create_test_tracker();

        let result = tracker.create_smart_pointer_allocation(
            0xa000,
            24,
            "rc_ptr".to_string(),
            "std::rc::Rc<Vec<u8>>".to_string(),
            1234567890,
            1,
            0xa100,
        );
        assert!(result.is_ok());

        // Verify smart pointer allocation was created
        let allocations = tracker.get_active_allocations().unwrap();
        let allocation = allocations.iter().find(|a| a.ptr == 0xa000).unwrap();
        assert_eq!(allocation.size, 24);
        assert_eq!(allocation.var_name, Some("rc_ptr".to_string()));
        assert_eq!(
            allocation.type_name,
            Some("std::rc::Rc<Vec<u8>>".to_string())
        );
        assert!(allocation.smart_pointer_info.is_some());

        if let Some(ref smart_info) = allocation.smart_pointer_info {
            assert_eq!(smart_info.data_ptr, 0xa100);
            if let Some(latest) = smart_info.latest_ref_counts() {
                assert_eq!(latest.strong_count, 1);
                assert_eq!(latest.weak_count, 0);
            }
        }
    }

    #[test]
    fn test_create_smart_pointer_allocation_arc() {
        let tracker = create_test_tracker();

        let result = tracker.create_smart_pointer_allocation(
            0xb000,
            24,
            "arc_ptr".to_string(),
            "std::sync::Arc<String>".to_string(),
            1234567890,
            1,
            0xb100,
        );
        assert!(result.is_ok());

        // Verify Arc allocation was created
        let allocations = tracker.get_active_allocations().unwrap();
        let allocation = allocations.iter().find(|a| a.ptr == 0xb000).unwrap();

        if let Some(ref smart_info) = allocation.smart_pointer_info {
            assert_eq!(
                smart_info.pointer_type,
                crate::core::types::SmartPointerType::Arc
            );
        }
    }

    #[test]
    fn test_create_smart_pointer_allocation_box() {
        let tracker = create_test_tracker();

        let result = tracker.create_smart_pointer_allocation(
            0xc000,
            8,
            "box_ptr".to_string(),
            "Box<i64>".to_string(),
            1234567890,
            1,
            0xc100,
        );
        assert!(result.is_ok());

        // Verify Box allocation was created
        let allocations = tracker.get_active_allocations().unwrap();
        let allocation = allocations.iter().find(|a| a.ptr == 0xc000).unwrap();

        if let Some(ref smart_info) = allocation.smart_pointer_info {
            assert_eq!(
                smart_info.pointer_type,
                crate::core::types::SmartPointerType::Box
            );
        }
    }

    #[test]
    fn test_create_smart_pointer_allocation_weak() {
        let tracker = create_test_tracker();

        let result = tracker.create_smart_pointer_allocation(
            0xd000,
            24,
            "weak_ptr".to_string(),
            "std::rc::Weak<String>".to_string(),
            1234567890,
            2, // weak count
            0xd100,
        );
        assert!(result.is_ok());

        // Verify Weak allocation was created
        let allocations = tracker.get_active_allocations().unwrap();
        let allocation = allocations.iter().find(|a| a.ptr == 0xd000).unwrap();

        if let Some(ref smart_info) = allocation.smart_pointer_info {
            assert_eq!(
                smart_info.pointer_type,
                crate::core::types::SmartPointerType::RcWeak
            );
            if let Some(latest) = smart_info.latest_ref_counts() {
                assert_eq!(latest.weak_count, 2);
            }
        }
    }

    #[test]
    fn test_track_deallocation_with_lifetime() {
        let tracker = create_test_tracker();

        // First track an allocation
        tracker.track_allocation(0xe000, 256).unwrap();

        // Deallocate with specific lifetime
        let result = tracker.track_deallocation_with_lifetime(0xe000, 1500);
        assert!(result.is_ok());

        // Verify allocation is no longer active
        let allocations = tracker.get_active_allocations().unwrap();
        assert!(!allocations.iter().any(|a| a.ptr == 0xe000));
    }

    #[test]
    fn test_track_smart_pointer_deallocation() {
        let tracker = create_test_tracker();

        // Create smart pointer allocation
        tracker
            .create_smart_pointer_allocation(
                0xf000,
                24,
                "dealloc_rc".to_string(),
                "std::rc::Rc<String>".to_string(),
                1234567890,
                1,
                0xf100,
            )
            .unwrap();

        // Deallocate smart pointer
        let result = tracker.track_smart_pointer_deallocation(0xf000, 2000, 0);
        assert!(result.is_ok());

        // Verify allocation is no longer active
        let allocations = tracker.get_active_allocations().unwrap();
        assert!(!allocations.iter().any(|a| a.ptr == 0xf000));
    }

    #[test]
    fn test_record_ownership_event() {
        let tracker = create_test_tracker();

        // Record various ownership events
        tracker.record_ownership_event(0x10000, OwnershipEventType::Allocated);
        tracker.record_ownership_event(
            0x10000,
            OwnershipEventType::Borrowed {
                borrower_scope: "test_scope".to_string(),
            },
        );
        tracker.record_ownership_event(
            0x10000,
            OwnershipEventType::OwnershipTransferred {
                target_var: "new_var".to_string(),
            },
        );
        tracker.record_ownership_event(0x10000, OwnershipEventType::Dropped);

        // This should not panic or error
    }

    #[test]
    fn test_get_ownership_summary() {
        let tracker = create_test_tracker();

        // Record some ownership events
        tracker.record_ownership_event(0x11000, OwnershipEventType::Allocated);
        tracker.record_ownership_event(
            0x11000,
            OwnershipEventType::Borrowed {
                borrower_scope: "test_scope".to_string(),
            },
        );

        // Get ownership summary
        let summary = tracker.get_ownership_summary(0x11000);
        assert!(summary.is_some());

        // Non-existent allocation: exercise the lookup path and make sure it
        // does not panic. (The previous assertion here,
        // `no_summary.is_none() || no_summary.is_some()`, was a tautology
        // that could never fail, so it has been removed.)
        let _no_summary = tracker.get_ownership_summary(0x99999);
    }

    #[test]
    fn test_export_ownership_history() {
        let tracker = create_test_tracker();

        // Record some ownership events
        tracker.record_ownership_event(0x12000, OwnershipEventType::Allocated);
        tracker.record_ownership_event(
            0x12000,
            OwnershipEventType::Borrowed {
                borrower_scope: "test_scope".to_string(),
            },
        );
        tracker.record_ownership_event(0x12000, OwnershipEventType::Dropped);

        // Export ownership history
        let result = tracker.export_ownership_history();
        assert!(result.is_ok());

        let json_str = result.unwrap();
        assert!(!json_str.is_empty());

        // Verify it's valid JSON
        let parsed: serde_json::Value = serde_json::from_str(&json_str).unwrap();
        assert!(parsed.is_object() || parsed.is_array());
    }

    #[test]
    fn test_concurrent_allocations() {
        let tracker = Arc::new(create_test_tracker());
        let mut handles = vec![];

        // Spawn multiple threads doing allocations
        for i in 0..5 {
            let tracker_clone = Arc::clone(&tracker);
            let handle = std::thread::spawn(move || {
                for j in 0..10 {
                    let ptr = (i * 1000 + j) * 0x100;
                    let size = 64 + j * 8;
                    let var_name = format!("thread_{}_var_{}", i, j);

                    let _ = tracker_clone.fast_track_allocation(ptr, size, var_name);
                }
            });
            handles.push(handle);
        }

        // Wait for all threads to complete
        for handle in handles {
            handle.join().unwrap();
        }

        // Verify allocations were tracked
        let allocations = tracker.get_active_allocations().unwrap();
        assert!(!allocations.is_empty());
        assert!(allocations.len() <= 50); // Should be up to 50 allocations
    }

    #[test]
    fn test_allocation_lifecycle() {
        let tracker = create_test_tracker();

        // Track allocation
        tracker
            .track_allocation_with_context(
                0x13000,
                512,
                "lifecycle_var".to_string(),
                "Vec<String>".to_string(),
            )
            .unwrap();

        // Verify it's active
        let allocations = tracker.get_active_allocations().unwrap();
        let allocation = allocations.iter().find(|a| a.ptr == 0x13000).unwrap();
        assert!(allocation.lifetime_ms.is_some());

        // Associate additional info
        tracker
            .associate_var(
                0x13000,
                "updated_lifecycle_var".to_string(),
                "Vec<String>".to_string(),
            )
            .unwrap();

        // Verify update
        let allocations = tracker.get_active_allocations().unwrap();
        let allocation = allocations.iter().find(|a| a.ptr == 0x13000).unwrap();
        assert_eq!(
            allocation.var_name,
            Some("updated_lifecycle_var".to_string())
        );

        // Record ownership events
        tracker.record_ownership_event(0x13000, OwnershipEventType::Allocated);
        tracker.record_ownership_event(
            0x13000,
            OwnershipEventType::Borrowed {
                borrower_scope: "test_scope".to_string(),
            },
        );

        // Deallocate
        tracker.track_deallocation(0x13000).unwrap();

        // Verify it's no longer active
        let allocations = tracker.get_active_allocations().unwrap();
        assert!(!allocations.iter().any(|a| a.ptr == 0x13000));
    }

    #[test]
    fn test_smart_pointer_lifecycle() {
        let tracker = create_test_tracker();

        // Create Rc allocation
        tracker
            .create_smart_pointer_allocation(
                0x14000,
                24,
                "rc_lifecycle".to_string(),
                "std::rc::Rc<Vec<i32>>".to_string(),
                1234567890,
                1,
                0x14100,
            )
            .unwrap();

        // Clone it
        tracker
            .create_smart_pointer_allocation(
                0x14200,
                24,
                "rc_clone".to_string(),
                "std::rc::Rc<Vec<i32>>".to_string(),
                1234567900,
                2,
                0x14100, // Same data pointer
            )
            .unwrap();

        // Track clone relationship
        tracker
            .track_smart_pointer_clone(0x14200, 0x14000, 0x14100, 2, 0)
            .unwrap();

        // Update reference counts
        tracker
            .update_smart_pointer_ref_count(0x14000, 2, 0)
            .unwrap();
        tracker
            .update_smart_pointer_ref_count(0x14200, 2, 0)
            .unwrap();

        // Deallocate clone (ref count goes to 1)
        tracker
            .track_smart_pointer_deallocation(0x14200, 1000, 1)
            .unwrap();

        // Update original ref count
        tracker
            .update_smart_pointer_ref_count(0x14000, 1, 0)
            .unwrap();

        // Deallocate original (ref count goes to 0)
        tracker
            .track_smart_pointer_deallocation(0x14000, 2000, 0)
            .unwrap();

        // Verify both are deallocated
        let allocations = tracker.get_active_allocations().unwrap();
        assert!(!allocations.iter().any(|a| a.ptr == 0x14000));
        assert!(!allocations.iter().any(|a| a.ptr == 0x14200));
    }
}