Skip to main content

memscope_rs/capture/backends/
mod.rs

1//! Capture backends for different tracking strategies.
2//!
3//! This module provides the CaptureBackend trait and implementations
4//! for different tracking strategies (core, lockfree, async, unified).
5//!
6//! All files are maintained under 1000 lines per coding standards.
7
8// Core tracking modules
9pub mod core_tracker;
10pub mod core_types;
11pub mod export_options;
12
13// Lockfree tracking modules
14pub mod lockfree_tracker;
15pub mod lockfree_types;
16
17// Async tracking modules
18pub mod async_tracker;
19pub mod async_types;
20
21// Task profile modules (task-level memory profiling)
22pub mod task_profile;
23
24// Efficiency scoring modules
25pub mod efficiency_scoring;
26
27// Bottleneck analysis modules
28pub mod bottleneck_analysis;
29
30// Hotspot analysis modules
31pub mod hotspot_analysis;
32
33// Resource ranking modules
34pub mod resource_ranking;
35
36// Unsafe/FFI tracking modules
37pub mod unsafe_tracking;
38
39// Unified tracking modules
40pub mod unified_tracker;
41
42// Global tracking module (lazy init, CLI-friendly)
43pub mod global_tracking;
44
45use crate::event_store::{MemoryEvent, MemoryEventType};
46
47// Re-export core tracker types
48pub use core_tracker::{
49    collect_all_trackers_local, configure_tracking_strategy, get_registry_stats_local, get_tracker,
50    MemoryTracker,
51};
52
53// Re-export export options
54pub use export_options::{ExportMode, ExportOptions};
55
56// Re-export async tracker types
57pub use async_tracker::{
58    create_tracked, get_memory_snapshot, initialize, is_tracking_active, shutdown, spawn_tracked,
59    track_current_allocation, track_current_deallocation, AsyncTracker,
60};
61pub use async_types::{
62    AsyncAllocation, AsyncError, AsyncMemorySnapshot, AsyncResult, AsyncSnapshot, AsyncStats,
63    ExtendedTaskInfo, TaskId, TaskInfo, TrackedFuture,
64};
65
66// Re-export task profile types
67pub use task_profile::{AggregatedTaskStats, TaskMemoryProfile, TaskProfileManager, TaskType};
68
69// Re-export efficiency scoring types
70pub use efficiency_scoring::{
71    ComponentScores, EfficiencyConfig, EfficiencyScorer, EfficiencyWeights,
72};
73
74// Re-export bottleneck analysis types
75pub use bottleneck_analysis::{
76    BottleneckAnalyzer, BottleneckConfig, BottleneckKind, BottleneckMetrics, PerformanceIssue,
77    TaskMetrics,
78};
79
80// Re-export hotspot analysis types
81pub use hotspot_analysis::{
82    AllocationFrequencyPattern, CallStackHotspot, FrequencyAnalysis, HotspotAnalyzer,
83    HotspotConfig, HotspotStatistics, MemoryUsagePeak,
84};
85
86// Re-export resource ranking types
87pub use resource_ranking::{
88    EfficiencyScores, RankingConfig, RankingStatistics, ResourceRanking, ResourceRankingAnalyzer,
89    TaskResourceMetrics,
90};
91
92// Re-export unsafe/FFI tracking types
93pub use unsafe_tracking::{
94    AllocationInfo, AllocationOrigin, AllocationSource, MemoryPassport, OwnershipInfo,
95    PassportStamp, SafetyViolation, SecurityClearance, UnsafeTracker, UnsafeTrackingConfig,
96    UnsafeTrackingStats, ValidityStatus, ViolationSeverity,
97};
98
99// Re-export lockfree tracker types
100pub use lockfree_tracker::{
101    finalize_thread_tracker, get_current_tracker, init_thread_tracker, is_tracking,
102    memory_snapshot, quick_trace, stop_tracing, trace_all, trace_thread, track_allocation_lockfree,
103    track_deallocation_lockfree, ThreadLocalTracker,
104};
105pub use lockfree_types::{
106    AllocationCategory, AnalysisSummary, Event, EventType, FrequencyData, FrequencyPattern,
107    InteractionType, LockfreeAnalysis, MemorySnapshot, MemoryStats, SamplingConfig, SystemMetrics,
108    ThreadInteraction, ThreadStats,
109};
110
111// Re-export unified tracker types
112pub use unified_tracker::{
113    detect_environment, get_backend, initialize as initialize_unified, AsyncRuntimeType,
114    BackendConfig, DetectionConfig, DispatcherConfig, DispatcherMetrics, EnvironmentDetection,
115    EnvironmentDetector, MemoryAnalysisData, MemoryStatistics,
116    MemoryTracker as UnifiedMemoryTracker, RuntimeEnvironment, SessionMetadata, TrackerConfig,
117    TrackerStatistics, TrackerType, TrackingDispatcher, TrackingOperation, TrackingSession,
118    TrackingStrategy, UnifiedBackend,
119};
120
121// Re-export global tracking types
122pub use global_tracking::{
123    global_tracker, init_global_tracking, init_global_tracking_with_config, is_initialized,
124    GlobalTracker, GlobalTrackerConfig, GlobalTrackerStats,
125};
126
127/// Capture Backend trait
128///
129/// All capture backends must implement this trait to provide
130/// a unified interface for capturing memory events.
131pub trait CaptureBackend: Send + Sync {
132    /// Capture an allocation event
133    fn capture_alloc(&self, ptr: usize, size: usize, thread_id: u64) -> MemoryEvent;
134
135    /// Capture a deallocation event
136    fn capture_dealloc(&self, ptr: usize, size: usize, thread_id: u64) -> MemoryEvent;
137
138    /// Capture a reallocation event
139    fn capture_realloc(
140        &self,
141        ptr: usize,
142        old_size: usize,
143        new_size: usize,
144        thread_id: u64,
145    ) -> MemoryEvent;
146
147    /// Capture a move event
148    fn capture_move(
149        &self,
150        _from_ptr: usize,
151        to_ptr: usize,
152        size: usize,
153        thread_id: u64,
154    ) -> MemoryEvent;
155}
156
/// Type of capture backend
///
/// Identifies which `CaptureBackend` implementation to instantiate
/// via [`CaptureBackendType::create_backend`].
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum CaptureBackendType {
    /// Core tracking backend (original implementation)
    Core,
    /// Lockfree tracking backend (lock-free multi-threaded)
    Lockfree,
    /// Async tracking backend (async task tracking)
    Async,
    /// Unified tracking backend (auto-detects best strategy)
    Unified,
}
169
170impl CaptureBackendType {
171    /// Create a capture backend instance
172    pub fn create_backend(&self) -> Box<dyn CaptureBackend> {
173        match self {
174            CaptureBackendType::Core => Box::new(CoreBackend),
175            CaptureBackendType::Lockfree => Box::new(LockfreeBackend),
176            CaptureBackendType::Async => Box::new(AsyncBackend),
177            CaptureBackendType::Unified => Box::new(UnifiedCaptureBackend::new()),
178        }
179    }
180}
181
182/// Core tracking backend
183///
184/// This is the original tracking backend implementation.
185#[derive(Debug)]
186pub struct CoreBackend;
187
188impl CaptureBackend for CoreBackend {
189    fn capture_alloc(&self, ptr: usize, size: usize, thread_id: u64) -> MemoryEvent {
190        MemoryEvent::allocate(ptr, size, thread_id)
191    }
192
193    fn capture_dealloc(&self, ptr: usize, size: usize, thread_id: u64) -> MemoryEvent {
194        MemoryEvent::deallocate(ptr, size, thread_id)
195    }
196
197    fn capture_realloc(
198        &self,
199        ptr: usize,
200        old_size: usize,
201        new_size: usize,
202        thread_id: u64,
203    ) -> MemoryEvent {
204        MemoryEvent::reallocate(ptr, old_size, new_size, thread_id)
205    }
206
207    fn capture_move(
208        &self,
209        _from_ptr: usize,
210        to_ptr: usize,
211        size: usize,
212        thread_id: u64,
213    ) -> MemoryEvent {
214        MemoryEvent {
215            timestamp: MemoryEvent::now(),
216            event_type: MemoryEventType::Move,
217            ptr: to_ptr,
218            size,
219            old_size: None,
220            thread_id,
221            var_name: None,
222            type_name: None,
223            call_stack_hash: None,
224            thread_name: None,
225            source_file: None,
226            source_line: None,
227        }
228    }
229}
230
231/// Lockfree tracking backend
232///
233/// This backend uses lock-free data structures for high-performance
234/// multi-threaded tracking.
235#[derive(Debug)]
236pub struct LockfreeBackend;
237
238impl CaptureBackend for LockfreeBackend {
239    fn capture_alloc(&self, ptr: usize, size: usize, thread_id: u64) -> MemoryEvent {
240        MemoryEvent::allocate(ptr, size, thread_id).with_call_stack_hash(self.hash_call_stack())
241    }
242
243    fn capture_dealloc(&self, ptr: usize, size: usize, thread_id: u64) -> MemoryEvent {
244        MemoryEvent::deallocate(ptr, size, thread_id).with_call_stack_hash(self.hash_call_stack())
245    }
246
247    fn capture_realloc(
248        &self,
249        ptr: usize,
250        old_size: usize,
251        new_size: usize,
252        thread_id: u64,
253    ) -> MemoryEvent {
254        MemoryEvent::reallocate(ptr, old_size, new_size, thread_id)
255            .with_call_stack_hash(self.hash_call_stack())
256    }
257
258    fn capture_move(
259        &self,
260        _from_ptr: usize,
261        to_ptr: usize,
262        size: usize,
263        thread_id: u64,
264    ) -> MemoryEvent {
265        MemoryEvent {
266            timestamp: MemoryEvent::now(),
267            event_type: MemoryEventType::Move,
268            ptr: to_ptr,
269            size,
270            old_size: None,
271            thread_id,
272            var_name: None,
273            type_name: None,
274            call_stack_hash: Some(self.hash_call_stack()),
275            thread_name: None,
276            source_file: None,
277            source_line: None,
278        }
279    }
280}
281
282impl LockfreeBackend {
283    /// Generate a hash of the current call context.
284    ///
285    /// Note: This is a lightweight hash based on thread ID and a counter,
286    /// not a full call stack capture. For full call stack tracking,
287    /// enable the `backtrace` feature.
288    #[inline]
289    fn hash_call_stack(&self) -> u64 {
290        use std::collections::hash_map::DefaultHasher;
291        use std::hash::{Hash, Hasher};
292
293        let mut hasher = DefaultHasher::new();
294
295        // Use thread ID for basic grouping
296        std::thread::current().id().hash(&mut hasher);
297
298        // Add a counter for uniqueness within the same thread
299        // Using a thread-local counter would be better but adds overhead
300        static COUNTER: std::sync::atomic::AtomicU64 = std::sync::atomic::AtomicU64::new(0);
301        let count = COUNTER.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
302        count.hash(&mut hasher);
303
304        hasher.finish()
305    }
306}
307
308/// Async tracking backend
309///
310/// This backend is optimized for async task tracking with task ID support.
311#[derive(Debug)]
312pub struct AsyncBackend;
313
314impl CaptureBackend for AsyncBackend {
315    fn capture_alloc(&self, ptr: usize, size: usize, thread_id: u64) -> MemoryEvent {
316        MemoryEvent::allocate(ptr, size, thread_id)
317    }
318
319    fn capture_dealloc(&self, ptr: usize, size: usize, thread_id: u64) -> MemoryEvent {
320        MemoryEvent::deallocate(ptr, size, thread_id)
321    }
322
323    fn capture_realloc(
324        &self,
325        ptr: usize,
326        old_size: usize,
327        new_size: usize,
328        thread_id: u64,
329    ) -> MemoryEvent {
330        MemoryEvent::reallocate(ptr, old_size, new_size, thread_id)
331    }
332
333    fn capture_move(
334        &self,
335        _from_ptr: usize,
336        to_ptr: usize,
337        size: usize,
338        thread_id: u64,
339    ) -> MemoryEvent {
340        MemoryEvent {
341            timestamp: MemoryEvent::now(),
342            event_type: MemoryEventType::Move,
343            ptr: to_ptr,
344            size,
345            old_size: None,
346            thread_id,
347            var_name: None,
348            type_name: None,
349            call_stack_hash: None,
350            thread_name: None,
351            source_file: None,
352            source_line: None,
353        }
354    }
355}
356
357/// Unified capture backend
358///
359/// This backend automatically detects the best tracking strategy
360/// based on the runtime environment for capturing events.
361pub struct UnifiedCaptureBackend {
362    /// The actual backend being used
363    inner: Box<dyn CaptureBackend>,
364    /// Which backend was selected
365    backend_type: CaptureBackendType,
366}
367
368impl UnifiedCaptureBackend {
369    /// Detect the best capture backend for the current runtime environment.
370    ///
371    /// Selection logic:
372    /// - Single CPU core or unavailable parallelism → CoreBackend (simple, lowest overhead)
373    /// - Multiple CPU cores → LockfreeBackend (concurrent, high throughput)
374    ///
375    /// Note: AsyncBackend detection is not currently implemented.
376    /// The backend selection is made once at creation time and can be
377    /// refreshed using `refresh_backend()` if runtime conditions change.
378    fn detect_best_backend() -> (Box<dyn CaptureBackend>, CaptureBackendType) {
379        let thread_count = std::thread::available_parallelism()
380            .map(|p| p.get())
381            .unwrap_or(1);
382
383        if thread_count <= 1 {
384            (Box::new(CoreBackend), CaptureBackendType::Core)
385        } else {
386            (Box::new(LockfreeBackend), CaptureBackendType::Lockfree)
387        }
388    }
389
390    /// Create a new unified capture backend with auto-detection.
391    pub fn new() -> Self {
392        let (inner, backend_type) = Self::detect_best_backend();
393        Self {
394            inner,
395            backend_type,
396        }
397    }
398
399    /// Get which backend type was selected.
400    pub fn backend_type(&self) -> CaptureBackendType {
401        self.backend_type
402    }
403
404    /// Refresh the backend selection based on current runtime environment.
405    ///
406    /// This allows switching to a more appropriate backend if the
407    /// runtime conditions have changed (e.g., from single-threaded
408    /// to multi-threaded).
409    ///
410    /// Note: This replaces the inner backend with a new instance,
411    /// so any internal state is lost.
412    pub fn refresh_backend(&mut self) {
413        let (new_inner, new_type) = Self::detect_best_backend();
414        self.inner = new_inner;
415        self.backend_type = new_type;
416    }
417}
418
419impl Default for UnifiedCaptureBackend {
420    fn default() -> Self {
421        Self::new()
422    }
423}
424
425impl CaptureBackend for UnifiedCaptureBackend {
426    fn capture_alloc(&self, ptr: usize, size: usize, thread_id: u64) -> MemoryEvent {
427        self.inner.capture_alloc(ptr, size, thread_id)
428    }
429
430    fn capture_dealloc(&self, ptr: usize, size: usize, thread_id: u64) -> MemoryEvent {
431        self.inner.capture_dealloc(ptr, size, thread_id)
432    }
433
434    fn capture_realloc(
435        &self,
436        ptr: usize,
437        old_size: usize,
438        new_size: usize,
439        thread_id: u64,
440    ) -> MemoryEvent {
441        self.inner
442            .capture_realloc(ptr, old_size, new_size, thread_id)
443    }
444
445    fn capture_move(
446        &self,
447        from_ptr: usize,
448        to_ptr: usize,
449        size: usize,
450        thread_id: u64,
451    ) -> MemoryEvent {
452        self.inner.capture_move(from_ptr, to_ptr, size, thread_id)
453    }
454}
455
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_core_backend() {
        let ev = CoreBackend.capture_alloc(0x1000, 1024, 1);
        assert_eq!(ev.ptr, 0x1000);
        assert_eq!(ev.size, 1024);
        assert_eq!(ev.thread_id, 1);
        assert!(ev.is_allocation());
    }

    #[test]
    fn test_lockfree_backend() {
        let ev = LockfreeBackend.capture_alloc(0x1000, 1024, 1);
        assert_eq!(ev.ptr, 0x1000);
        assert_eq!(ev.size, 1024);
        // Lockfree events always carry a call-context hash.
        assert!(ev.call_stack_hash.is_some());
    }

    #[test]
    fn test_async_backend() {
        let ev = AsyncBackend.capture_alloc(0x1000, 1024, 1);
        assert_eq!(ev.ptr, 0x1000);
        assert_eq!(ev.size, 1024);
    }

    #[test]
    fn test_unified_backend() {
        let unified = UnifiedCaptureBackend::default();
        let ev = unified.capture_alloc(0x1000, 1024, 1);
        assert_eq!(ev.ptr, 0x1000);
        assert_eq!(ev.size, 1024);
    }

    #[test]
    fn test_backend_type_creation() {
        // Every variant must produce a working backend through the trait object.
        let cases = [
            (CaptureBackendType::Core, 0x1000usize, 1024usize, 1u64),
            (CaptureBackendType::Lockfree, 0x2000, 2048, 2),
            (CaptureBackendType::Async, 0x3000, 3072, 3),
            (CaptureBackendType::Unified, 0x4000, 4096, 4),
        ];

        for (kind, ptr, size, tid) in cases {
            let backend = kind.create_backend();
            let ev = backend.capture_alloc(ptr, size, tid);
            assert_eq!(ev.ptr, ptr);
        }
    }
}