1pub mod core_tracker;
10pub mod core_types;
11pub mod export_options;
12
13pub mod lockfree_tracker;
15pub mod lockfree_types;
16
17pub mod async_tracker;
19pub mod async_types;
20
21pub mod task_profile;
23
24pub mod efficiency_scoring;
26
27pub mod bottleneck_analysis;
29
30pub mod hotspot_analysis;
32
33pub mod resource_ranking;
35
36pub mod unsafe_tracking;
38
39pub mod unified_tracker;
41
42pub mod global_tracking;
44
45use crate::event_store::{MemoryEvent, MemoryEventType};
46
47pub use core_tracker::{
49 collect_all_trackers_local, configure_tracking_strategy, get_registry_stats_local, get_tracker,
50 MemoryTracker,
51};
52
53pub use export_options::{ExportMode, ExportOptions};
55
56pub use async_tracker::{
58 create_tracked, get_memory_snapshot, initialize, is_tracking_active, shutdown, spawn_tracked,
59 track_current_allocation, track_current_deallocation, AsyncTracker,
60};
61pub use async_types::{
62 AsyncAllocation, AsyncError, AsyncMemorySnapshot, AsyncResult, AsyncSnapshot, AsyncStats,
63 ExtendedTaskInfo, TaskId, TaskInfo, TrackedFuture,
64};
65
66pub use task_profile::{AggregatedTaskStats, TaskMemoryProfile, TaskProfileManager, TaskType};
68
69pub use efficiency_scoring::{
71 ComponentScores, EfficiencyConfig, EfficiencyScorer, EfficiencyWeights,
72};
73
74pub use bottleneck_analysis::{
76 BottleneckAnalyzer, BottleneckConfig, BottleneckKind, BottleneckMetrics, PerformanceIssue,
77 TaskMetrics,
78};
79
80pub use hotspot_analysis::{
82 AllocationFrequencyPattern, CallStackHotspot, FrequencyAnalysis, HotspotAnalyzer,
83 HotspotConfig, HotspotStatistics, MemoryUsagePeak,
84};
85
86pub use resource_ranking::{
88 EfficiencyScores, RankingConfig, RankingStatistics, ResourceRanking, ResourceRankingAnalyzer,
89 TaskResourceMetrics,
90};
91
92pub use unsafe_tracking::{
94 AllocationInfo, AllocationOrigin, AllocationSource, MemoryPassport, OwnershipInfo,
95 PassportStamp, SafetyViolation, SecurityClearance, UnsafeTracker, UnsafeTrackingConfig,
96 UnsafeTrackingStats, ValidityStatus, ViolationSeverity,
97};
98
99pub use lockfree_tracker::{
101 finalize_thread_tracker, get_current_tracker, init_thread_tracker, is_tracking,
102 memory_snapshot, quick_trace, stop_tracing, trace_all, trace_thread, track_allocation_lockfree,
103 track_deallocation_lockfree, ThreadLocalTracker,
104};
105pub use lockfree_types::{
106 AllocationCategory, AnalysisSummary, Event, EventType, FrequencyData, FrequencyPattern,
107 InteractionType, LockfreeAnalysis, MemorySnapshot, MemoryStats, SamplingConfig, SystemMetrics,
108 ThreadInteraction, ThreadStats,
109};
110
111pub use unified_tracker::{
113 detect_environment, get_backend, initialize as initialize_unified, AsyncRuntimeType,
114 BackendConfig, DetectionConfig, DispatcherConfig, DispatcherMetrics, EnvironmentDetection,
115 EnvironmentDetector, MemoryAnalysisData, MemoryStatistics,
116 MemoryTracker as UnifiedMemoryTracker, RuntimeEnvironment, SessionMetadata, TrackerConfig,
117 TrackerStatistics, TrackerType, TrackingDispatcher, TrackingOperation, TrackingSession,
118 TrackingStrategy, UnifiedBackend,
119};
120
121pub use global_tracking::{
123 global_tracker, init_global_tracking, init_global_tracking_with_config, is_initialized,
124 GlobalTracker, GlobalTrackerConfig, GlobalTrackerStats,
125};
126
127pub trait CaptureBackend: Send + Sync {
132 fn capture_alloc(&self, ptr: usize, size: usize, thread_id: u64) -> MemoryEvent;
134
135 fn capture_dealloc(&self, ptr: usize, size: usize, thread_id: u64) -> MemoryEvent;
137
138 fn capture_realloc(
140 &self,
141 ptr: usize,
142 old_size: usize,
143 new_size: usize,
144 thread_id: u64,
145 ) -> MemoryEvent;
146
147 fn capture_move(
149 &self,
150 _from_ptr: usize,
151 to_ptr: usize,
152 size: usize,
153 thread_id: u64,
154 ) -> MemoryEvent;
155}
156
/// Identifies which concrete [`CaptureBackend`] implementation to create.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum CaptureBackendType {
    /// Minimal capture with no extra metadata (`CoreBackend`).
    Core,
    /// Capture that tags each event with a call-stack hash (`LockfreeBackend`).
    Lockfree,
    /// Capture intended for async/task contexts (`AsyncBackend`).
    Async,
    /// Wrapper that auto-detects the best backend at runtime
    /// (`UnifiedCaptureBackend`).
    Unified,
}
169
170impl CaptureBackendType {
171 pub fn create_backend(&self) -> Box<dyn CaptureBackend> {
173 match self {
174 CaptureBackendType::Core => Box::new(CoreBackend),
175 CaptureBackendType::Lockfree => Box::new(LockfreeBackend),
176 CaptureBackendType::Async => Box::new(AsyncBackend),
177 CaptureBackendType::Unified => Box::new(UnifiedCaptureBackend::new()),
178 }
179 }
180}
181
/// Minimal capture backend.
///
/// Builds plain [`MemoryEvent`]s via the type's constructors with no extra
/// metadata (no call-stack hash, no task id).
#[derive(Debug)]
pub struct CoreBackend;

impl CaptureBackend for CoreBackend {
    /// Delegates directly to `MemoryEvent::allocate`.
    fn capture_alloc(&self, ptr: usize, size: usize, thread_id: u64) -> MemoryEvent {
        MemoryEvent::allocate(ptr, size, thread_id)
    }

    /// Delegates directly to `MemoryEvent::deallocate`.
    fn capture_dealloc(&self, ptr: usize, size: usize, thread_id: u64) -> MemoryEvent {
        MemoryEvent::deallocate(ptr, size, thread_id)
    }

    /// Delegates directly to `MemoryEvent::reallocate`.
    fn capture_realloc(
        &self,
        ptr: usize,
        old_size: usize,
        new_size: usize,
        thread_id: u64,
    ) -> MemoryEvent {
        MemoryEvent::reallocate(ptr, old_size, new_size, thread_id)
    }

    /// Builds a `Move` event by hand (no `MemoryEvent` constructor exists for
    /// moves). Only the destination pointer is recorded; `_from_ptr` is
    /// intentionally ignored.
    fn capture_move(
        &self,
        _from_ptr: usize,
        to_ptr: usize,
        size: usize,
        thread_id: u64,
    ) -> MemoryEvent {
        MemoryEvent {
            timestamp: MemoryEvent::now(),
            event_type: MemoryEventType::Move,
            ptr: to_ptr,
            size,
            old_size: None,
            thread_id,
            // All optional metadata is left unset for the core backend.
            var_name: None,
            type_name: None,
            call_stack_hash: None,
            thread_name: None,
            source_file: None,
            source_line: None,
            module_path: None,
            clone_source_ptr: None,
            clone_target_ptr: None,
            stack_ptr: None,
            task_id: None,
        }
    }
}
235
/// Capture backend for the lock-free tracking path.
///
/// Identical to [`CoreBackend`] except that every event is tagged with a
/// call-stack hash produced by `hash_call_stack` (a cheap synthetic tag, see
/// that method's documentation).
#[derive(Debug)]
pub struct LockfreeBackend;

impl CaptureBackend for LockfreeBackend {
    /// Allocation event, tagged with a call-stack hash.
    fn capture_alloc(&self, ptr: usize, size: usize, thread_id: u64) -> MemoryEvent {
        MemoryEvent::allocate(ptr, size, thread_id).with_call_stack_hash(self.hash_call_stack())
    }

    /// Deallocation event, tagged with a call-stack hash.
    fn capture_dealloc(&self, ptr: usize, size: usize, thread_id: u64) -> MemoryEvent {
        MemoryEvent::deallocate(ptr, size, thread_id).with_call_stack_hash(self.hash_call_stack())
    }

    /// Reallocation event, tagged with a call-stack hash.
    fn capture_realloc(
        &self,
        ptr: usize,
        old_size: usize,
        new_size: usize,
        thread_id: u64,
    ) -> MemoryEvent {
        MemoryEvent::reallocate(ptr, old_size, new_size, thread_id)
            .with_call_stack_hash(self.hash_call_stack())
    }

    /// Builds a `Move` event by hand; only the destination pointer is
    /// recorded (`_from_ptr` is ignored), but unlike `CoreBackend` the
    /// call-stack hash is filled in.
    fn capture_move(
        &self,
        _from_ptr: usize,
        to_ptr: usize,
        size: usize,
        thread_id: u64,
    ) -> MemoryEvent {
        MemoryEvent {
            timestamp: MemoryEvent::now(),
            event_type: MemoryEventType::Move,
            ptr: to_ptr,
            size,
            old_size: None,
            thread_id,
            var_name: None,
            type_name: None,
            call_stack_hash: Some(self.hash_call_stack()),
            thread_name: None,
            source_file: None,
            source_line: None,
            module_path: None,
            clone_source_ptr: None,
            clone_target_ptr: None,
            stack_ptr: None,
            task_id: None,
        }
    }
}
291
292impl LockfreeBackend {
293 #[inline]
299 fn hash_call_stack(&self) -> u64 {
300 use std::collections::hash_map::DefaultHasher;
301 use std::hash::{Hash, Hasher};
302
303 let mut hasher = DefaultHasher::new();
304
305 std::thread::current().id().hash(&mut hasher);
307
308 static COUNTER: std::sync::atomic::AtomicU64 = std::sync::atomic::AtomicU64::new(0);
311 let count = COUNTER.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
312 count.hash(&mut hasher);
313
314 hasher.finish()
315 }
316}
317
318#[derive(Debug)]
322pub struct AsyncBackend;
323
324impl CaptureBackend for AsyncBackend {
325 fn capture_alloc(&self, ptr: usize, size: usize, thread_id: u64) -> MemoryEvent {
326 MemoryEvent::allocate(ptr, size, thread_id)
327 }
328
329 fn capture_dealloc(&self, ptr: usize, size: usize, thread_id: u64) -> MemoryEvent {
330 MemoryEvent::deallocate(ptr, size, thread_id)
331 }
332
333 fn capture_realloc(
334 &self,
335 ptr: usize,
336 old_size: usize,
337 new_size: usize,
338 thread_id: u64,
339 ) -> MemoryEvent {
340 MemoryEvent::reallocate(ptr, old_size, new_size, thread_id)
341 }
342
343 fn capture_move(
344 &self,
345 _from_ptr: usize,
346 to_ptr: usize,
347 size: usize,
348 thread_id: u64,
349 ) -> MemoryEvent {
350 MemoryEvent {
351 timestamp: MemoryEvent::now(),
352 event_type: MemoryEventType::Move,
353 ptr: to_ptr,
354 size,
355 old_size: None,
356 thread_id,
357 var_name: None,
358 type_name: None,
359 call_stack_hash: None,
360 thread_name: None,
361 source_file: None,
362 source_line: None,
363 module_path: None,
364 clone_source_ptr: None,
365 clone_target_ptr: None,
366 stack_ptr: None,
367 task_id: None,
368 }
369 }
370}
371
/// Backend wrapper that auto-selects a concrete capture backend at runtime.
pub struct UnifiedCaptureBackend {
    /// Concrete backend every capture call is delegated to.
    inner: Box<dyn CaptureBackend>,
    /// Which backend `inner` currently is; exposed via `backend_type()`.
    backend_type: CaptureBackendType,
}
382
383impl UnifiedCaptureBackend {
384 fn detect_best_backend() -> (Box<dyn CaptureBackend>, CaptureBackendType) {
394 let thread_count = std::thread::available_parallelism()
395 .map(|p| p.get())
396 .unwrap_or(1);
397
398 if thread_count <= 1 {
399 (Box::new(CoreBackend), CaptureBackendType::Core)
400 } else {
401 (Box::new(LockfreeBackend), CaptureBackendType::Lockfree)
402 }
403 }
404
405 pub fn new() -> Self {
407 let (inner, backend_type) = Self::detect_best_backend();
408 Self {
409 inner,
410 backend_type,
411 }
412 }
413
414 pub fn backend_type(&self) -> CaptureBackendType {
416 self.backend_type
417 }
418
419 pub fn refresh_backend(&mut self) {
428 let (new_inner, new_type) = Self::detect_best_backend();
429 self.inner = new_inner;
430 self.backend_type = new_type;
431 }
432}
433
434impl Default for UnifiedCaptureBackend {
435 fn default() -> Self {
436 Self::new()
437 }
438}
439
440impl CaptureBackend for UnifiedCaptureBackend {
441 fn capture_alloc(&self, ptr: usize, size: usize, thread_id: u64) -> MemoryEvent {
442 self.inner.capture_alloc(ptr, size, thread_id)
443 }
444
445 fn capture_dealloc(&self, ptr: usize, size: usize, thread_id: u64) -> MemoryEvent {
446 self.inner.capture_dealloc(ptr, size, thread_id)
447 }
448
449 fn capture_realloc(
450 &self,
451 ptr: usize,
452 old_size: usize,
453 new_size: usize,
454 thread_id: u64,
455 ) -> MemoryEvent {
456 self.inner
457 .capture_realloc(ptr, old_size, new_size, thread_id)
458 }
459
460 fn capture_move(
461 &self,
462 from_ptr: usize,
463 to_ptr: usize,
464 size: usize,
465 thread_id: u64,
466 ) -> MemoryEvent {
467 self.inner.capture_move(from_ptr, to_ptr, size, thread_id)
468 }
469}
470
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_core_backend() {
        let event = CoreBackend.capture_alloc(0x1000, 1024, 1);
        assert_eq!(event.ptr, 0x1000);
        assert_eq!(event.size, 1024);
        assert_eq!(event.thread_id, 1);
        assert!(event.is_allocation());
    }

    #[test]
    fn test_lockfree_backend() {
        let event = LockfreeBackend.capture_alloc(0x1000, 1024, 1);
        assert_eq!((event.ptr, event.size), (0x1000, 1024));
        // Lockfree events always carry a synthetic call-stack tag.
        assert!(event.call_stack_hash.is_some());
    }

    #[test]
    fn test_async_backend() {
        let event = AsyncBackend.capture_alloc(0x1000, 1024, 1);
        assert_eq!((event.ptr, event.size), (0x1000, 1024));
    }

    #[test]
    fn test_unified_backend() {
        let event = UnifiedCaptureBackend::default().capture_alloc(0x1000, 1024, 1);
        assert_eq!((event.ptr, event.size), (0x1000, 1024));
    }

    #[test]
    fn test_backend_type_creation() {
        // Exercise create_backend() for every variant with distinct inputs.
        let variants = [
            CaptureBackendType::Core,
            CaptureBackendType::Lockfree,
            CaptureBackendType::Async,
            CaptureBackendType::Unified,
        ];

        for (i, variant) in variants.iter().enumerate() {
            let ptr = 0x1000 * (i + 1);
            let size = 1024 * (i + 1);
            let event = variant
                .create_backend()
                .capture_alloc(ptr, size, (i + 1) as u64);
            assert_eq!(event.ptr, ptr);
        }
    }
}