1use serde::{Deserialize, Serialize};
7use std::sync::atomic::{AtomicU64, Ordering};
8use std::sync::Mutex;
9use std::time::Duration;
10
/// Infrequently-updated ("cold") allocation statistics, kept behind a mutex
/// in `SimpleMemoryStats` so the hot counters can stay lock-free.
#[derive(Debug, Default)]
struct ColdStats {
    /// Allocations currently outstanding (incremented on alloc, decremented on dealloc).
    active_allocations: u64,
    /// Bytes currently outstanding.
    active_memory: u64,
    /// High-water mark of `active_allocations`.
    peak_allocations: u64,
    /// High-water mark of `active_memory`.
    peak_memory: u64,
    /// Lifetime deallocation count.
    total_deallocations: u64,
    /// Lifetime bytes deallocated.
    total_deallocated: u64,
    /// Count of allocations reported via `record_leak`.
    leaked_allocations: u64,
    /// Bytes reported leaked.
    leaked_memory: u64,
}
23
/// Two-tier memory statistics tracker: two lock-free atomic counters on the
/// hot path, plus mutex-guarded `ColdStats` for detailed bookkeeping that is
/// only updated when the lock is uncontended (`try_lock` throughout).
///
/// NOTE(review): `#[repr(align(64))]` aligns the struct to 64 bytes —
/// presumably to give the hot atomics their own cache line and avoid false
/// sharing; 64 B is typical but not universal — confirm for target hardware.
#[repr(align(64))]
#[derive(Debug)]
pub struct SimpleMemoryStats {
    /// Total number of allocations ever recorded (lock-free hot counter).
    pub allocation_count: AtomicU64,
    /// Total bytes ever allocated (lock-free hot counter).
    pub total_allocated: AtomicU64,
    /// Detailed stats; accessed only via `try_lock`, so updates and snapshots
    /// of this section are best-effort under contention.
    detailed: Mutex<ColdStats>,
}
36
37impl SimpleMemoryStats {
38 pub fn new() -> Self {
40 Self {
41 allocation_count: AtomicU64::new(0),
42 total_allocated: AtomicU64::new(0),
43 detailed: Mutex::new(ColdStats::default()),
44 }
45 }
46
47 pub fn record_allocation_fast(&self, size: u64) {
49 self.allocation_count.fetch_add(1, Ordering::Relaxed);
50 self.total_allocated.fetch_add(size, Ordering::Relaxed);
51 }
52
53 pub fn record_allocation_detailed(&self, size: u64) {
55 self.record_allocation_fast(size);
57
58 if let Ok(mut stats) = self.detailed.try_lock() {
60 stats.active_allocations += 1;
61 stats.active_memory += size;
62
63 if stats.active_allocations > stats.peak_allocations {
65 stats.peak_allocations = stats.active_allocations;
66 }
67 if stats.active_memory > stats.peak_memory {
68 stats.peak_memory = stats.active_memory;
69 }
70 }
71 }
72
73 pub fn record_deallocation(&self, size: u64) {
75 if let Ok(mut stats) = self.detailed.try_lock() {
76 stats.total_deallocations += 1;
77 stats.total_deallocated += size;
78 stats.active_allocations = stats.active_allocations.saturating_sub(1);
79 stats.active_memory = stats.active_memory.saturating_sub(size);
80 }
81 }
82
83 pub fn record_leak(&self, size: u64) {
85 if let Ok(mut stats) = self.detailed.try_lock() {
86 stats.leaked_allocations += 1;
87 stats.leaked_memory += size;
88 }
89 }
90
91 pub fn snapshot(&self) -> MemoryStatsSnapshot {
93 let allocation_count = self.allocation_count.load(Ordering::Relaxed);
94 let total_allocated = self.total_allocated.load(Ordering::Relaxed);
95
96 let detailed = self
98 .detailed
99 .try_lock()
100 .map(|stats| ColdStats {
101 active_allocations: stats.active_allocations,
102 active_memory: stats.active_memory,
103 peak_allocations: stats.peak_allocations,
104 peak_memory: stats.peak_memory,
105 total_deallocations: stats.total_deallocations,
106 total_deallocated: stats.total_deallocated,
107 leaked_allocations: stats.leaked_allocations,
108 leaked_memory: stats.leaked_memory,
109 })
110 .unwrap_or_default();
111
112 MemoryStatsSnapshot {
113 total_allocations: allocation_count,
114 total_allocated,
115 active_allocations: detailed.active_allocations,
116 active_memory: detailed.active_memory,
117 peak_allocations: detailed.peak_allocations,
118 peak_memory: detailed.peak_memory,
119 total_deallocations: detailed.total_deallocations,
120 total_deallocated: detailed.total_deallocated,
121 leaked_allocations: detailed.leaked_allocations,
122 leaked_memory: detailed.leaked_memory,
123 }
124 }
125
126 pub fn reset(&self) {
128 self.allocation_count.store(0, Ordering::Relaxed);
129 self.total_allocated.store(0, Ordering::Relaxed);
130
131 if let Ok(mut stats) = self.detailed.try_lock() {
132 *stats = ColdStats::default();
133 }
134 }
135}
136
137impl Default for SimpleMemoryStats {
138 fn default() -> Self {
139 Self::new()
140 }
141}
142
/// Fully lock-free memory statistics: every field is an independent atomic
/// counter, so recording never blocks. A snapshot loads each field
/// separately, so it is not a single consistent point-in-time view.
#[derive(Debug)]
pub struct AtomicMemoryStats {
    /// Lifetime allocation count.
    pub total_allocations: AtomicU64,
    /// Lifetime bytes allocated.
    pub total_allocated: AtomicU64,
    /// Allocations currently outstanding.
    pub active_allocations: AtomicU64,
    /// Bytes currently outstanding.
    pub active_memory: AtomicU64,
    /// High-water mark of `active_allocations`.
    pub peak_allocations: AtomicU64,
    /// High-water mark of `active_memory`.
    pub peak_memory: AtomicU64,
    /// Lifetime deallocation count.
    pub total_deallocations: AtomicU64,
    /// Lifetime bytes deallocated.
    pub total_deallocated: AtomicU64,
    /// Count of allocations reported via `record_leak`.
    pub leaked_allocations: AtomicU64,
    /// Bytes reported leaked.
    pub leaked_memory: AtomicU64,
}
167
168impl AtomicMemoryStats {
169 pub fn new() -> Self {
171 Self {
172 total_allocations: AtomicU64::new(0),
173 total_allocated: AtomicU64::new(0),
174 active_allocations: AtomicU64::new(0),
175 active_memory: AtomicU64::new(0),
176 peak_allocations: AtomicU64::new(0),
177 peak_memory: AtomicU64::new(0),
178 total_deallocations: AtomicU64::new(0),
179 total_deallocated: AtomicU64::new(0),
180 leaked_allocations: AtomicU64::new(0),
181 leaked_memory: AtomicU64::new(0),
182 }
183 }
184
185 pub fn record_allocation(&self, size: u64) {
187 self.total_allocations.fetch_add(1, Ordering::Relaxed);
188 self.total_allocated.fetch_add(size, Ordering::Relaxed);
189 self.active_allocations.fetch_add(1, Ordering::Relaxed);
190 let new_active_memory = self.active_memory.fetch_add(size, Ordering::Relaxed) + size;
191
192 self.update_peak_allocations();
194 self.update_peak_memory(new_active_memory);
195 }
196
197 pub fn record_deallocation(&self, size: u64) {
199 self.total_deallocations.fetch_add(1, Ordering::Relaxed);
200 self.total_deallocated.fetch_add(size, Ordering::Relaxed);
201 self.active_allocations.fetch_sub(1, Ordering::Relaxed);
202 self.active_memory.fetch_sub(size, Ordering::Relaxed);
203 }
204
205 pub fn record_leak(&self, size: u64) {
207 self.leaked_allocations.fetch_add(1, Ordering::Relaxed);
208 self.leaked_memory.fetch_add(size, Ordering::Relaxed);
209 }
210
211 fn update_peak_allocations(&self) {
213 let current_active = self.active_allocations.load(Ordering::Relaxed);
214 let mut current_peak = self.peak_allocations.load(Ordering::Relaxed);
215
216 while current_active > current_peak {
217 match self.peak_allocations.compare_exchange_weak(
218 current_peak,
219 current_active,
220 Ordering::Relaxed,
221 Ordering::Relaxed,
222 ) {
223 Ok(_) => break,
224 Err(actual) => current_peak = actual,
225 }
226 }
227 }
228
229 fn update_peak_memory(&self, new_memory: u64) {
231 let mut current_peak = self.peak_memory.load(Ordering::Relaxed);
232
233 while new_memory > current_peak {
234 match self.peak_memory.compare_exchange_weak(
235 current_peak,
236 new_memory,
237 Ordering::Relaxed,
238 Ordering::Relaxed,
239 ) {
240 Ok(_) => break,
241 Err(actual) => current_peak = actual,
242 }
243 }
244 }
245
246 pub fn snapshot(&self) -> MemoryStatsSnapshot {
248 MemoryStatsSnapshot {
249 total_allocations: self.total_allocations.load(Ordering::Relaxed),
250 total_allocated: self.total_allocated.load(Ordering::Relaxed),
251 active_allocations: self.active_allocations.load(Ordering::Relaxed),
252 active_memory: self.active_memory.load(Ordering::Relaxed),
253 peak_allocations: self.peak_allocations.load(Ordering::Relaxed),
254 peak_memory: self.peak_memory.load(Ordering::Relaxed),
255 total_deallocations: self.total_deallocations.load(Ordering::Relaxed),
256 total_deallocated: self.total_deallocated.load(Ordering::Relaxed),
257 leaked_allocations: self.leaked_allocations.load(Ordering::Relaxed),
258 leaked_memory: self.leaked_memory.load(Ordering::Relaxed),
259 }
260 }
261
262 pub fn reset(&self) {
264 self.total_allocations.store(0, Ordering::Relaxed);
265 self.total_allocated.store(0, Ordering::Relaxed);
266 self.active_allocations.store(0, Ordering::Relaxed);
267 self.active_memory.store(0, Ordering::Relaxed);
268 self.peak_allocations.store(0, Ordering::Relaxed);
269 self.peak_memory.store(0, Ordering::Relaxed);
270 self.total_deallocations.store(0, Ordering::Relaxed);
271 self.total_deallocated.store(0, Ordering::Relaxed);
272 self.leaked_allocations.store(0, Ordering::Relaxed);
273 self.leaked_memory.store(0, Ordering::Relaxed);
274 }
275}
276
277impl Default for AtomicMemoryStats {
278 fn default() -> Self {
279 Self::new()
280 }
281}
282
/// Plain-value, serializable copy of a memory-stats tracker at one moment.
/// Produced by `SimpleMemoryStats::snapshot` and `AtomicMemoryStats::snapshot`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct MemoryStatsSnapshot {
    /// Lifetime allocation count.
    pub total_allocations: u64,
    /// Lifetime bytes allocated.
    pub total_allocated: u64,
    /// Allocations outstanding at snapshot time.
    pub active_allocations: u64,
    /// Bytes outstanding at snapshot time.
    pub active_memory: u64,
    /// High-water mark of active allocations.
    pub peak_allocations: u64,
    /// High-water mark of active bytes.
    pub peak_memory: u64,
    /// Lifetime deallocation count.
    pub total_deallocations: u64,
    /// Lifetime bytes deallocated.
    pub total_deallocated: u64,
    /// Allocations reported leaked.
    pub leaked_allocations: u64,
    /// Bytes reported leaked.
    pub leaked_memory: u64,
}
297
/// Lock-free counters for runtime performance events (clones, lock traffic,
/// cache hits/misses). All updates use relaxed atomics.
#[derive(Debug)]
pub struct AtomicPerformanceCounters {
    /// Number of clone operations recorded.
    pub clone_count: AtomicU64,
    /// Number of lock acquisitions recorded.
    pub lock_acquisitions: AtomicU64,
    /// Number of contended lock attempts recorded.
    pub lock_contentions: AtomicU64,
    /// Accumulated lock wait time, in nanoseconds.
    pub lock_wait_time_ns: AtomicU64,
    /// Cache hit count.
    pub cache_hits: AtomicU64,
    /// Cache miss count.
    pub cache_misses: AtomicU64,
}
314
315impl AtomicPerformanceCounters {
316 pub fn new() -> Self {
318 Self {
319 clone_count: AtomicU64::new(0),
320 lock_acquisitions: AtomicU64::new(0),
321 lock_contentions: AtomicU64::new(0),
322 lock_wait_time_ns: AtomicU64::new(0),
323 cache_hits: AtomicU64::new(0),
324 cache_misses: AtomicU64::new(0),
325 }
326 }
327
328 pub fn record_clone(&self) {
330 self.clone_count.fetch_add(1, Ordering::Relaxed);
331 }
332
333 pub fn record_lock_acquisition(&self, wait_time: Duration) {
335 self.lock_acquisitions.fetch_add(1, Ordering::Relaxed);
336 self.lock_wait_time_ns
337 .fetch_add(wait_time.as_nanos() as u64, Ordering::Relaxed);
338 }
339
340 pub fn record_lock_contention(&self) {
342 self.lock_contentions.fetch_add(1, Ordering::Relaxed);
343 }
344
345 pub fn record_cache_hit(&self) {
347 self.cache_hits.fetch_add(1, Ordering::Relaxed);
348 }
349
350 pub fn record_cache_miss(&self) {
352 self.cache_misses.fetch_add(1, Ordering::Relaxed);
353 }
354
355 pub fn snapshot(&self) -> PerformanceSnapshot {
357 PerformanceSnapshot {
358 clone_count: self.clone_count.load(Ordering::Relaxed),
359 lock_acquisitions: self.lock_acquisitions.load(Ordering::Relaxed),
360 lock_contentions: self.lock_contentions.load(Ordering::Relaxed),
361 lock_wait_time_ns: self.lock_wait_time_ns.load(Ordering::Relaxed),
362 cache_hits: self.cache_hits.load(Ordering::Relaxed),
363 cache_misses: self.cache_misses.load(Ordering::Relaxed),
364 }
365 }
366}
367
368impl Default for AtomicPerformanceCounters {
369 fn default() -> Self {
370 Self::new()
371 }
372}
373
/// Plain-value, serializable copy of `AtomicPerformanceCounters` at one
/// moment, with derived-ratio helper methods.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PerformanceSnapshot {
    /// Clone operations recorded.
    pub clone_count: u64,
    /// Lock acquisitions recorded.
    pub lock_acquisitions: u64,
    /// Contended lock attempts recorded.
    pub lock_contentions: u64,
    /// Accumulated lock wait time, in nanoseconds.
    pub lock_wait_time_ns: u64,
    /// Cache hit count.
    pub cache_hits: u64,
    /// Cache miss count.
    pub cache_misses: u64,
}
384
385impl PerformanceSnapshot {
386 pub fn cache_hit_ratio(&self) -> f64 {
388 let total = self.cache_hits + self.cache_misses;
389 if total > 0 {
390 self.cache_hits as f64 / total as f64
391 } else {
392 0.0
393 }
394 }
395
396 pub fn avg_lock_wait_time_ns(&self) -> f64 {
398 if self.lock_acquisitions > 0 {
399 self.lock_wait_time_ns as f64 / self.lock_acquisitions as f64
400 } else {
401 0.0
402 }
403 }
404
405 pub fn lock_contention_ratio(&self) -> f64 {
407 if self.lock_acquisitions > 0 {
408 self.lock_contentions as f64 / self.lock_acquisitions as f64
409 } else {
410 0.0
411 }
412 }
413}
414
// Process-wide stats instance, created lazily on first access.
static GLOBAL_ATOMIC_STATS: std::sync::OnceLock<AtomicMemoryStats> = std::sync::OnceLock::new();

/// Returns the process-wide [`AtomicMemoryStats`] singleton, initializing it
/// on first call. Every caller observes the same instance, so counts
/// accumulate across the whole process (including across tests).
pub fn get_global_atomic_stats() -> &'static AtomicMemoryStats {
    GLOBAL_ATOMIC_STATS.get_or_init(AtomicMemoryStats::new)
}
422
#[cfg(test)]
mod tests {
    use super::*;
    use std::time::Duration;

    // --- SimpleMemoryStats -------------------------------------------------

    #[test]
    fn test_simple_memory_stats_creation() {
        let stats = SimpleMemoryStats::new();
        let snapshot = stats.snapshot();

        assert_eq!(snapshot.total_allocations, 0);
        assert_eq!(snapshot.total_allocated, 0);
        assert_eq!(snapshot.active_allocations, 0);
        assert_eq!(snapshot.active_memory, 0);
    }

    // Fast path only touches the lock-free counters, not the cold section.
    #[test]
    fn test_simple_memory_stats_fast_allocation() {
        let stats = SimpleMemoryStats::new();
        stats.record_allocation_fast(1024);

        let snapshot = stats.snapshot();
        assert_eq!(snapshot.total_allocations, 1);
        assert_eq!(snapshot.total_allocated, 1024);
    }

    // Detailed path additionally updates active and peak bookkeeping.
    #[test]
    fn test_simple_memory_stats_detailed_allocation() {
        let stats = SimpleMemoryStats::new();
        stats.record_allocation_detailed(2048);

        let snapshot = stats.snapshot();
        assert_eq!(snapshot.total_allocations, 1);
        assert_eq!(snapshot.total_allocated, 2048);
        assert_eq!(snapshot.active_allocations, 1);
        assert_eq!(snapshot.active_memory, 2048);
        assert_eq!(snapshot.peak_allocations, 1);
        assert_eq!(snapshot.peak_memory, 2048);
    }

    #[test]
    fn test_simple_memory_stats_deallocation() {
        let stats = SimpleMemoryStats::new();
        stats.record_allocation_detailed(1024);
        stats.record_deallocation(1024);

        let snapshot = stats.snapshot();
        assert_eq!(snapshot.total_deallocations, 1);
        assert_eq!(snapshot.total_deallocated, 1024);
        assert_eq!(snapshot.active_allocations, 0);
        assert_eq!(snapshot.active_memory, 0);
    }

    #[test]
    fn test_simple_memory_stats_leak() {
        let stats = SimpleMemoryStats::new();
        stats.record_leak(512);

        let snapshot = stats.snapshot();
        assert_eq!(snapshot.leaked_allocations, 1);
        assert_eq!(snapshot.leaked_memory, 512);
    }

    #[test]
    fn test_simple_memory_stats_reset() {
        let stats = SimpleMemoryStats::new();
        stats.record_allocation_fast(1024);
        stats.record_leak(256);

        stats.reset();
        let snapshot = stats.snapshot();

        assert_eq!(snapshot.total_allocations, 0);
        assert_eq!(snapshot.total_allocated, 0);
    }

    // --- AtomicMemoryStats -------------------------------------------------

    #[test]
    fn test_atomic_memory_stats_creation() {
        let stats = AtomicMemoryStats::new();
        let snapshot = stats.snapshot();

        assert_eq!(snapshot.total_allocations, 0);
        assert_eq!(snapshot.total_allocated, 0);
        assert_eq!(snapshot.active_allocations, 0);
        assert_eq!(snapshot.active_memory, 0);
    }

    #[test]
    fn test_atomic_memory_stats_allocation() {
        let stats = AtomicMemoryStats::new();
        stats.record_allocation(1024);

        let snapshot = stats.snapshot();
        assert_eq!(snapshot.total_allocations, 1);
        assert_eq!(snapshot.total_allocated, 1024);
        assert_eq!(snapshot.active_allocations, 1);
        assert_eq!(snapshot.active_memory, 1024);
        assert_eq!(snapshot.peak_allocations, 1);
        assert_eq!(snapshot.peak_memory, 1024);
    }

    #[test]
    fn test_atomic_memory_stats_multiple_allocations() {
        let stats = AtomicMemoryStats::new();

        stats.record_allocation(512);
        stats.record_allocation(1024);
        stats.record_allocation(256);

        let snapshot = stats.snapshot();
        assert_eq!(snapshot.total_allocations, 3);
        assert_eq!(snapshot.total_allocated, 1792);
        assert_eq!(snapshot.active_allocations, 3);
        assert_eq!(snapshot.active_memory, 1792);
        assert_eq!(snapshot.peak_memory, 1792);
    }

    #[test]
    fn test_atomic_memory_stats_deallocation() {
        let stats = AtomicMemoryStats::new();
        stats.record_allocation(1024);
        stats.record_deallocation(1024);

        let snapshot = stats.snapshot();
        assert_eq!(snapshot.total_allocations, 1);
        assert_eq!(snapshot.total_deallocations, 1);
        assert_eq!(snapshot.total_deallocated, 1024);
        assert_eq!(snapshot.active_allocations, 0);
        assert_eq!(snapshot.active_memory, 0);
    }

    // Peaks must be retained after memory is freed below the high-water mark.
    #[test]
    fn test_atomic_memory_stats_peak_tracking() {
        let stats = AtomicMemoryStats::new();

        stats.record_allocation(1000);
        stats.record_allocation(2000);
        let peak_snapshot = stats.snapshot();

        stats.record_deallocation(1000);
        let current_snapshot = stats.snapshot();

        assert_eq!(peak_snapshot.peak_memory, 3000);
        assert_eq!(current_snapshot.peak_memory, 3000);
        assert_eq!(current_snapshot.active_memory, 2000);
    }

    #[test]
    fn test_atomic_memory_stats_leak() {
        let stats = AtomicMemoryStats::new();
        stats.record_leak(512);

        let snapshot = stats.snapshot();
        assert_eq!(snapshot.leaked_allocations, 1);
        assert_eq!(snapshot.leaked_memory, 512);
    }

    #[test]
    fn test_atomic_memory_stats_reset() {
        let stats = AtomicMemoryStats::new();
        stats.record_allocation(1024);
        stats.record_leak(256);

        stats.reset();
        let snapshot = stats.snapshot();

        assert_eq!(snapshot.total_allocations, 0);
        assert_eq!(snapshot.total_allocated, 0);
        assert_eq!(snapshot.leaked_allocations, 0);
        assert_eq!(snapshot.leaked_memory, 0);
    }

    // --- AtomicPerformanceCounters -----------------------------------------

    #[test]
    fn test_atomic_performance_counters_creation() {
        let counters = AtomicPerformanceCounters::new();
        let snapshot = counters.snapshot();

        assert_eq!(snapshot.clone_count, 0);
        assert_eq!(snapshot.lock_acquisitions, 0);
        assert_eq!(snapshot.lock_contentions, 0);
        assert_eq!(snapshot.cache_hits, 0);
        assert_eq!(snapshot.cache_misses, 0);
    }

    #[test]
    fn test_atomic_performance_counters_clone() {
        let counters = AtomicPerformanceCounters::new();
        counters.record_clone();
        counters.record_clone();

        let snapshot = counters.snapshot();
        assert_eq!(snapshot.clone_count, 2);
    }

    #[test]
    fn test_atomic_performance_counters_lock_acquisition() {
        let counters = AtomicPerformanceCounters::new();
        let wait_time = Duration::from_millis(10);
        counters.record_lock_acquisition(wait_time);

        let snapshot = counters.snapshot();
        assert_eq!(snapshot.lock_acquisitions, 1);
        assert_eq!(snapshot.lock_wait_time_ns, wait_time.as_nanos() as u64);
    }

    #[test]
    fn test_atomic_performance_counters_lock_contention() {
        let counters = AtomicPerformanceCounters::new();
        counters.record_lock_contention();
        counters.record_lock_contention();

        let snapshot = counters.snapshot();
        assert_eq!(snapshot.lock_contentions, 2);
    }

    #[test]
    fn test_atomic_performance_counters_cache() {
        let counters = AtomicPerformanceCounters::new();
        counters.record_cache_hit();
        counters.record_cache_hit();
        counters.record_cache_miss();

        let snapshot = counters.snapshot();
        assert_eq!(snapshot.cache_hits, 2);
        assert_eq!(snapshot.cache_misses, 1);
    }

    // --- PerformanceSnapshot derived ratios --------------------------------

    #[test]
    fn test_performance_snapshot_calculations() {
        let snapshot = PerformanceSnapshot {
            clone_count: 100,
            lock_acquisitions: 80,
            lock_contentions: 8,
            lock_wait_time_ns: 800_000,
            cache_hits: 90,
            cache_misses: 10,
        };

        let hit_ratio = snapshot.cache_hit_ratio();
        assert!((hit_ratio - 0.9).abs() < f64::EPSILON);

        let avg_wait = snapshot.avg_lock_wait_time_ns();
        assert!((avg_wait - 10_000.0).abs() < f64::EPSILON);

        let contention_ratio = snapshot.lock_contention_ratio();
        assert!((contention_ratio - 0.1).abs() < f64::EPSILON);
    }

    // All ratios must return 0.0 (not NaN) when nothing was recorded.
    #[test]
    fn test_performance_snapshot_edge_cases() {
        let empty_snapshot = PerformanceSnapshot {
            clone_count: 0,
            lock_acquisitions: 0,
            lock_contentions: 0,
            lock_wait_time_ns: 0,
            cache_hits: 0,
            cache_misses: 0,
        };

        assert_eq!(empty_snapshot.cache_hit_ratio(), 0.0);
        assert_eq!(empty_snapshot.avg_lock_wait_time_ns(), 0.0);
        assert_eq!(empty_snapshot.lock_contention_ratio(), 0.0);
    }

    #[test]
    fn test_memory_stats_snapshot_creation() {
        let snapshot = MemoryStatsSnapshot {
            total_allocations: 100,
            total_allocated: 10240,
            active_allocations: 50,
            active_memory: 5120,
            peak_allocations: 80,
            peak_memory: 8192,
            total_deallocations: 50,
            total_deallocated: 5120,
            leaked_allocations: 5,
            leaked_memory: 512,
        };

        assert_eq!(snapshot.total_allocations, 100);
        assert_eq!(snapshot.total_allocated, 10240);
        assert_eq!(snapshot.active_allocations, 50);
        assert_eq!(snapshot.peak_memory, 8192);
        assert_eq!(snapshot.leaked_allocations, 5);
    }

    #[test]
    fn test_default_implementations() {
        let simple_stats = SimpleMemoryStats::default();
        let atomic_stats = AtomicMemoryStats::default();
        let counters = AtomicPerformanceCounters::default();

        assert_eq!(simple_stats.snapshot().total_allocations, 0);
        assert_eq!(atomic_stats.snapshot().total_allocations, 0);
        assert_eq!(counters.snapshot().clone_count, 0);
    }

    // The global is shared across all tests in the process, so only
    // identity and a lower bound on the count can be asserted.
    #[test]
    fn test_global_atomic_stats() {
        let stats = get_global_atomic_stats();

        let stats2 = get_global_atomic_stats();
        assert!(std::ptr::eq(stats, stats2));

        stats.record_allocation(1024);
        let snapshot = stats.snapshot();
        assert!(snapshot.total_allocations > 0);
    }
}