// memscope_rs/core/targeted_optimizations.rs

1//! Targeted optimizations for actual performance bottlenecks
2//!
3//! Based on profiling data, these optimizations target real performance issues
4//! rather than theoretical ones.
5
6use crate::core::atomic_stats::SimpleMemoryStats;
7use crate::core::simple_mutex::SimpleMutex;
8use std::collections::HashMap;
9use std::sync::atomic::{AtomicU64, Ordering};
10
/// Optimized statistics collector that avoids lock contention.
///
/// Hot-path counters live in `SimpleMemoryStats` and the relaxed atomic
/// fields below; only the low-frequency histogram data sits behind a mutex.
pub struct FastStatsCollector {
    // Core memory statistics using cache-line optimized structure
    memory_stats: SimpleMemoryStats,

    // Additional counters for deallocations (updated with relaxed ordering).
    /// Number of deallocation events recorded.
    pub deallocation_count: AtomicU64,
    /// Total bytes recorded as deallocated.
    pub total_deallocated: AtomicU64,

    // Low-frequency detailed data uses simple mutex
    detailed_data: SimpleMutex<DetailedStatsData>,
}
23
24impl Default for FastStatsCollector {
25    fn default() -> Self {
26        Self::new()
27    }
28}
29
/// Detailed, low-frequency statistics kept behind the `detailed_data` mutex.
#[derive(Default)]
struct DetailedStatsData {
    // Every recorded allocation size, in arrival order (used for the average).
    allocation_sizes: Vec<usize>,
    // Highest observed active memory, in bytes.
    peak_memory: usize,
    // allocation size -> number of allocations of that size
    allocation_histogram: HashMap<usize, u64>,
}
36
37impl FastStatsCollector {
38    pub fn new() -> Self {
39        Self {
40            memory_stats: SimpleMemoryStats::new(),
41            deallocation_count: AtomicU64::new(0),
42            total_deallocated: AtomicU64::new(0),
43            detailed_data: SimpleMutex::new(DetailedStatsData::default()),
44        }
45    }
46
47    /// Fast path: just increment counters (no locks)
48    pub fn record_allocation_fast(&self, size: usize) {
49        self.memory_stats.record_allocation_fast(size as u64);
50    }
51
52    /// Slow path: record detailed data (uses lock, but only when needed)
53    pub fn record_allocation_detailed(&self, size: usize) {
54        // Use the optimized detailed recording
55        self.memory_stats.record_allocation_detailed(size as u64);
56
57        // Only do histogram tracking if we can get the lock quickly
58        #[cfg(feature = "parking-lot")]
59        if let Some(mut data) = self.detailed_data.try_lock() {
60            data.allocation_sizes.push(size);
61            *data.allocation_histogram.entry(size).or_insert(0) += 1;
62
63            // Update peak memory
64            let current_memory = self.memory_stats.snapshot().active_memory as usize;
65            if current_memory > data.peak_memory {
66                data.peak_memory = current_memory;
67            }
68        }
69
70        #[cfg(not(feature = "parking-lot"))]
71        if let Ok(mut data) = self.detailed_data.try_lock() {
72            data.allocation_sizes.push(size);
73            *data.allocation_histogram.entry(size).or_insert(0) += 1;
74
75            // Update peak memory
76            let current_memory = self.memory_stats.snapshot().active_memory as usize;
77            if current_memory > data.peak_memory {
78                data.peak_memory = current_memory;
79            }
80        }
81        // If we can't get the lock, we just skip histogram tracking
82    }
83
84    pub fn record_deallocation(&self, size: usize) {
85        self.deallocation_count.fetch_add(1, Ordering::Relaxed);
86        self.total_deallocated
87            .fetch_add(size as u64, Ordering::Relaxed);
88        self.memory_stats.record_deallocation(size as u64);
89    }
90
91    /// Get basic stats without any locks
92    pub fn get_basic_stats(&self) -> BasicStats {
93        let snapshot = self.memory_stats.snapshot();
94        BasicStats {
95            allocation_count: snapshot.total_allocations,
96            deallocation_count: self.deallocation_count.load(Ordering::Relaxed),
97            total_allocated: snapshot.total_allocated,
98            total_deallocated: self.total_deallocated.load(Ordering::Relaxed),
99        }
100    }
101
102    /// Get detailed stats (may use lock, but with timeout)
103    pub fn get_detailed_stats(&self) -> Option<DetailedStats> {
104        let basic = self.get_basic_stats();
105
106        #[cfg(feature = "parking-lot")]
107        {
108            self.detailed_data.try_lock().map(|data| DetailedStats {
109                basic,
110                peak_memory: data.peak_memory,
111                avg_allocation_size: if !data.allocation_sizes.is_empty() {
112                    data.allocation_sizes.iter().sum::<usize>() / data.allocation_sizes.len()
113                } else {
114                    0
115                },
116                allocation_count_by_size: data.allocation_histogram.clone(),
117            })
118        }
119
120        #[cfg(not(feature = "parking-lot"))]
121        {
122            if let Ok(data) = self.detailed_data.try_lock() {
123                Some(DetailedStats {
124                    basic,
125                    peak_memory: data.peak_memory,
126                    avg_allocation_size: if !data.allocation_sizes.is_empty() {
127                        data.allocation_sizes.iter().sum::<usize>() / data.allocation_sizes.len()
128                    } else {
129                        0
130                    },
131                    allocation_count_by_size: data.allocation_histogram.clone(),
132                })
133            } else {
134                // If we can't get detailed stats, return basic stats only
135                None
136            }
137        }
138    }
139}
140
/// Lock-free counter snapshot returned by `FastStatsCollector::get_basic_stats`.
#[derive(Debug, Clone)]
pub struct BasicStats {
    /// Number of allocations recorded.
    pub allocation_count: u64,
    /// Number of deallocations recorded.
    pub deallocation_count: u64,
    /// Total bytes recorded as allocated.
    pub total_allocated: u64,
    /// Total bytes recorded as deallocated.
    pub total_deallocated: u64,
}
148
/// Extended statistics returned by `FastStatsCollector::get_detailed_stats`
/// when the detail lock could be acquired.
#[derive(Debug, Clone)]
pub struct DetailedStats {
    /// The lock-free counters at the time of the snapshot.
    pub basic: BasicStats,
    /// Highest observed active memory, in bytes.
    pub peak_memory: usize,
    /// Mean recorded allocation size (integer division; 0 with no samples).
    pub avg_allocation_size: usize,
    /// allocation size -> number of allocations of that size
    pub allocation_count_by_size: HashMap<usize, u64>,
}
156
/// Optimized unwrap replacement for hot paths.
///
/// Yields the contained value, or `T::default()` when `option` is `None`.
pub fn fast_unwrap_or_default<T: Default>(option: Option<T>) -> T {
    match option {
        Some(value) => value,
        None => T::default(),
    }
}
161
/// Optimized unwrap for results in hot paths.
///
/// Yields the `Ok` value, or `T::default()` when `result` is an `Err`
/// (the error itself is discarded).
pub fn fast_unwrap_result_or_default<T: Default, E>(result: Result<T, E>) -> T {
    match result {
        Ok(value) => value,
        Err(_) => T::default(),
    }
}
166
/// Batch operations to reduce lock frequency.
///
/// Items are buffered under a mutex and handed to `processor` as a slice
/// once the buffer reaches `batch_size` (or when `flush` is called).
pub struct BatchProcessor<T> {
    // Pending items awaiting processing.
    batch: SimpleMutex<Vec<T>>,
    // Threshold at which the pending buffer is handed to `processor`.
    batch_size: usize,
    #[allow(clippy::type_complexity)]
    // Callback invoked with each drained batch.
    processor: Box<dyn Fn(&[T]) + Send + Sync>,
}
174
175impl<T> BatchProcessor<T> {
176    pub fn new<F>(batch_size: usize, processor: F) -> Self
177    where
178        F: Fn(&[T]) + Send + Sync + 'static,
179    {
180        Self {
181            batch: SimpleMutex::new(Vec::with_capacity(batch_size)),
182            batch_size,
183            processor: Box::new(processor),
184        }
185    }
186
187    pub fn add(&self, item: T) {
188        let should_process = {
189            #[cfg(feature = "parking-lot")]
190            {
191                let mut batch = self.batch.lock();
192                batch.push(item);
193                batch.len() >= self.batch_size
194            }
195            #[cfg(not(feature = "parking-lot"))]
196            {
197                let mut batch = self.batch.safe_lock().expect("Failed to lock batch");
198                batch.push(item);
199                batch.len() >= self.batch_size
200            }
201        };
202
203        if should_process {
204            self.process_batch();
205        }
206    }
207
208    fn process_batch(&self) {
209        let batch = {
210            #[cfg(feature = "parking-lot")]
211            {
212                let mut batch_guard = self.batch.lock();
213                std::mem::take(&mut *batch_guard)
214            }
215            #[cfg(not(feature = "parking-lot"))]
216            {
217                let mut batch_guard = self.batch.safe_lock().expect("Failed to lock batch");
218                std::mem::take(&mut *batch_guard)
219            }
220        };
221
222        if !batch.is_empty() {
223            (self.processor)(&batch);
224        }
225    }
226
227    pub fn flush(&self) {
228        self.process_batch();
229    }
230}
231
/// Performance-aware string handling: joins `parts` into one `String`.
///
/// Delegates to [`slice::concat`], which pre-computes the total length and
/// allocates exactly once — the same strategy the previous hand-rolled
/// loop (empty/single special cases + `with_capacity` + `push_str`)
/// implemented manually.
pub fn efficient_string_concat(parts: &[&str]) -> String {
    parts.concat()
}
252
/// Optimized clone avoidance for common patterns.
///
/// Returns a borrowed `Cow` unless the caller explicitly needs ownership,
/// in which case the value is cloned.
pub fn clone_if_needed<T: Clone>(value: &T, need_owned: bool) -> std::borrow::Cow<'_, T> {
    use std::borrow::Cow;

    if !need_owned {
        return Cow::Borrowed(value);
    }
    Cow::Owned(value.clone())
}
261
262#[cfg(test)]
263mod tests {
264    use super::*;
265    use std::sync::Arc;
266    use std::thread;
267
    /// Sanity check for the lock-free fast path: 100 records of 64 bytes
    /// must be reflected exactly in the basic stats.
    #[test]
    fn test_fast_stats_collector() {
        let collector = FastStatsCollector::new();

        // Test fast path
        for _i in 0..100 {
            // Reduced from 1000 to 100
            collector.record_allocation_fast(64);
        }

        let stats = collector.get_basic_stats();
        assert_eq!(stats.allocation_count, 100); // Updated expectation
        assert_eq!(stats.total_allocated, 6400); // 100 * 64
    }
282
    /// Ten threads each record 100 fast-path allocations concurrently; the
    /// counters must end at exactly 1000 with no lost updates.
    #[test]
    fn test_fast_stats_concurrent() {
        let collector = Arc::new(FastStatsCollector::new());
        let mut handles = vec![];

        // Test concurrent access
        for _ in 0..10 {
            let collector_clone = collector.clone();
            let handle = thread::spawn(move || {
                for _ in 0..100 {
                    collector_clone.record_allocation_fast(32);
                }
            });
            handles.push(handle);
        }

        for handle in handles {
            // Join failures are only logged so the remaining handles still
            // get joined; the final assert catches any missing work.
            if let Err(e) = handle.join() {
                eprintln!("Thread join failed: {e:?}");
            }
        }

        let stats = collector.get_basic_stats();
        assert_eq!(stats.allocation_count, 1000); // 10 threads × 100 allocations each
    }
308
    /// With batch size 3, the third `add` should trigger the processor
    /// with exactly the three items, in insertion order.
    #[test]
    fn test_batch_processor() {
        let processed_items = Arc::new(SimpleMutex::new(Vec::new()));
        let processed_clone = processed_items.clone();

        let processor = BatchProcessor::new(3, move |batch: &[i32]| {
            // Lock API differs by feature: parking-lot's lock() returns the
            // guard directly; the fallback returns a Result.
            #[cfg(feature = "parking-lot")]
            {
                let mut items = processed_clone.lock();
                items.extend_from_slice(batch);
            }
            #[cfg(not(feature = "parking-lot"))]
            {
                let mut items = processed_clone
                    .safe_lock()
                    .expect("Failed to lock processed_items");
                items.extend_from_slice(batch);
            }
        });

        // Add items one by one
        processor.add(1);
        processor.add(2);
        processor.add(3); // This should trigger processing

        // Give it a moment to process
        std::thread::sleep(std::time::Duration::from_millis(10));

        #[cfg(feature = "parking-lot")]
        {
            let items = processed_items.lock();
            assert_eq!(*items, vec![1, 2, 3]);
        }
        #[cfg(not(feature = "parking-lot"))]
        {
            let items = processed_items
                .safe_lock()
                .expect("Failed to lock processed_items");
            assert_eq!(*items, vec![1, 2, 3]);
        }
    }
350
351    #[test]
352    fn test_efficient_string_concat() {
353        let parts = vec!["Hello", " ", "World", "!"];
354        let result = efficient_string_concat(&parts);
355        assert_eq!(result, "Hello World!");
356
357        // Test empty case
358        let empty_result = efficient_string_concat(&[]);
359        assert_eq!(empty_result, "");
360
361        // Test single item
362        let single_result = efficient_string_concat(&["test"]);
363        assert_eq!(single_result, "test");
364    }
365
    /// Covers the basic-stats counters across a spread of sizes plus the
    /// boundary cases: zero-size and `usize::MAX`-size allocations.
    #[test]
    fn test_fast_stats_collector_comprehensive() {
        let collector = FastStatsCollector::new();

        // Test initial state
        let initial_stats = collector.get_basic_stats();
        assert_eq!(initial_stats.allocation_count, 0);
        assert_eq!(initial_stats.total_allocated, 0);

        // Test various allocation sizes
        let test_sizes = [1, 8, 16, 32, 64, 128, 256, 512, 1024, 4096];
        for &size in &test_sizes {
            collector.record_allocation_fast(size);
        }

        let stats = collector.get_basic_stats();
        assert_eq!(stats.allocation_count, test_sizes.len() as u64);
        let expected_total: usize = test_sizes.iter().sum();
        assert_eq!(stats.total_allocated, expected_total as u64);

        // A zero-size allocation bumps the count but not the byte total.
        collector.record_allocation_fast(0);
        let stats_after_zero = collector.get_basic_stats();
        assert_eq!(
            stats_after_zero.allocation_count,
            stats.allocation_count + 1
        );
        assert_eq!(stats_after_zero.total_allocated, stats.total_allocated);

        // A huge allocation must still be counted; only the count is
        // asserted since the byte total's overflow behavior is up to
        // SimpleMemoryStats.
        collector.record_allocation_fast(usize::MAX);
        let stats_after_large = collector.get_basic_stats();
        assert_eq!(
            stats_after_large.allocation_count,
            stats_after_zero.allocation_count + 1
        );
    }
403
    /// Stress of small and mixed allocation patterns: 10k one-byte records
    /// followed by 1k records of cycling sizes (1..=100).
    #[test]
    fn test_fast_stats_collector_edge_cases() {
        let collector = FastStatsCollector::new();

        // Test many small allocations
        for _ in 0..10000 {
            collector.record_allocation_fast(1);
        }

        let stats = collector.get_basic_stats();
        assert_eq!(stats.allocation_count, 10000);
        assert_eq!(stats.total_allocated, 10000);

        // Test mixed allocation patterns
        for i in 0..1000 {
            collector.record_allocation_fast(i % 100 + 1);
        }

        let final_stats = collector.get_basic_stats();
        assert_eq!(final_stats.allocation_count, 11000); // 10000 + 1000
        assert!(final_stats.total_allocated > 10000);
    }
426
    /// Twelve items through a batch size of 5 should yield at least two
    /// full batches; the trailing partial batch is not auto-flushed.
    #[test]
    fn test_batch_processor_comprehensive() {
        let processed_batches = Arc::new(SimpleMutex::new(Vec::new()));
        let processed_clone = processed_batches.clone();

        let processor = BatchProcessor::new(5, move |batch: &[String]| {
            #[cfg(feature = "parking-lot")]
            {
                let mut batches = processed_clone.lock();
                batches.push(batch.to_vec());
            }
            #[cfg(not(feature = "parking-lot"))]
            {
                let mut batches = processed_clone
                    .safe_lock()
                    .expect("Failed to lock processed_batches");
                batches.push(batch.to_vec());
            }
        });

        // Test batch processing with strings
        for i in 0..12 {
            processor.add(format!("item_{}", i));
        }

        // Give time for processing
        std::thread::sleep(std::time::Duration::from_millis(50));

        #[cfg(feature = "parking-lot")]
        {
            let batches = processed_batches.lock();
            assert!(batches.len() >= 2); // Should have processed at least 2 full batches
        }
        #[cfg(not(feature = "parking-lot"))]
        {
            let batches = processed_batches
                .safe_lock()
                .expect("Failed to lock processed_batches");
            assert!(batches.len() >= 2); // Should have processed at least 2 full batches
        }
    }
468
    /// With batch size 1 every `add` flushes immediately, so all three
    /// items must be visible shortly afterwards.
    #[test]
    fn test_batch_processor_single_item() {
        let processed_items = Arc::new(SimpleMutex::new(Vec::new()));
        let processed_clone = processed_items.clone();

        let processor = BatchProcessor::new(1, move |batch: &[u32]| {
            #[cfg(feature = "parking-lot")]
            {
                let mut items = processed_clone.lock();
                items.extend_from_slice(batch);
            }
            #[cfg(not(feature = "parking-lot"))]
            {
                let mut items = processed_clone
                    .safe_lock()
                    .expect("Failed to lock processed_items");
                items.extend_from_slice(batch);
            }
        });

        // With batch size 1, each item should be processed immediately
        processor.add(42);
        processor.add(84);
        processor.add(126);

        std::thread::sleep(std::time::Duration::from_millis(20));

        #[cfg(feature = "parking-lot")]
        {
            let items = processed_items.lock();
            assert_eq!(items.len(), 3);
            assert!(items.contains(&42));
            assert!(items.contains(&84));
            assert!(items.contains(&126));
        }
        #[cfg(not(feature = "parking-lot"))]
        {
            let items = processed_items
                .safe_lock()
                .expect("Failed to lock processed_items");
            assert_eq!(items.len(), 3);
            assert!(items.contains(&42));
            assert!(items.contains(&84));
            assert!(items.contains(&126));
        }
    }
515
516    #[test]
517    fn test_efficient_string_concat_edge_cases() {
518        // Test with very long strings
519        let long_parts: Vec<String> = (0..1000).map(|i| format!("part_{}", i)).collect();
520        let long_parts_str: Vec<&str> = long_parts.iter().map(|s| s.as_str()).collect();
521        let result = efficient_string_concat(&long_parts_str);
522        assert!(result.contains("part_0"));
523        assert!(result.contains("part_999"));
524        assert_eq!(result.matches("part_").count(), 1000);
525
526        // Test with empty strings
527        let empty_parts = vec!["", "", ""];
528        let empty_result = efficient_string_concat(&empty_parts);
529        assert_eq!(empty_result, "");
530
531        // Test with mixed empty and non-empty strings
532        let mixed_parts = vec!["", "hello", "", "world", ""];
533        let mixed_result = efficient_string_concat(&mixed_parts);
534        assert_eq!(mixed_result, "helloworld");
535
536        // Test with unicode strings
537        let unicode_parts = vec!["Hello", " ", "World", " ", "🦀"];
538        let unicode_result = efficient_string_concat(&unicode_parts);
539        assert_eq!(unicode_result, "Hello World 🦀");
540
541        // Test with very large single string
542        let large_string = "a".repeat(10000);
543        let large_parts = vec![large_string.as_str()];
544        let large_result = efficient_string_concat(&large_parts);
545        assert_eq!(large_result.len(), 10000);
546        assert!(large_result.chars().all(|c| c == 'a'));
547    }
548
549    #[test]
550    fn test_basic_stats_operations() {
551        let mut stats = BasicStats {
552            allocation_count: 10,
553            total_allocated: 1024,
554            deallocation_count: 0,
555            total_deallocated: 0,
556        };
557
558        // Test that we can modify stats
559        stats.allocation_count += 5;
560        stats.total_allocated += 512;
561
562        assert_eq!(stats.allocation_count, 15);
563        assert_eq!(stats.total_allocated, 1536);
564
565        // Test default/zero stats
566        let zero_stats = BasicStats {
567            allocation_count: 0,
568            total_allocated: 0,
569            deallocation_count: 0,
570            total_deallocated: 0,
571        };
572
573        assert_eq!(zero_stats.allocation_count, 0);
574        assert_eq!(zero_stats.total_allocated, 0);
575
576        // Test maximum values
577        let max_stats = BasicStats {
578            allocation_count: u64::MAX,
579            total_allocated: u64::MAX,
580            deallocation_count: u64::MAX,
581            total_deallocated: u64::MAX,
582        };
583
584        assert_eq!(max_stats.allocation_count, u64::MAX);
585        assert_eq!(max_stats.total_allocated, u64::MAX);
586    }
587
    /// Five threads × 20 allocations of varying size; the final count must
    /// be exactly 100 and a positive number of bytes must be recorded.
    #[test]
    fn test_fast_stats_collector_concurrent_stress() {
        let collector = Arc::new(FastStatsCollector::new());
        let mut handles = vec![];

        // Stress test with many threads
        for thread_id in 0..5 {
            let collector_clone = collector.clone();
            let handle = thread::spawn(move || {
                for i in 0..20 {
                    // Size depends on thread id and iteration, in 1..=1000.
                    let size = (thread_id * 100 + i) % 1000 + 1;
                    collector_clone.record_allocation_fast(size);
                }
            });
            handles.push(handle);
        }

        for handle in handles {
            handle.join().expect("Thread should complete successfully");
        }

        let final_stats = collector.get_basic_stats();
        assert_eq!(final_stats.allocation_count, 100); // 5 threads × 20 allocations
        assert!(final_stats.total_allocated > 0);
    }
613
    /// Five threads push 100 items each through a batch size of 10. The
    /// processed total is checked with a tolerance band because trailing
    /// partial batches are not auto-flushed.
    #[test]
    fn test_batch_processor_concurrent_access() {
        let processed_count = Arc::new(SimpleMutex::new(0usize));
        let count_clone = processed_count.clone();

        let processor = Arc::new(BatchProcessor::new(10, move |batch: &[usize]| {
            #[cfg(feature = "parking-lot")]
            {
                let mut count = count_clone.lock();
                *count += batch.len();
            }
            #[cfg(not(feature = "parking-lot"))]
            {
                let mut count = count_clone.safe_lock().expect("Failed to lock count");
                *count += batch.len();
            }
        }));

        let mut handles = vec![];

        // Multiple threads adding items
        for thread_id in 0..5 {
            let processor_clone = processor.clone();
            let handle = thread::spawn(move || {
                for i in 0..100 {
                    processor_clone.add(thread_id * 100 + i);
                }
            });
            handles.push(handle);
        }

        for handle in handles {
            handle.join().expect("Thread should complete");
        }

        // Give time for all batches to be processed
        std::thread::sleep(std::time::Duration::from_millis(100));

        #[cfg(feature = "parking-lot")]
        {
            let count = processed_count.lock();
            // Due to concurrent access and batch processing, the count might be slightly less
            // than expected due to timing issues. Allow for some tolerance.
            assert!(
                *count >= 450 && *count <= 500,
                "Expected count between 450-500, got {}",
                *count
            );
        }
        #[cfg(not(feature = "parking-lot"))]
        {
            let count = processed_count.safe_lock().expect("Failed to lock count");
            // Due to concurrent access and batch processing, the count might be slightly less
            // than expected due to timing issues. Allow for some tolerance.
            assert!(
                *count >= 450 && *count <= 500,
                "Expected count between 450-500, got {}",
                *count
            );
        }
    }
675
676    #[test]
677    fn test_efficient_string_concat_performance_characteristics() {
678        // Test that the function handles various input patterns efficiently
679
680        // Many small strings
681        let small_strings: Vec<String> = (0..1000).map(|i| format!("{}", i)).collect();
682        let small_refs: Vec<&str> = small_strings.iter().map(|s| s.as_str()).collect();
683        let result1 = efficient_string_concat(&small_refs);
684        assert!(result1.len() > 1000); // Should contain all numbers
685
686        // Few large strings
687        let large_strings = ["a".repeat(1000), "b".repeat(1000), "c".repeat(1000)];
688        let large_refs: Vec<&str> = large_strings.iter().map(|s| s.as_str()).collect();
689        let result2 = efficient_string_concat(&large_refs);
690        assert_eq!(result2.len(), 3000);
691
692        // Mixed sizes
693        let temp_a = "a".repeat(100);
694        let temp_b = "b".repeat(500);
695        let mixed_strings = vec![
696            "short",
697            temp_a.as_str(),
698            "medium_length_string",
699            temp_b.as_str(),
700        ];
701        let result3 = efficient_string_concat(&mixed_strings);
702        assert!(result3.contains("short"));
703        assert!(result3.contains("medium_length_string"));
704        assert!(result3.len() > 600);
705    }
706
707    #[test]
708    fn test_batch_processor_edge_cases() {
709        // Test with batch size 0 (should handle gracefully)
710        let processed_items = Arc::new(SimpleMutex::new(Vec::new()));
711        let processed_clone = processed_items.clone();
712
713        // Note: BatchProcessor might not accept batch_size 0, so we test with 1
714        let processor = BatchProcessor::new(1, move |batch: &[i32]| {
715            #[cfg(feature = "parking-lot")]
716            {
717                let mut items = processed_clone.lock();
718                items.extend_from_slice(batch);
719            }
720            #[cfg(not(feature = "parking-lot"))]
721            {
722                let mut items = processed_clone.safe_lock().expect("Failed to lock items");
723                items.extend_from_slice(batch);
724            }
725        });
726
727        // Test adding no items
728        std::thread::sleep(std::time::Duration::from_millis(10));
729
730        #[cfg(feature = "parking-lot")]
731        {
732            let items = processed_items.lock();
733            assert_eq!(items.len(), 0);
734        }
735        #[cfg(not(feature = "parking-lot"))]
736        {
737            let items = processed_items.safe_lock().expect("Failed to lock items");
738            assert_eq!(items.len(), 0);
739        }
740
741        // Test adding items after delay
742        processor.add(1);
743        processor.add(2);
744
745        std::thread::sleep(std::time::Duration::from_millis(20));
746
747        #[cfg(feature = "parking-lot")]
748        {
749            let items = processed_items.lock();
750            assert!(!items.is_empty()); // Should have processed at least one item
751        }
752        #[cfg(not(feature = "parking-lot"))]
753        {
754            let items = processed_items.safe_lock().expect("Failed to lock items");
755            assert!(!items.len().is_empty()); // Should have processed at least one item
756        }
757    }
758
    /// Smoke test: 100k fast-path records should complete without panic
    /// and leave consistent counters.
    #[test]
    fn test_memory_efficiency() {
        // Test that our optimizations don't use excessive memory
        let collector = FastStatsCollector::new();

        // Record many allocations (sizes cycle through 1..=1000)
        for i in 0..100000 {
            collector.record_allocation_fast(i % 1000 + 1);
        }

        let stats = collector.get_basic_stats();
        assert_eq!(stats.allocation_count, 100000);

        // The collector itself should not use excessive memory
        // This is more of a smoke test to ensure it doesn't panic or crash
        assert!(stats.total_allocated > 0);
    }
776}