1use crate::TorshResult;
4use std::collections::HashMap;
5use std::sync::{Arc, Mutex};
6use torsh_core::TorshError;
7
/// Bookkeeping record for one live allocation, used by leak detection:
/// (size in bytes, time of allocation, optional captured stack trace).
type AllocationTrace = (usize, std::time::Instant, Option<String>);
/// Maps an allocation's pointer address to its trace record.
type AllocationTracesMap = HashMap<usize, AllocationTrace>;
11
/// Aggregate memory accounting maintained by the profiler.
#[derive(Debug, Clone, Default)]
pub struct MemoryStats {
    /// Bytes currently allocated (grows on allocation, shrinks on deallocation).
    pub allocated: usize,
    /// Bytes reserved. NOTE(review): nothing in this file writes this field —
    /// confirm whether an external caller maintains it.
    pub reserved: usize,
    /// High-water mark of `allocated`.
    pub peak: usize,
    /// Total number of allocation events recorded.
    pub allocations: usize,
    /// Total number of deallocation events recorded.
    pub deallocations: usize,
}
21
/// A single allocation that is still live and therefore a potential leak.
#[derive(Debug, Clone)]
pub struct MemoryLeak {
    /// Address of the suspected-leaked allocation.
    pub ptr: usize,
    /// Size of the allocation in bytes.
    pub size: usize,
    /// Stack trace captured at allocation time, if one was supplied.
    pub stack_trace: Option<String>,
    /// When the allocation was recorded.
    pub allocation_time: std::time::Instant,
}
30
/// Summary returned by the leak-detection queries on `MemoryProfiler`.
#[derive(Debug, Clone, Default)]
pub struct LeakDetectionResults {
    /// The individual suspected leaks.
    pub potential_leaks: Vec<MemoryLeak>,
    /// Sum of the sizes of `potential_leaks`, in bytes.
    pub total_leaked_bytes: usize,
    /// Number of entries in `potential_leaks`.
    pub leak_count: usize,
}
38
/// One allocation or deallocation recorded on the memory timeline.
#[derive(Debug, Clone)]
pub struct MemoryEvent {
    /// Whether this is an allocation or a deallocation.
    pub event_type: MemoryEventType,
    /// Address involved in the event.
    pub ptr: usize,
    /// Size in bytes of the allocated/deallocated block.
    pub size: usize,
    /// When the event was recorded.
    pub timestamp: std::time::Instant,
    /// Numeric id extracted from the recording thread's `ThreadId` Debug
    /// output; 0 if no digits could be parsed.
    pub thread_id: usize,
}
48
/// Kind of event stored in a [`MemoryEvent`].
///
/// The variants carry no data, so `Copy`, `Eq`, and `Hash` are derived in
/// addition to `PartialEq`: the extra derives are free for a fieldless enum
/// and let callers copy, fully compare, and key on event types.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum MemoryEventType {
    /// Memory was allocated.
    Allocation,
    /// Memory was freed.
    Deallocation,
}
55
/// Result of `MemoryProfiler::analyze_fragmentation`.
#[derive(Debug, Clone)]
pub struct FragmentationAnalysis {
    /// Total bytes in allocated blocks.
    pub total_allocated: usize,
    /// Size of the largest contiguous free block, in bytes.
    pub largest_free_block: usize,
    /// `1.0 - largest_free_block / total_free`; 0.0 when nothing is free.
    pub fragmentation_ratio: f64,
    /// Free gaps between allocations, plus the tail of the pool.
    pub free_blocks: Vec<MemoryBlock>,
    /// Allocated blocks, sorted by start address.
    pub allocated_blocks: Vec<MemoryBlock>,
    /// Fraction of free memory that is *not* in the largest free block.
    pub external_fragmentation: f64,
    /// Heuristic estimate only (see `analyze_fragmentation`); not measured.
    pub internal_fragmentation: f64,
}
67
/// A contiguous address range; used for both allocated and free regions.
#[derive(Debug, Clone)]
pub struct MemoryBlock {
    /// First byte of the block.
    pub start_address: usize,
    /// Size in bytes (`end_address - start_address`).
    pub size: usize,
    /// One past the last byte: `start_address + size`.
    pub end_address: usize,
}
75
/// Result of `MemoryProfiler::get_timeline_analysis`.
#[derive(Debug, Clone)]
pub struct MemoryTimeline {
    /// All recorded events, in insertion order.
    pub events: Vec<MemoryEvent>,
    /// Timestamp at which cumulative usage peaked.
    pub peak_usage_time: std::time::Instant,
    /// Peak cumulative usage, in bytes.
    pub peak_usage_bytes: usize,
    /// Allocation events per second over the observed window.
    pub allocation_rate: f64,
    /// Deallocation events per second over the observed window.
    pub deallocation_rate: f64,
    /// Mean size of allocation events, in bytes.
    pub average_allocation_size: f64,
    /// (offset from profiling start, cumulative usage) after each event.
    pub memory_usage_over_time: Vec<(std::time::Duration, usize)>,
}
87
/// Tracks allocations and deallocations and offers optional leak detection,
/// timeline recording, and fragmentation analysis.
///
/// All shared state sits behind `Arc<Mutex<…>>`, so recording methods take
/// `&self`; the feature flags themselves require `&mut self` to toggle.
pub struct MemoryProfiler {
    /// Aggregate counters; see [`MemoryStats`].
    stats: Arc<Mutex<MemoryStats>>,
    /// Live allocations: pointer address -> size in bytes.
    allocations: Arc<Mutex<HashMap<usize, usize>>>,
    /// Master switch; when false, recording calls are no-ops.
    enabled: bool,
    /// When true, allocation traces are kept for leak reporting.
    leak_detection_enabled: bool,
    /// Pointer -> (size, time, optional stack trace) for leak reporting.
    allocation_traces: Arc<Mutex<AllocationTracesMap>>,
    /// When true, every allocation/deallocation is appended to the timeline.
    timeline_enabled: bool,
    /// Chronological event log consumed by the timeline analysis.
    timeline_events: Arc<Mutex<Vec<MemoryEvent>>>,
    /// When true, `analyze_fragmentation` is permitted to run.
    fragmentation_enabled: bool,
    /// Assumed contiguous pool size for fragmentation analysis (default 1 GiB).
    memory_pool_size: usize,
    /// Set on creation and again on `enable()`; timeline offsets are
    /// measured from here.
    start_time: std::time::Instant,
}
101
impl Default for MemoryProfiler {
    /// Equivalent to [`MemoryProfiler::new`]: all features start disabled.
    fn default() -> Self {
        Self::new()
    }
}
107
108impl MemoryProfiler {
109 pub fn new() -> Self {
111 Self {
112 stats: Arc::new(Mutex::new(MemoryStats::default())),
113 allocations: Arc::new(Mutex::new(HashMap::new())),
114 enabled: false,
115 leak_detection_enabled: false,
116 allocation_traces: Arc::new(Mutex::new(HashMap::new())),
117 timeline_enabled: false,
118 timeline_events: Arc::new(Mutex::new(Vec::new())),
119 fragmentation_enabled: false,
120 memory_pool_size: 1024 * 1024 * 1024, start_time: std::time::Instant::now(),
122 }
123 }
124
125 pub fn enable(&mut self) {
127 self.enabled = true;
128 self.start_time = std::time::Instant::now();
129 if let Ok(mut stats) = self.stats.lock() {
130 *stats = MemoryStats::default();
131 }
132 if let Ok(mut allocations) = self.allocations.lock() {
133 allocations.clear();
134 }
135 if let Ok(mut traces) = self.allocation_traces.lock() {
136 traces.clear();
137 }
138 if let Ok(mut timeline) = self.timeline_events.lock() {
139 timeline.clear();
140 }
141 }
142
    /// Stops recording; previously collected data is kept.
    pub fn disable(&mut self) {
        self.enabled = false;
    }
147
148 pub fn record_allocation(&self, ptr: usize, size: usize) -> TorshResult<()> {
150 if !self.enabled {
151 return Ok(());
152 }
153
154 let mut stats = self.stats.lock().map_err(|_| {
155 TorshError::InvalidArgument("Failed to acquire lock on stats".to_string())
156 })?;
157
158 let mut allocations = self.allocations.lock().map_err(|_| {
159 TorshError::InvalidArgument("Failed to acquire lock on allocations".to_string())
160 })?;
161
162 allocations.insert(ptr, size);
163 stats.allocated += size;
164 stats.allocations += 1;
165
166 if stats.allocated > stats.peak {
167 stats.peak = stats.allocated;
168 }
169
170 Ok(())
171 }
172
173 pub fn record_deallocation(&self, ptr: usize) -> TorshResult<()> {
175 if !self.enabled {
176 return Ok(());
177 }
178
179 let timestamp = std::time::Instant::now();
180 let thread_id = std::thread::current().id();
181 let thread_id_num = format!("{thread_id:?}")
182 .chars()
183 .filter(|c| c.is_ascii_digit())
184 .collect::<String>()
185 .parse::<usize>()
186 .unwrap_or(0);
187
188 let mut stats = self.stats.lock().map_err(|_| {
189 TorshError::InvalidArgument("Failed to acquire lock on stats".to_string())
190 })?;
191
192 let mut allocations = self.allocations.lock().map_err(|_| {
193 TorshError::InvalidArgument("Failed to acquire lock on allocations".to_string())
194 })?;
195
196 if let Some(size) = allocations.remove(&ptr) {
197 stats.allocated = stats.allocated.saturating_sub(size);
198 stats.deallocations += 1;
199
200 if self.leak_detection_enabled {
202 let mut traces = self.allocation_traces.lock().map_err(|_| {
203 TorshError::InvalidArgument(
204 "Failed to acquire lock on allocation traces".to_string(),
205 )
206 })?;
207 traces.remove(&ptr);
208 }
209
210 if self.timeline_enabled {
212 let mut timeline = self.timeline_events.lock().map_err(|_| {
213 TorshError::InvalidArgument(
214 "Failed to acquire lock on timeline events".to_string(),
215 )
216 })?;
217 timeline.push(MemoryEvent {
218 event_type: MemoryEventType::Deallocation,
219 ptr,
220 size,
221 timestamp,
222 thread_id: thread_id_num,
223 });
224 }
225 }
226
227 Ok(())
228 }
229
230 pub fn get_stats(&self) -> TorshResult<MemoryStats> {
232 let stats = self.stats.lock().map_err(|_| {
233 TorshError::InvalidArgument("Failed to acquire lock on stats".to_string())
234 })?;
235 Ok(stats.clone())
236 }
237
238 pub fn reset(&self) -> TorshResult<()> {
240 let mut stats = self.stats.lock().map_err(|_| {
241 TorshError::InvalidArgument("Failed to acquire lock on stats".to_string())
242 })?;
243
244 let mut allocations = self.allocations.lock().map_err(|_| {
245 TorshError::InvalidArgument("Failed to acquire lock on allocations".to_string())
246 })?;
247
248 let mut traces = self.allocation_traces.lock().map_err(|_| {
249 TorshError::InvalidArgument("Failed to acquire lock on allocation traces".to_string())
250 })?;
251
252 *stats = MemoryStats::default();
253 allocations.clear();
254 traces.clear();
255
256 Ok(())
257 }
258
259 pub fn set_leak_detection_enabled(&mut self, enabled: bool) {
261 self.leak_detection_enabled = enabled;
262 if enabled {
263 if let Ok(mut traces) = self.allocation_traces.lock() {
265 traces.clear();
266 }
267 }
268 }
269
    /// Whether leak detection is currently enabled.
    pub fn is_leak_detection_enabled(&self) -> bool {
        self.leak_detection_enabled
    }
274
275 pub fn set_timeline_enabled(&mut self, enabled: bool) {
277 self.timeline_enabled = enabled;
278 if enabled {
279 if let Ok(mut timeline) = self.timeline_events.lock() {
280 timeline.clear();
281 }
282 }
283 }
284
    /// Whether timeline recording is currently enabled.
    pub fn is_timeline_enabled(&self) -> bool {
        self.timeline_enabled
    }
289
    /// Enables or disables fragmentation analysis.
    pub fn set_fragmentation_enabled(&mut self, enabled: bool) {
        self.fragmentation_enabled = enabled;
    }
294
    /// Whether fragmentation analysis is currently enabled.
    pub fn is_fragmentation_enabled(&self) -> bool {
        self.fragmentation_enabled
    }
299
    /// Sets the pool size (bytes) assumed by `analyze_fragmentation`.
    pub fn set_memory_pool_size(&mut self, size: usize) {
        self.memory_pool_size = size;
    }
304
    /// Records an allocation, optionally attaching a caller-supplied stack
    /// trace for later leak reporting.
    ///
    /// No-op while the profiler is disabled. Always updates the counters
    /// and the live-allocation map; additionally feeds leak detection and
    /// the timeline when those features are enabled.
    ///
    /// # Errors
    /// Returns an error if an internal lock cannot be acquired.
    pub fn record_allocation_with_trace(
        &self,
        ptr: usize,
        size: usize,
        stack_trace: Option<String>,
    ) -> TorshResult<()> {
        if !self.enabled {
            return Ok(());
        }

        let timestamp = std::time::Instant::now();
        // ThreadId exposes no stable numeric accessor, so extract the
        // digits from its Debug output; fall back to 0 if parsing fails.
        let thread_id = std::thread::current().id();
        let thread_id_num = format!("{thread_id:?}")
            .chars()
            .filter(|c| c.is_ascii_digit())
            .collect::<String>()
            .parse::<usize>()
            .unwrap_or(0);

        // Lock order: stats -> allocations -> traces/timeline. The first
        // two guards stay held while the optional structures are updated;
        // record_deallocation uses the same order to avoid deadlocks.
        let mut stats = self.stats.lock().map_err(|_| {
            TorshError::InvalidArgument("Failed to acquire lock on stats".to_string())
        })?;

        let mut allocations = self.allocations.lock().map_err(|_| {
            TorshError::InvalidArgument("Failed to acquire lock on allocations".to_string())
        })?;

        allocations.insert(ptr, size);
        stats.allocated += size;
        stats.allocations += 1;

        // Maintain the high-water mark.
        if stats.allocated > stats.peak {
            stats.peak = stats.allocated;
        }

        if self.leak_detection_enabled {
            let mut traces = self.allocation_traces.lock().map_err(|_| {
                TorshError::InvalidArgument(
                    "Failed to acquire lock on allocation traces".to_string(),
                )
            })?;
            traces.insert(ptr, (size, timestamp, stack_trace));
        }

        if self.timeline_enabled {
            let mut timeline = self.timeline_events.lock().map_err(|_| {
                TorshError::InvalidArgument("Failed to acquire lock on timeline events".to_string())
            })?;
            timeline.push(MemoryEvent {
                event_type: MemoryEventType::Allocation,
                ptr,
                size,
                timestamp,
                thread_id: thread_id_num,
            });
        }

        Ok(())
    }
367
368 pub fn detect_leaks(&self) -> TorshResult<LeakDetectionResults> {
370 if !self.leak_detection_enabled {
371 return Ok(LeakDetectionResults::default());
372 }
373
374 let traces = self.allocation_traces.lock().map_err(|_| {
375 TorshError::InvalidArgument("Failed to acquire lock on allocation traces".to_string())
376 })?;
377
378 let mut potential_leaks = Vec::new();
379 let mut total_leaked_bytes = 0;
380
381 for (&ptr, &(size, allocation_time, ref stack_trace)) in traces.iter() {
382 potential_leaks.push(MemoryLeak {
383 ptr,
384 size,
385 stack_trace: stack_trace.clone(),
386 allocation_time,
387 });
388 total_leaked_bytes += size;
389 }
390
391 Ok(LeakDetectionResults {
392 leak_count: potential_leaks.len(),
393 total_leaked_bytes,
394 potential_leaks,
395 })
396 }
397
398 pub fn get_leaks_older_than(
400 &self,
401 duration: std::time::Duration,
402 ) -> TorshResult<LeakDetectionResults> {
403 if !self.leak_detection_enabled {
404 return Ok(LeakDetectionResults::default());
405 }
406
407 let traces = self.allocation_traces.lock().map_err(|_| {
408 TorshError::InvalidArgument("Failed to acquire lock on allocation traces".to_string())
409 })?;
410
411 let now = std::time::Instant::now();
412 let mut potential_leaks = Vec::new();
413 let mut total_leaked_bytes = 0;
414
415 for (&ptr, &(size, allocation_time, ref stack_trace)) in traces.iter() {
416 if now.duration_since(allocation_time) > duration {
417 potential_leaks.push(MemoryLeak {
418 ptr,
419 size,
420 stack_trace: stack_trace.clone(),
421 allocation_time,
422 });
423 total_leaked_bytes += size;
424 }
425 }
426
427 Ok(LeakDetectionResults {
428 leak_count: potential_leaks.len(),
429 total_leaked_bytes,
430 potential_leaks,
431 })
432 }
433
434 pub fn get_largest_leaks(&self, count: usize) -> TorshResult<LeakDetectionResults> {
436 if !self.leak_detection_enabled {
437 return Ok(LeakDetectionResults::default());
438 }
439
440 let traces = self.allocation_traces.lock().map_err(|_| {
441 TorshError::InvalidArgument("Failed to acquire lock on allocation traces".to_string())
442 })?;
443
444 let mut leaks: Vec<MemoryLeak> = traces
445 .iter()
446 .map(
447 |(&ptr, &(size, allocation_time, ref stack_trace))| MemoryLeak {
448 ptr,
449 size,
450 stack_trace: stack_trace.clone(),
451 allocation_time,
452 },
453 )
454 .collect();
455
456 leaks.sort_by(|a, b| b.size.cmp(&a.size));
457 leaks.truncate(count);
458
459 let total_leaked_bytes = leaks.iter().map(|leak| leak.size).sum();
460
461 Ok(LeakDetectionResults {
462 leak_count: leaks.len(),
463 total_leaked_bytes,
464 potential_leaks: leaks,
465 })
466 }
467
    /// Analyzes fragmentation of the tracked allocations against an assumed
    /// contiguous pool of `memory_pool_size` bytes starting at address 0.
    ///
    /// NOTE(review): the pool model is a simplification — recorded pointers
    /// are treated as offsets into a single pool beginning at address 0, so
    /// the gap before the first allocation counts as free space. Confirm
    /// this matches how callers assign addresses.
    ///
    /// # Errors
    /// Returns an error when fragmentation analysis is disabled or the
    /// allocations lock cannot be acquired.
    pub fn analyze_fragmentation(&self) -> TorshResult<FragmentationAnalysis> {
        if !self.fragmentation_enabled {
            return Err(TorshError::InvalidArgument(
                "Fragmentation analysis is not enabled".to_string(),
            ));
        }

        let allocations = self.allocations.lock().map_err(|_| {
            TorshError::InvalidArgument("Failed to acquire lock on allocations".to_string())
        })?;

        let mut allocated_blocks: Vec<MemoryBlock> = allocations
            .iter()
            .map(|(&ptr, &size)| MemoryBlock {
                start_address: ptr,
                size,
                end_address: ptr + size,
            })
            .collect();

        // Sort by address so gaps between consecutive blocks can be found.
        allocated_blocks.sort_by_key(|block| block.start_address);

        // Walk the address space and collect the gaps as free blocks.
        let mut free_blocks = Vec::new();
        let mut current_addr = 0;

        for block in &allocated_blocks {
            if current_addr < block.start_address {
                free_blocks.push(MemoryBlock {
                    start_address: current_addr,
                    size: block.start_address - current_addr,
                    end_address: block.start_address,
                });
            }
            current_addr = block.end_address;
        }

        // The tail of the pool after the last allocation is also free.
        if current_addr < self.memory_pool_size {
            free_blocks.push(MemoryBlock {
                start_address: current_addr,
                size: self.memory_pool_size - current_addr,
                end_address: self.memory_pool_size,
            });
        }

        let total_allocated: usize = allocated_blocks.iter().map(|b| b.size).sum();
        let total_free: usize = free_blocks.iter().map(|b| b.size).sum();
        let largest_free_block = free_blocks.iter().map(|b| b.size).max().unwrap_or(0);

        // 0.0 = all free memory is one contiguous block; approaches 1.0 as
        // free memory splinters into many small blocks.
        let fragmentation_ratio = if total_free > 0 {
            1.0 - (largest_free_block as f64 / total_free as f64)
        } else {
            0.0
        };

        // Fraction of free memory outside the largest free block.
        let external_fragmentation = if total_free > 0 {
            (total_free - largest_free_block) as f64 / total_free as f64
        } else {
            0.0
        };

        let avg_allocation_size = if allocated_blocks.is_empty() {
            0.0
        } else {
            total_allocated as f64 / allocated_blocks.len() as f64
        };

        // Heuristic only: scales with the average allocation size (capped
        // at 1 KiB) times 5%. No per-allocation padding data is tracked,
        // so this is an estimate, not a measurement.
        let internal_fragmentation = if avg_allocation_size > 0.0 {
            (avg_allocation_size / 1024.0).min(1.0) * 0.05
        } else {
            0.0
        };

        Ok(FragmentationAnalysis {
            total_allocated,
            largest_free_block,
            fragmentation_ratio,
            free_blocks,
            allocated_blocks,
            external_fragmentation,
            internal_fragmentation,
        })
    }
558
    /// Builds a [`MemoryTimeline`] summary from the recorded events.
    ///
    /// Rates are events-per-second measured from `start_time` to the last
    /// event's timestamp; an empty timeline yields zeroed metrics.
    ///
    /// # Errors
    /// Returns an error when timeline tracking is disabled or the event
    /// lock cannot be acquired.
    pub fn get_timeline_analysis(&self) -> TorshResult<MemoryTimeline> {
        if !self.timeline_enabled {
            return Err(TorshError::InvalidArgument(
                "Timeline tracking is not enabled".to_string(),
            ));
        }

        let timeline = self.timeline_events.lock().map_err(|_| {
            TorshError::InvalidArgument("Failed to acquire lock on timeline events".to_string())
        })?;

        // No events: return a neutral result anchored at start_time.
        if timeline.is_empty() {
            return Ok(MemoryTimeline {
                events: Vec::new(),
                peak_usage_time: self.start_time,
                peak_usage_bytes: 0,
                allocation_rate: 0.0,
                deallocation_rate: 0.0,
                average_allocation_size: 0.0,
                memory_usage_over_time: Vec::new(),
            });
        }

        let events = timeline.clone();
        let total_duration = timeline
            .last()
            .expect("timeline should not be empty after early return check")
            .timestamp
            .duration_since(self.start_time)
            .as_secs_f64();

        let allocation_count = events
            .iter()
            .filter(|e| e.event_type == MemoryEventType::Allocation)
            .count();
        let deallocation_count = events
            .iter()
            .filter(|e| e.event_type == MemoryEventType::Deallocation)
            .count();

        // Guard against a zero-length window (event at exactly start_time).
        let allocation_rate = if total_duration > 0.0 {
            allocation_count as f64 / total_duration
        } else {
            0.0
        };

        let deallocation_rate = if total_duration > 0.0 {
            deallocation_count as f64 / total_duration
        } else {
            0.0
        };

        let allocation_sizes: Vec<usize> = events
            .iter()
            .filter(|e| e.event_type == MemoryEventType::Allocation)
            .map(|e| e.size)
            .collect();

        let average_allocation_size = if allocation_sizes.is_empty() {
            0.0
        } else {
            allocation_sizes.iter().sum::<usize>() as f64 / allocation_sizes.len() as f64
        };

        // Replay the events in order to reconstruct cumulative usage and
        // locate the peak.
        let mut memory_usage_over_time = Vec::new();
        let mut current_usage = 0usize;
        let mut peak_usage_bytes = 0usize;
        let mut peak_usage_time = self.start_time;

        for event in &events {
            match event.event_type {
                MemoryEventType::Allocation => {
                    current_usage += event.size;
                    if current_usage > peak_usage_bytes {
                        peak_usage_bytes = current_usage;
                        peak_usage_time = event.timestamp;
                    }
                }
                MemoryEventType::Deallocation => {
                    // saturating_sub: a deallocation without a matching
                    // recorded allocation must not underflow.
                    current_usage = current_usage.saturating_sub(event.size);
                }
            }

            memory_usage_over_time.push((
                event.timestamp.duration_since(self.start_time),
                current_usage,
            ));
        }

        Ok(MemoryTimeline {
            events,
            peak_usage_time,
            peak_usage_bytes,
            allocation_rate,
            deallocation_rate,
            average_allocation_size,
            memory_usage_over_time,
        })
    }
662
663 pub fn export_timeline_csv(&self, path: &str) -> TorshResult<()> {
665 let timeline = self.get_timeline_analysis()?;
666
667 use std::fs::File;
668 use std::io::{BufWriter, Write};
669
670 let file = File::create(path).map_err(|e| {
671 TorshError::InvalidArgument(format!("Failed to create file {path}: {e}"))
672 })?;
673
674 let mut writer = BufWriter::new(file);
675
676 writeln!(
678 writer,
679 "timestamp_ms,event_type,ptr,size,thread_id,cumulative_usage"
680 )
681 .map_err(|e| TorshError::InvalidArgument(format!("Failed to write CSV header: {e}")))?;
682
683 let mut current_usage = 0usize;
684 for event in &timeline.events {
685 match event.event_type {
686 MemoryEventType::Allocation => current_usage += event.size,
687 MemoryEventType::Deallocation => {
688 current_usage = current_usage.saturating_sub(event.size)
689 }
690 }
691
692 let timestamp_ms = event.timestamp.duration_since(self.start_time).as_millis();
693 let event_type_str = match event.event_type {
694 MemoryEventType::Allocation => "allocation",
695 MemoryEventType::Deallocation => "deallocation",
696 };
697
698 writeln!(
699 writer,
700 "{},{},{:#x},{},{},{}",
701 timestamp_ms, event_type_str, event.ptr, event.size, event.thread_id, current_usage
702 )
703 .map_err(|e| TorshError::InvalidArgument(format!("Failed to write CSV row: {e}")))?;
704 }
705
706 writer
707 .flush()
708 .map_err(|e| TorshError::InvalidArgument(format!("Failed to flush CSV writer: {e}")))
709 }
710}
711
712pub fn get_system_memory() -> TorshResult<SystemMemoryInfo> {
714 Ok(SystemMemoryInfo {
717 total: 8 * 1024 * 1024 * 1024, available: 4 * 1024 * 1024 * 1024, used: 4 * 1024 * 1024 * 1024, })
721}
722
/// Snapshot of system-wide memory, in bytes.
#[derive(Debug, Clone)]
pub struct SystemMemoryInfo {
    /// Total physical memory.
    pub total: usize,
    /// Memory currently available.
    pub available: usize,
    /// Memory currently in use.
    pub used: usize,
}
730
731pub fn profile_memory() -> TorshResult<MemoryStats> {
733 let mut profiler = MemoryProfiler::new();
734 profiler.enable();
735
736 let ptr1 = 0x1000;
738 let ptr2 = 0x2000;
739
740 profiler.record_allocation(ptr1, 1024)?;
741 profiler.record_allocation(ptr2, 2048)?;
742 profiler.record_deallocation(ptr1)?;
743
744 profiler.get_stats()
745}
746
747#[cfg(test)]
748mod tests {
749 use super::*;
750
    #[test]
    fn test_memory_profiler_creation() {
        // A fresh profiler starts with recording disabled.
        let profiler = MemoryProfiler::new();
        assert!(!profiler.enabled);
    }
756
    #[test]
    fn test_memory_profiler_enable_disable() {
        // enable/disable toggles the master recording switch.
        let mut profiler = MemoryProfiler::new();
        profiler.enable();
        assert!(profiler.enabled);

        profiler.disable();
        assert!(!profiler.enabled);
    }
766
    #[test]
    fn test_memory_allocation_tracking() {
        // Counters accumulate across allocations and track the peak.
        let mut profiler = MemoryProfiler::new();
        profiler.enable();

        profiler.record_allocation(0x1000, 1024).unwrap();
        profiler.record_allocation(0x2000, 2048).unwrap();

        let stats = profiler.get_stats().unwrap();
        assert_eq!(stats.allocated, 3072);
        assert_eq!(stats.allocations, 2);
        assert_eq!(stats.peak, 3072);
    }
780
    #[test]
    fn test_memory_deallocation_tracking() {
        let mut profiler = MemoryProfiler::new();
        profiler.enable();

        profiler.record_allocation(0x1000, 1024).unwrap();
        profiler.record_allocation(0x2000, 2048).unwrap();
        profiler.record_deallocation(0x1000).unwrap();

        let stats = profiler.get_stats().unwrap();
        assert_eq!(stats.allocated, 2048);
        assert_eq!(stats.allocations, 2);
        assert_eq!(stats.deallocations, 1);
        // Peak is a high-water mark; freeing must not lower it.
        assert_eq!(stats.peak, 3072);
    }
796
    #[test]
    fn test_memory_stats_reset() {
        let mut profiler = MemoryProfiler::new();
        profiler.enable();

        profiler.record_allocation(0x1000, 1024).unwrap();
        profiler.reset().unwrap();

        // After reset all counters return to their defaults.
        let stats = profiler.get_stats().unwrap();
        assert_eq!(stats.allocated, 0);
        assert_eq!(stats.allocations, 0);
        assert_eq!(stats.peak, 0);
    }
810
    #[test]
    fn test_leak_detection_basic() {
        let mut profiler = MemoryProfiler::new();
        profiler.enable();
        profiler.set_leak_detection_enabled(true);

        assert!(profiler.is_leak_detection_enabled());

        profiler
            .record_allocation_with_trace(0x1000, 1024, Some("test_trace".to_string()))
            .unwrap();
        profiler
            .record_allocation_with_trace(0x2000, 2048, None)
            .unwrap();

        // Nothing was freed, so both allocations are reported as leaks.
        let leaks = profiler.detect_leaks().unwrap();
        assert_eq!(leaks.leak_count, 2);
        assert_eq!(leaks.total_leaked_bytes, 3072);
        assert_eq!(leaks.potential_leaks.len(), 2);

        let leak_0x1000 = leaks
            .potential_leaks
            .iter()
            .find(|l| l.ptr == 0x1000)
            .unwrap();
        assert_eq!(leak_0x1000.size, 1024);
        assert!(leak_0x1000.stack_trace.is_some());

        let leak_0x2000 = leaks
            .potential_leaks
            .iter()
            .find(|l| l.ptr == 0x2000)
            .unwrap();
        assert_eq!(leak_0x2000.size, 2048);
        assert!(leak_0x2000.stack_trace.is_none());
    }
850
    #[test]
    fn test_leak_detection_with_deallocation() {
        let mut profiler = MemoryProfiler::new();
        profiler.enable();
        profiler.set_leak_detection_enabled(true);

        profiler
            .record_allocation_with_trace(0x1000, 1024, None)
            .unwrap();
        profiler
            .record_allocation_with_trace(0x2000, 2048, None)
            .unwrap();

        profiler.record_deallocation(0x1000).unwrap();

        // The freed allocation must no longer be reported as a leak.
        let leaks = profiler.detect_leaks().unwrap();
        assert_eq!(leaks.leak_count, 1);
        assert_eq!(leaks.total_leaked_bytes, 2048);
        assert_eq!(leaks.potential_leaks[0].ptr, 0x2000);
    }
874
    #[test]
    fn test_leak_detection_disabled() {
        let mut profiler = MemoryProfiler::new();
        profiler.enable();
        // Leak detection defaults to off, so nothing is reported.
        assert!(!profiler.is_leak_detection_enabled());

        profiler
            .record_allocation_with_trace(0x1000, 1024, None)
            .unwrap();

        let leaks = profiler.detect_leaks().unwrap();
        assert_eq!(leaks.leak_count, 0);
        assert_eq!(leaks.total_leaked_bytes, 0);
    }
891
    #[test]
    fn test_get_largest_leaks() {
        let mut profiler = MemoryProfiler::new();
        profiler.enable();
        profiler.set_leak_detection_enabled(true);

        profiler
            .record_allocation_with_trace(0x1000, 512, None)
            .unwrap();
        profiler
            .record_allocation_with_trace(0x2000, 2048, None)
            .unwrap();
        profiler
            .record_allocation_with_trace(0x3000, 1024, None)
            .unwrap();

        // Sorted by size descending; the 512-byte allocation is dropped.
        let largest_leaks = profiler.get_largest_leaks(2).unwrap();
        assert_eq!(largest_leaks.leak_count, 2);
        assert_eq!(largest_leaks.potential_leaks[0].size, 2048);
        assert_eq!(largest_leaks.potential_leaks[1].size, 1024);
    }
915
    #[test]
    fn test_get_leaks_older_than() {
        let mut profiler = MemoryProfiler::new();
        profiler.enable();
        profiler.set_leak_detection_enabled(true);

        profiler
            .record_allocation_with_trace(0x1000, 1024, None)
            .unwrap();

        // Age the first allocation relative to the second.
        std::thread::sleep(std::time::Duration::from_millis(10));

        profiler
            .record_allocation_with_trace(0x2000, 2048, None)
            .unwrap();

        // Only the older allocation passes the 5 ms age filter.
        let old_leaks = profiler
            .get_leaks_older_than(std::time::Duration::from_millis(5))
            .unwrap();
        assert_eq!(old_leaks.leak_count, 1);
        assert_eq!(old_leaks.potential_leaks[0].ptr, 0x1000);

        let very_old_leaks = profiler
            .get_leaks_older_than(std::time::Duration::from_millis(20))
            .unwrap();
        assert_eq!(very_old_leaks.leak_count, 0);
    }
947
    #[test]
    fn test_fragmentation_analysis() {
        let mut profiler = MemoryProfiler::new();
        profiler.enable();
        profiler.set_fragmentation_enabled(true);
        profiler.set_memory_pool_size(1024 * 1024);
        profiler.record_allocation(0x1000, 1024).unwrap();
        // Address gaps between these blocks create free-space fragments.
        profiler.record_allocation(0x3000, 2048).unwrap();
        profiler.record_allocation(0x4000, 512).unwrap();

        let analysis = profiler.analyze_fragmentation().unwrap();

        assert!(analysis.total_allocated > 0);
        assert!(analysis.fragmentation_ratio >= 0.0);
        assert!(!analysis.allocated_blocks.is_empty());
        assert!(!analysis.free_blocks.is_empty());
        assert!(analysis.external_fragmentation >= 0.0);
        assert!(analysis.internal_fragmentation >= 0.0);
    }
969
    #[test]
    fn test_fragmentation_analysis_disabled() {
        let mut profiler = MemoryProfiler::new();
        profiler.enable();
        // Without opting in, analysis must fail rather than guess.
        let result = profiler.analyze_fragmentation();
        assert!(result.is_err());
    }
979
    #[test]
    fn test_timeline_tracking() {
        let mut profiler = MemoryProfiler::new();
        profiler.enable();
        profiler.set_timeline_enabled(true);

        profiler
            .record_allocation_with_trace(0x1000, 1024, None)
            .unwrap();
        std::thread::sleep(std::time::Duration::from_millis(1));

        profiler
            .record_allocation_with_trace(0x2000, 2048, None)
            .unwrap();
        std::thread::sleep(std::time::Duration::from_millis(1));

        profiler.record_deallocation(0x1000).unwrap();

        let timeline = profiler.get_timeline_analysis().unwrap();

        // Two allocations plus one deallocation were recorded.
        assert_eq!(timeline.events.len(), 3);
        assert!(timeline.peak_usage_bytes > 0);
        assert!(timeline.allocation_rate >= 0.0);
        assert!(timeline.deallocation_rate >= 0.0);
        assert!(timeline.average_allocation_size > 0.0);
        assert!(!timeline.memory_usage_over_time.is_empty());
    }
1008
    #[test]
    fn test_timeline_tracking_disabled() {
        let mut profiler = MemoryProfiler::new();
        profiler.enable();
        // Timeline analysis requires timeline tracking to be enabled.
        let result = profiler.get_timeline_analysis();
        assert!(result.is_err());
    }
1018
    #[test]
    fn test_timeline_csv_export() {
        let mut profiler = MemoryProfiler::new();
        profiler.enable();
        profiler.set_timeline_enabled(true);

        profiler
            .record_allocation_with_trace(0x1000, 1024, None)
            .unwrap();
        profiler
            .record_allocation_with_trace(0x2000, 2048, None)
            .unwrap();
        profiler.record_deallocation(0x1000).unwrap();

        let timeline_path = std::env::temp_dir().join("test_timeline.csv");
        let timeline_str = timeline_path.display().to_string();
        let result = profiler.export_timeline_csv(&timeline_str);
        assert!(result.is_ok());

        // Best-effort cleanup of the temporary file.
        let _ = std::fs::remove_file(&timeline_path);
    }
1042
    #[test]
    fn test_memory_event_types() {
        use super::MemoryEventType;

        // PartialEq is derived: each variant equals only itself.
        assert_eq!(MemoryEventType::Allocation, MemoryEventType::Allocation);
        assert_eq!(MemoryEventType::Deallocation, MemoryEventType::Deallocation);
        assert_ne!(MemoryEventType::Allocation, MemoryEventType::Deallocation);
    }
1051
    #[test]
    fn test_timeline_configuration() {
        let mut profiler = MemoryProfiler::new();

        // Both optional features default to off.
        assert!(!profiler.is_timeline_enabled());
        assert!(!profiler.is_fragmentation_enabled());

        profiler.set_timeline_enabled(true);
        assert!(profiler.is_timeline_enabled());

        profiler.set_fragmentation_enabled(true);
        assert!(profiler.is_fragmentation_enabled());

        profiler.set_memory_pool_size(2 * 1024 * 1024);
    }
1068}