use crate::{UtilsError, UtilsResult};
use std::alloc::{GlobalAlloc, Layout};
use std::collections::HashMap;
use std::fs::File;
use std::sync::{Arc, Mutex, RwLock};
use std::time::{Duration, Instant};

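/// Wraps another `GlobalAlloc` and records allocation statistics for it.
///
/// The counters live behind an `RwLock` that is taken on every `alloc` and
/// `dealloc`, which makes this wrapper better suited to debugging and
/// profiling builds than to hot paths.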
pub struct TrackingAllocator<A: GlobalAlloc> {
    inner: A,
    stats: Arc<RwLock<AllocationStats>>,
}

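/// Allocation counters: running totals, current and peak live bytes, and
/// allocation/deallocation counts.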
#[derive(Debug, Clone, Default)]
pub struct AllocationStats {
    pub total_allocated: u64,
    pub total_deallocated: u64,
    pub current_allocated: u64,
    pub peak_allocated: u64,
    pub allocation_count: u64,
    pub deallocation_count: u64,
    pub leak_count: u64,
}

impl<A: GlobalAlloc> TrackingAllocator<A> {
    pub fn new(inner: A) -> Self {
        Self {
            inner,
            stats: Arc::new(RwLock::new(AllocationStats::default())),
        }
    }

    pub fn stats(&self) -> AllocationStats {
        self.stats.read().unwrap().clone()
    }

    pub fn reset_stats(&self) {
        let mut stats = self.stats.write().unwrap();
        *stats = AllocationStats::default();
    }
}

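// SAFETY: every request is forwarded unchanged to the inner allocator; this
// wrapper only updates the statistics around those calls.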
unsafe impl<A: GlobalAlloc> GlobalAlloc for TrackingAllocator<A> {
    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
        let ptr = self.inner.alloc(layout);
        if !ptr.is_null() {
            let mut stats = self.stats.write().unwrap();
            stats.total_allocated += layout.size() as u64;
            stats.current_allocated += layout.size() as u64;
            stats.allocation_count += 1;
            if stats.current_allocated > stats.peak_allocated {
                stats.peak_allocated = stats.current_allocated;
            }
        }
        ptr
    }

    unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
        self.inner.dealloc(ptr, layout);
        let mut stats = self.stats.write().unwrap();
        stats.total_deallocated += layout.size() as u64;
        stats.current_allocated = stats.current_allocated.saturating_sub(layout.size() as u64);
        stats.deallocation_count += 1;
    }
}

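/// A fixed-block object pool for `T: Default + Clone`.
///
/// Objects are created `block_size` at a time and recycled through a free
/// list of raw pointers; nothing is returned to the system allocator until
/// the pool itself is dropped.
///
/// A minimal usage sketch (mirrors `test_memory_pool` below):
///
/// ```ignore
/// let mut pool: MemoryPool<u64> = MemoryPool::new(10);
/// if let Some(slot) = pool.allocate() {
///     *slot = 42;
/// }
/// assert_eq!(pool.used(), 1);
/// ```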
pub struct MemoryPool<T> {
    blocks: Vec<Box<[T]>>,
    free_list: Vec<*mut T>,
    block_size: usize,
    stats: AllocationStats,
}

impl<T: Default + Clone> MemoryPool<T> {
    pub fn new(block_size: usize) -> Self {
        Self {
            blocks: Vec::new(),
            free_list: Vec::new(),
            block_size,
            stats: AllocationStats::default(),
        }
    }

    pub fn allocate(&mut self) -> Option<&mut T> {
        if self.free_list.is_empty() {
            self.add_block();
        }

        if let Some(ptr) = self.free_list.pop() {
            self.stats.allocation_count += 1;
            self.stats.current_allocated += std::mem::size_of::<T>() as u64;
            // The pointer came from a block owned by `self.blocks`, so it is
            // valid for as long as the pool itself is alive.
            unsafe { Some(&mut *ptr) }
        } else {
            None
        }
    }

    pub fn deallocate(&mut self, item: &mut T) {
        let ptr = item as *mut T;
        self.free_list.push(ptr);
        self.stats.deallocation_count += 1;
        self.stats.current_allocated = self
            .stats
            .current_allocated
            .saturating_sub(std::mem::size_of::<T>() as u64);
    }

    fn add_block(&mut self) {
        let mut block = vec![T::default(); self.block_size].into_boxed_slice();
        for item in block.iter_mut() {
            self.free_list.push(item as *mut T);
        }
        // The boxed slice's heap storage never moves, so the pointers pushed
        // above stay valid after the block is stored in `self.blocks`.
        self.blocks.push(block);
        self.stats.total_allocated += (self.block_size * std::mem::size_of::<T>()) as u64;
    }

    pub fn stats(&self) -> &AllocationStats {
        &self.stats
    }

    pub fn capacity(&self) -> usize {
        self.blocks.len() * self.block_size
    }

    pub fn used(&self) -> usize {
        self.capacity() - self.free_list.len()
    }
}

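/// Records live allocations by pointer address so that unreleased ones can be
/// reported later.
///
/// Pair `track_allocation` with `track_deallocation`; anything still tracked
/// shows up in `check_leaks`, `check_leaks_older_than`, and
/// `total_leaked_bytes`.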
pub struct LeakDetector {
    allocations: Arc<Mutex<HashMap<usize, AllocationInfo>>>,
    enabled: bool,
}

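/// Metadata recorded for a single tracked allocation.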
#[derive(Debug, Clone)]
pub struct AllocationInfo {
    pub size: usize,
    pub timestamp: Instant,
    pub backtrace: String,
}

impl LeakDetector {
    pub fn new() -> Self {
        Self {
            allocations: Arc::new(Mutex::new(HashMap::new())),
            enabled: true,
        }
    }

    pub fn enable(&mut self) {
        self.enabled = true;
    }

    pub fn disable(&mut self) {
        self.enabled = false;
    }

    pub fn track_allocation(&self, ptr: *mut u8, size: usize) {
        if !self.enabled {
            return;
        }

        let mut allocations = self.allocations.lock().unwrap();
        allocations.insert(
            ptr as usize,
            AllocationInfo {
                size,
                timestamp: Instant::now(),
                // No real backtrace is captured; the formatted address serves
                // as a lightweight placeholder.
                backtrace: format!("Allocation at {ptr:p}"),
            },
        );
    }

    pub fn track_deallocation(&self, ptr: *mut u8) {
        if !self.enabled {
            return;
        }

        let mut allocations = self.allocations.lock().unwrap();
        allocations.remove(&(ptr as usize));
    }

    pub fn check_leaks(&self) -> Vec<AllocationInfo> {
        let allocations = self.allocations.lock().unwrap();
        allocations.values().cloned().collect()
    }

    pub fn check_leaks_older_than(&self, duration: Duration) -> Vec<AllocationInfo> {
        let allocations = self.allocations.lock().unwrap();
        let now = Instant::now();
        allocations
            .values()
            .filter(|info| now.duration_since(info.timestamp) > duration)
            .cloned()
            .collect()
    }

    pub fn total_leaked_bytes(&self) -> usize {
        let allocations = self.allocations.lock().unwrap();
        allocations.values().map(|info| info.size).sum()
    }

    pub fn clear(&self) {
        let mut allocations = self.allocations.lock().unwrap();
        allocations.clear();
    }
}

impl Default for LeakDetector {
    fn default() -> Self {
        Self::new()
    }
}

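/// A memory-mapped view of a file.
///
/// Unix targets map the file with `mmap`; Windows targets use
/// `CreateFileMappingW` + `MapViewOfFile`; every other platform returns an
/// `Unsupported` error. The mapping is released in `Drop`.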
pub struct MemoryMappedFile {
    #[allow(dead_code)]
    file: File,
    ptr: *mut u8,
    size: usize,
}

impl MemoryMappedFile {
    #[cfg(unix)]
    pub fn new(file: File, writable: bool) -> Result<Self, std::io::Error> {
        use std::os::unix::io::AsRawFd;

        let size = file.metadata()?.len() as usize;
        let prot = if writable {
            libc::PROT_READ | libc::PROT_WRITE
        } else {
            libc::PROT_READ
        };

        let ptr = unsafe {
            libc::mmap(
                std::ptr::null_mut(),
                size,
                prot,
                libc::MAP_SHARED,
                file.as_raw_fd(),
                0,
            )
        };

        if ptr == libc::MAP_FAILED {
            return Err(std::io::Error::last_os_error());
        }

        Ok(Self {
            file,
            ptr: ptr as *mut u8,
            size,
        })
    }

    #[cfg(windows)]
    pub fn new(file: File, writable: bool) -> Result<Self, std::io::Error> {
        use std::os::windows::io::AsRawHandle;
        use winapi::um::handleapi::CloseHandle;
        use winapi::um::memoryapi::{
            CreateFileMappingW, MapViewOfFile, FILE_MAP_READ, FILE_MAP_WRITE,
        };
        use winapi::um::winnt::{PAGE_READONLY, PAGE_READWRITE};

        let size = file.metadata()?.len() as usize;
        let protect = if writable {
            PAGE_READWRITE
        } else {
            PAGE_READONLY
        };
        let access = if writable {
            FILE_MAP_WRITE
        } else {
            FILE_MAP_READ
        };

        let mapping = unsafe {
            CreateFileMappingW(
                file.as_raw_handle() as _,
                std::ptr::null_mut(),
                protect,
                0,
                0,
                std::ptr::null(),
            )
        };

        if mapping.is_null() {
            return Err(std::io::Error::last_os_error());
        }

        let ptr = unsafe { MapViewOfFile(mapping, access, 0, 0, 0) };
        // The mapping handle can be closed once the view exists; the view
        // keeps the underlying file-mapping object alive.
        unsafe { CloseHandle(mapping) };

        if ptr.is_null() {
            return Err(std::io::Error::last_os_error());
        }

        Ok(Self {
            file,
            ptr: ptr as *mut u8,
            size,
        })
    }

    #[cfg(not(any(unix, windows)))]
    pub fn new(_file: File, _writable: bool) -> Result<Self, std::io::Error> {
        Err(std::io::Error::new(
            std::io::ErrorKind::Unsupported,
            "Memory mapping not supported on this platform",
        ))
    }

    pub fn as_slice(&self) -> &[u8] {
        unsafe { std::slice::from_raw_parts(self.ptr, self.size) }
    }

    pub fn as_mut_slice(&mut self) -> &mut [u8] {
        unsafe { std::slice::from_raw_parts_mut(self.ptr, self.size) }
    }

    pub fn size(&self) -> usize {
        self.size
    }
}

impl Drop for MemoryMappedFile {
    fn drop(&mut self) {
        if !self.ptr.is_null() {
            #[cfg(unix)]
            unsafe {
                libc::munmap(self.ptr as *mut libc::c_void, self.size);
            }

            #[cfg(windows)]
            unsafe {
                winapi::um::memoryapi::UnmapViewOfFile(self.ptr as *mut winapi::ctypes::c_void);
            }
        }
    }
}

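// SAFETY: the struct owns its mapping and only exposes the underlying pages
// through the `&self`/`&mut self` slice accessors, so sending or sharing it
// across threads is assumed to be sound for this usage.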
unsafe impl Send for MemoryMappedFile {}
unsafe impl Sync for MemoryMappedFile {}

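/// Hands out strong and weak references to a shared value and can prune weak
/// references whose target has been dropped.
///
/// Despite the name this is reference counting via `Arc`, not a tracing
/// garbage collector; `collect_garbage` only discards dead `Weak` handles.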
pub struct GcHelper<T> {
    data: Arc<T>,
    weak_refs: Arc<Mutex<Vec<std::sync::Weak<T>>>>,
}

impl<T> GcHelper<T> {
    pub fn new(data: T) -> Self {
        Self {
            data: Arc::new(data),
            weak_refs: Arc::new(Mutex::new(Vec::new())),
        }
    }

    pub fn get_ref(&self) -> Arc<T> {
        self.data.clone()
    }

    pub fn get_weak_ref(&self) -> std::sync::Weak<T> {
        let weak = Arc::downgrade(&self.data);
        let mut refs = self.weak_refs.lock().unwrap();
        refs.push(weak.clone());
        weak
    }

    pub fn collect_garbage(&self) {
        let mut refs = self.weak_refs.lock().unwrap();
        refs.retain(|weak_ref| weak_ref.upgrade().is_some());
    }

    pub fn ref_count(&self) -> usize {
        Arc::strong_count(&self.data)
    }

    pub fn weak_ref_count(&self) -> usize {
        let refs = self.weak_refs.lock().unwrap();
        refs.len()
    }
}

impl<T> Clone for GcHelper<T> {
    fn clone(&self) -> Self {
        Self {
            data: self.data.clone(),
            weak_refs: self.weak_refs.clone(),
        }
    }
}

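/// Records a time series of memory-usage samples and tracks the current,
/// peak, and average values. The caller supplies the readings via `update`;
/// the monitor does not measure process memory itself.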
pub struct MemoryMonitor {
    start_time: Instant,
    peak_memory: u64,
    current_memory: u64,
    samples: Vec<(Instant, u64)>,
}

impl MemoryMonitor {
    pub fn new() -> Self {
        Self {
            start_time: Instant::now(),
            peak_memory: 0,
            current_memory: 0,
            samples: Vec::new(),
        }
    }

    pub fn update(&mut self, memory_usage: u64) {
        self.current_memory = memory_usage;
        if memory_usage > self.peak_memory {
            self.peak_memory = memory_usage;
        }
        self.samples.push((Instant::now(), memory_usage));
    }

    pub fn peak_memory(&self) -> u64 {
        self.peak_memory
    }

    pub fn current_memory(&self) -> u64 {
        self.current_memory
    }

    pub fn average_memory(&self) -> f64 {
        if self.samples.is_empty() {
            return 0.0;
        }
        let sum: u64 = self.samples.iter().map(|(_, mem)| *mem).sum();
        sum as f64 / self.samples.len() as f64
    }

    pub fn memory_over_time(&self) -> &[(Instant, u64)] {
        &self.samples
    }

    pub fn duration(&self) -> Duration {
        Instant::now().duration_since(self.start_time)
    }
}

impl Default for MemoryMonitor {
    fn default() -> Self {
        Self::new()
    }
}

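/// A `Vec<T>` wrapper whose accessors return `UtilsResult` instead of
/// panicking on out-of-bounds indices. Bounds checking is on by default and
/// can be switched off with `disable_bounds_check`.
///
/// A minimal usage sketch (mirrors `test_safe_vec` below):
///
/// ```ignore
/// let mut v = SafeVec::new();
/// v.push(1);
/// assert_eq!(*v.get(0).unwrap(), 1);
/// assert!(v.get(5).is_err());
/// ```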
#[derive(Debug, Clone)]
pub struct SafeVec<T> {
    data: Vec<T>,
    bounds_check: bool,
}

impl<T> SafeVec<T> {
    pub fn new() -> Self {
        Self {
            data: Vec::new(),
            bounds_check: true,
        }
    }

    pub fn with_capacity(capacity: usize) -> Self {
        Self {
            data: Vec::with_capacity(capacity),
            bounds_check: true,
        }
    }

    pub fn from_vec(vec: Vec<T>) -> Self {
        Self {
            data: vec,
            bounds_check: true,
        }
    }

    pub fn disable_bounds_check(mut self) -> Self {
        self.bounds_check = false;
        self
    }

    pub fn get(&self, index: usize) -> UtilsResult<&T> {
        if self.bounds_check && index >= self.data.len() {
            return Err(UtilsError::InvalidParameter(format!(
                "Index {} out of bounds for vector of length {}",
                index,
                self.data.len()
            )));
        }
        self.data
            .get(index)
            .ok_or_else(|| UtilsError::InvalidParameter(format!("Index {index} out of bounds")))
    }

    pub fn get_mut(&mut self, index: usize) -> UtilsResult<&mut T> {
        if self.bounds_check && index >= self.data.len() {
            return Err(UtilsError::InvalidParameter(format!(
                "Index {} out of bounds for vector of length {}",
                index,
                self.data.len()
            )));
        }
        let len = self.data.len();
        self.data.get_mut(index).ok_or_else(|| {
            UtilsError::InvalidParameter(format!(
                "Index {index} out of bounds for vector of length {len}"
            ))
        })
    }

    pub fn safe_slice(&self, start: usize, end: usize) -> UtilsResult<&[T]> {
        if self.bounds_check {
            if start > end {
                return Err(UtilsError::InvalidParameter(
                    "Start index cannot be greater than end index".to_string(),
                ));
            }
            if end > self.data.len() {
                return Err(UtilsError::InvalidParameter(format!(
                    "End index {end} out of bounds for vector of length {}",
                    self.data.len()
                )));
            }
        }
        Ok(&self.data[start..end])
    }

    pub fn push(&mut self, item: T) {
        self.data.push(item);
    }

    pub fn pop(&mut self) -> Option<T> {
        self.data.pop()
    }

    pub fn len(&self) -> usize {
        self.data.len()
    }

    pub fn is_empty(&self) -> bool {
        self.data.is_empty()
    }

    pub fn capacity(&self) -> usize {
        self.data.capacity()
    }

    pub fn reserve(&mut self, additional: usize) {
        self.data.reserve(additional);
    }

    /// # Safety
    ///
    /// Exposes the inner `Vec` directly, bypassing the checked accessors;
    /// callers must do their own bounds checking on whatever they index.
    pub unsafe fn as_vec(&self) -> &Vec<T> {
        &self.data
    }

    pub fn into_vec(self) -> Vec<T> {
        self.data
    }
}

impl<T> Default for SafeVec<T> {
    fn default() -> Self {
        Self::new()
    }
}

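/// A fixed-capacity buffer pre-filled with a default value.
///
/// `write`, `read`, and `append` return errors instead of panicking when an
/// index or append would fall outside the buffer.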
#[derive(Debug, Clone)]
pub struct SafeBuffer<T> {
    data: Vec<T>,
    capacity: usize,
    size: usize,
    overflow_protection: bool,
}

impl<T: Clone> SafeBuffer<T> {
    pub fn new(capacity: usize, default_value: T) -> Self {
        Self {
            data: vec![default_value; capacity],
            capacity,
            size: 0,
            overflow_protection: true,
        }
    }

    pub fn write(&mut self, index: usize, value: T) -> UtilsResult<()> {
        if self.overflow_protection && index >= self.capacity {
            return Err(UtilsError::InvalidParameter(format!(
                "Buffer overflow: index {} exceeds capacity {}",
                index, self.capacity
            )));
        }

        if index < self.data.len() {
            self.data[index] = value;
            self.size = self.size.max(index + 1);
            Ok(())
        } else {
            Err(UtilsError::InvalidParameter(format!(
                "Index {} out of bounds for buffer of capacity {}",
                index, self.capacity
            )))
        }
    }

    pub fn read(&self, index: usize) -> UtilsResult<&T> {
        if index >= self.size {
            return Err(UtilsError::InvalidParameter(format!(
                "Index {} out of bounds for buffer of size {}",
                index, self.size
            )));
        }

        self.data
            .get(index)
            .ok_or_else(|| UtilsError::InvalidParameter(format!("Index {index} out of bounds")))
    }

    pub fn append(&mut self, value: T) -> UtilsResult<()> {
        if self.size >= self.capacity {
            return Err(UtilsError::InvalidParameter(
                "Buffer overflow: cannot append to full buffer".to_string(),
            ));
        }

        self.data[self.size] = value;
        self.size += 1;
        Ok(())
    }

    pub fn size(&self) -> usize {
        self.size
    }

    pub fn capacity(&self) -> usize {
        self.capacity
    }

    pub fn is_full(&self) -> bool {
        self.size >= self.capacity
    }

    pub fn clear(&mut self) {
        self.size = 0;
    }

    /// # Safety
    ///
    /// Turns off the explicit capacity check in `write`; subsequent writes
    /// rely solely on the length of the backing `Vec` for validation.
    pub unsafe fn disable_overflow_protection(&mut self) {
        self.overflow_protection = false;
    }
}

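/// A shared, lock-protected slot holding an optional value, with an optional
/// cleanup hook that runs when the last handle is dropped.
///
/// `clone` does not carry the cleanup hook, so the hook only fires if the
/// handle created with `with_cleanup` is the last one to be dropped.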
pub struct SafePtr<T> {
    data: Arc<RwLock<Option<T>>>,
    cleanup_fn: Option<Box<dyn Fn() + Send + Sync>>,
}

impl<T> std::fmt::Debug for SafePtr<T> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("SafePtr")
            .field("data", &"Arc<RwLock<Option<T>>>")
            .field(
                "cleanup_fn",
                &self.cleanup_fn.as_ref().map(|_| "Some(cleanup_fn)"),
            )
            .finish()
    }
}

impl<T> SafePtr<T> {
    pub fn new(value: T) -> Self {
        Self {
            data: Arc::new(RwLock::new(Some(value))),
            cleanup_fn: None,
        }
    }

    pub fn with_cleanup<F>(value: T, cleanup: F) -> Self
    where
        F: Fn() + Send + Sync + 'static,
    {
        Self {
            data: Arc::new(RwLock::new(Some(value))),
            cleanup_fn: Some(Box::new(cleanup)),
        }
    }

    pub fn try_read(&self) -> UtilsResult<std::sync::RwLockReadGuard<'_, Option<T>>> {
        self.data
            .read()
            .map_err(|e| UtilsError::InvalidParameter(format!("Failed to acquire read lock: {e}")))
    }

    pub fn try_write(&self) -> UtilsResult<std::sync::RwLockWriteGuard<'_, Option<T>>> {
        self.data
            .write()
            .map_err(|e| UtilsError::InvalidParameter(format!("Failed to acquire write lock: {e}")))
    }

    pub fn is_valid(&self) -> bool {
        if let Ok(guard) = self.data.read() {
            guard.is_some()
        } else {
            false
        }
    }

    pub fn take(&self) -> UtilsResult<Option<T>> {
        let mut guard = self.try_write()?;
        Ok(guard.take())
    }

    pub fn ref_count(&self) -> usize {
        Arc::strong_count(&self.data)
    }
}

impl<T> Clone for SafePtr<T> {
    fn clone(&self) -> Self {
        Self {
            data: self.data.clone(),
            // The cleanup hook stays with the original handle.
            cleanup_fn: None,
        }
    }
}

impl<T> Drop for SafePtr<T> {
    fn drop(&mut self) {
        // Run the cleanup hook only when the last handle is being dropped.
        if Arc::strong_count(&self.data) == 1 {
            if let Some(cleanup) = &self.cleanup_fn {
                cleanup();
            }
        }
    }
}

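/// Alignment helpers for raw pointers and sizes.
///
/// `aligned_size` rounds a size up to a multiple of `alignment` and assumes a
/// power-of-two alignment, matching the requirement of `Layout`.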
pub struct MemoryAlignment;

impl MemoryAlignment {
    pub fn is_aligned<T>(ptr: *const T, alignment: usize) -> bool {
        (ptr as usize) % alignment == 0
    }

    pub fn alignment_of<T>() -> usize {
        std::mem::align_of::<T>()
    }

    pub fn aligned_size(size: usize, alignment: usize) -> usize {
        (size + alignment - 1) & !(alignment - 1)
    }

    pub fn aligned_layout(
        size: usize,
        alignment: usize,
    ) -> Result<Layout, std::alloc::LayoutError> {
        Layout::from_size_align(size, alignment)
    }
}

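/// Runs a closure when the guard is dropped (or when `cleanup` is called
/// explicitly), giving scope-based cleanup similar to `defer` in other
/// languages.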
pub struct StackGuard<F: FnOnce()> {
    cleanup: Option<F>,
}

impl<F: FnOnce()> StackGuard<F> {
    pub fn new(cleanup: F) -> Self {
        Self {
            cleanup: Some(cleanup),
        }
    }

    pub fn cleanup(mut self) {
        if let Some(cleanup) = self.cleanup.take() {
            cleanup();
        }
    }
}

impl<F: FnOnce()> Drop for StackGuard<F> {
    fn drop(&mut self) {
        if let Some(cleanup) = self.cleanup.take() {
            cleanup();
        }
    }
}

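/// Runs the given expression when the current scope ends, by binding a
/// `StackGuard` to a hidden local. See `test_defer_macro` below for usage.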
#[macro_export]
macro_rules! defer {
    ($cleanup:expr) => {
        let _guard = $crate::memory::StackGuard::new(|| $cleanup);
    };
}

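/// Lightweight sanity checks for pointers, alignment, and buffer accesses.
/// These catch obvious mistakes (null pointers, overflowing ranges,
/// misaligned pointers) but cannot prove that a pointer is actually valid to
/// dereference.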
pub struct MemoryValidator;

impl MemoryValidator {
    pub unsafe fn validate_range<T>(ptr: *const T, count: usize) -> UtilsResult<()> {
        if ptr.is_null() {
            return Err(UtilsError::InvalidParameter("Null pointer".to_string()));
        }

        // Use wrapping arithmetic so that an overflowing range is detected
        // instead of being undefined behaviour.
        let end_ptr = ptr.wrapping_add(count);
        if end_ptr < ptr {
            return Err(UtilsError::InvalidParameter("Pointer overflow".to_string()));
        }

        Ok(())
    }

    pub fn validate_alignment<T>(ptr: *const T, required_alignment: usize) -> UtilsResult<()> {
        if !MemoryAlignment::is_aligned(ptr, required_alignment) {
            return Err(UtilsError::InvalidParameter(format!(
                "Pointer not aligned to {required_alignment} byte boundary"
            )));
        }
        Ok(())
    }

    pub fn validate_buffer_access(
        buffer_size: usize,
        offset: usize,
        access_size: usize,
    ) -> UtilsResult<()> {
        if offset >= buffer_size {
            return Err(UtilsError::InvalidParameter(format!(
                "Offset {offset} exceeds buffer size {buffer_size}"
            )));
        }

        // saturating_add keeps the range check itself from overflowing.
        let end = offset.saturating_add(access_size);
        if end > buffer_size {
            return Err(UtilsError::InvalidParameter(format!(
                "Access range {offset}..{end} exceeds buffer size {buffer_size}"
            )));
        }

        Ok(())
    }
}

#[allow(non_snake_case)]
#[cfg(test)]
mod tests {
    use super::*;
    use std::alloc::System;

    #[test]
    fn test_tracking_allocator_stats() {
        let allocator = TrackingAllocator::new(System);
        let initial_stats = allocator.stats();
        assert_eq!(initial_stats.allocation_count, 0);
        assert_eq!(initial_stats.current_allocated, 0);
    }

    #[test]
    fn test_memory_pool() {
        let mut pool: MemoryPool<u64> = MemoryPool::new(10);

        {
            let item1 = pool.allocate().unwrap();
            *item1 = 42;
            assert_eq!(*item1, 42);
        }
        assert_eq!(pool.used(), 1);

        {
            let item2 = pool.allocate().unwrap();
            *item2 = 84;
            assert_eq!(*item2, 84);
        }
        assert_eq!(pool.used(), 2);

        assert_eq!(pool.capacity(), 10);
    }

    #[test]
    fn test_leak_detector() {
        let detector = LeakDetector::new();
        let ptr = Box::into_raw(Box::new(42u64));

        detector.track_allocation(ptr as *mut u8, 8);
        assert_eq!(detector.total_leaked_bytes(), 8);

        detector.track_deallocation(ptr as *mut u8);
        assert_eq!(detector.total_leaked_bytes(), 0);

        unsafe { drop(Box::from_raw(ptr)) };
    }

    #[test]
    fn test_gc_helper() {
        let gc = GcHelper::new(42u64);
        assert_eq!(gc.ref_count(), 1);

        let strong_ref = gc.get_ref();
        assert_eq!(gc.ref_count(), 2);
        assert_eq!(*strong_ref, 42);

        let weak_ref = gc.get_weak_ref();
        assert!(weak_ref.upgrade().is_some());

        drop(strong_ref);
        assert_eq!(gc.ref_count(), 1);
    }

    #[test]
    fn test_memory_monitor() {
        let mut monitor = MemoryMonitor::new();
        assert_eq!(monitor.peak_memory(), 0);
        assert_eq!(monitor.current_memory(), 0);

        monitor.update(1024);
        assert_eq!(monitor.peak_memory(), 1024);
        assert_eq!(monitor.current_memory(), 1024);

        monitor.update(512);
        assert_eq!(monitor.peak_memory(), 1024);
        assert_eq!(monitor.current_memory(), 512);

        monitor.update(2048);
        assert_eq!(monitor.peak_memory(), 2048);
        assert_eq!(monitor.current_memory(), 2048);

        assert_eq!(monitor.average_memory(), (1024.0 + 512.0 + 2048.0) / 3.0);
    }

    #[test]
    fn test_safe_vec() {
        let mut safe_vec = SafeVec::new();
        safe_vec.push(1);
        safe_vec.push(2);
        safe_vec.push(3);

        assert_eq!(*safe_vec.get(0).unwrap(), 1);
        assert_eq!(*safe_vec.get(2).unwrap(), 3);

        assert!(safe_vec.get(5).is_err());

        let slice = safe_vec.safe_slice(1, 3).unwrap();
        assert_eq!(slice, &[2, 3]);

        assert!(safe_vec.safe_slice(2, 5).is_err());
        assert!(safe_vec.safe_slice(3, 2).is_err());
    }

    #[test]
    fn test_safe_buffer() {
        let mut buffer = SafeBuffer::new(5, 0);

        buffer.write(0, 42).unwrap();
        buffer.write(1, 84).unwrap();

        assert_eq!(*buffer.read(0).unwrap(), 42);
        assert_eq!(*buffer.read(1).unwrap(), 84);
        assert_eq!(buffer.size(), 2);

        buffer.append(100).unwrap();
        buffer.append(200).unwrap();
        buffer.append(300).unwrap();

        assert!(buffer.is_full());
        // A full buffer rejects further appends.
        assert!(buffer.append(400).is_err());
        // Writes past the capacity are rejected.
        assert!(buffer.write(10, 500).is_err());
    }

    #[test]
    fn test_safe_ptr() {
        let ptr = SafePtr::new(42);
        assert!(ptr.is_valid());
        assert_eq!(ptr.ref_count(), 1);

        let _ptr2 = ptr.clone();
        assert_eq!(ptr.ref_count(), 2);

        {
            let guard = ptr.try_read().unwrap();
            assert_eq!(*guard, Some(42));
        }

        let value = ptr.take().unwrap();
        assert_eq!(value, Some(42));
        assert!(!ptr.is_valid());
    }

    #[test]
    fn test_memory_alignment() {
        let data = 42u64;
        let ptr = &data as *const u64;

        assert!(MemoryAlignment::is_aligned(ptr, 8));
        assert_eq!(MemoryAlignment::alignment_of::<u64>(), 8);

        assert_eq!(MemoryAlignment::aligned_size(10, 8), 16);
        assert_eq!(MemoryAlignment::aligned_size(16, 8), 16);
        assert_eq!(MemoryAlignment::aligned_size(17, 8), 24);
    }

    #[test]
    fn test_stack_guard() {
        use std::sync::Arc;

        let cleanup_called = Arc::new(Mutex::new(false));
        let cleanup_called_clone = cleanup_called.clone();

        {
            let _guard = StackGuard::new(|| {
                *cleanup_called_clone.lock().unwrap() = true;
            });

            assert!(!*cleanup_called.lock().unwrap());
        }

        // The guard has been dropped, so the cleanup closure has run.
        assert!(*cleanup_called.lock().unwrap());
    }

    #[test]
    fn test_memory_validator() {
        let null_ptr: *const u8 = std::ptr::null();
        assert!(unsafe { MemoryValidator::validate_range(null_ptr, 10) }.is_err());

        let data = [1u8, 2, 3, 4, 5];
        let ptr = data.as_ptr();
        assert!(unsafe { MemoryValidator::validate_range(ptr, 5) }.is_ok());

        let aligned_ptr = &42u64 as *const u64;
        assert!(MemoryValidator::validate_alignment(aligned_ptr, 8).is_ok());

        assert!(MemoryValidator::validate_buffer_access(10, 0, 5).is_ok());
        assert!(MemoryValidator::validate_buffer_access(10, 5, 5).is_ok());
        // An offset at or past the end of the buffer is rejected.
        assert!(MemoryValidator::validate_buffer_access(10, 10, 1).is_err());
        // A range that runs past the end of the buffer is rejected.
        assert!(MemoryValidator::validate_buffer_access(10, 8, 5).is_err());
    }

    #[test]
    fn test_defer_macro() {
        use std::sync::Arc;

        let cleanup_called = Arc::new(Mutex::new(false));
        let cleanup_called_clone = cleanup_called.clone();

        {
            defer!({
                *cleanup_called_clone.lock().unwrap() = true;
            });

            assert!(!*cleanup_called.lock().unwrap());
        }

        // The deferred closure runs when the scope above ends.
        assert!(*cleanup_called.lock().unwrap());
    }
}