// torsh_backend/memory.rs

1//! Memory management abstractions
2
use crate::{Buffer, BufferDescriptor, Device};
use torsh_core::error::{Result, TorshError};

#[cfg(not(feature = "std"))]
use alloc::{boxed::Box, format, string::ToString, vec, vec::Vec};
8
/// Unified memory management interface across all backends
///
/// An implementation owns allocation bookkeeping for a single [`Device`] and
/// must be shareable across threads (`Send + Sync`). Raw-pointer methods
/// return backend-specific pointers; their validity and lifetime contracts
/// are defined by each backend — NOTE(review): document per backend.
pub trait MemoryManager: Send + Sync {
    /// Allocate a buffer matching `descriptor`
    fn allocate(&mut self, descriptor: &BufferDescriptor) -> Result<Buffer>;

    /// Deallocate a buffer previously returned by [`MemoryManager::allocate`]
    fn deallocate(&mut self, buffer: &Buffer) -> Result<()>;

    /// Get memory statistics for this manager's device
    fn stats(&self) -> MemoryStats;

    /// Garbage collect unused memory
    ///
    /// Returns a count on success — presumably bytes reclaimed; TODO confirm
    /// the unit against backend implementations.
    fn garbage_collect(&mut self) -> Result<usize>;

    /// Set memory pool for efficient allocation
    fn set_pool(&mut self, pool: Box<dyn MemoryPool>) -> Result<()>;

    /// Get the device this manager is for
    fn device(&self) -> &Device;

    /// Allocate raw memory with specific alignment requirements
    fn allocate_raw(&mut self, size: usize, alignment: usize) -> Result<*mut u8>;

    /// Deallocate raw memory previously obtained from `allocate_raw`
    fn deallocate_raw(&mut self, ptr: *mut u8, size: usize) -> Result<()>;

    /// Check if memory manager supports unified memory
    fn supports_unified_memory(&self) -> bool;

    /// Allocate unified memory (host-device accessible)
    fn allocate_unified(&mut self, size: usize) -> Result<*mut u8>;

    /// Deallocate unified memory previously obtained from `allocate_unified`
    fn deallocate_unified(&mut self, ptr: *mut u8, size: usize) -> Result<()>;

    /// Prefetch memory to device (for unified memory)
    fn prefetch_to_device(&self, ptr: *mut u8, size: usize) -> Result<()>;

    /// Prefetch memory to host (for unified memory)
    fn prefetch_to_host(&self, ptr: *mut u8, size: usize) -> Result<()>;

    /// Set memory access advice (for unified memory optimization)
    fn set_memory_advice(&self, ptr: *mut u8, size: usize, advice: MemoryAdvice) -> Result<()>;

    /// Get available memory on device, in bytes
    fn available_memory(&self) -> Result<usize>;

    /// Get total memory on device, in bytes
    fn total_memory(&self) -> Result<usize>;

    /// Synchronize all pending memory operations
    fn synchronize(&self) -> Result<()>;

    /// Defragment memory to reduce fragmentation
    fn defragment(&mut self) -> Result<DefragmentationResult>;

    /// Check if defragmentation is needed
    fn needs_defragmentation(&self) -> bool;

    /// Get memory fragmentation information
    fn fragmentation_info(&self) -> FragmentationInfo;

    /// Compact memory by moving allocated blocks together
    fn compact_memory(&mut self) -> Result<CompactionResult>;

    /// Set defragmentation policy
    fn set_defragmentation_policy(&mut self, policy: DefragmentationPolicy);
}
77
/// Memory pool interface for efficient allocation
///
/// A pool hands out raw pointers carved from a pre-reserved region, avoiding
/// a round trip to the backend allocator for every request.
pub trait MemoryPool: Send + Sync {
    /// Allocate `size` bytes from the pool with the given `alignment`
    fn allocate(&mut self, size: usize, alignment: usize) -> Result<*mut u8>;

    /// Deallocate memory back to the pool
    ///
    /// `ptr` and `size` must match a prior [`MemoryPool::allocate`] call.
    fn deallocate(&mut self, ptr: *mut u8, size: usize) -> Result<()>;

    /// Get pool statistics
    fn stats(&self) -> PoolStats;

    /// Reset the pool (deallocate all memory)
    fn reset(&mut self) -> Result<()>;

    /// Get total pool capacity, in bytes
    fn capacity(&self) -> usize;

    /// Get available (free) memory in the pool, in bytes
    fn available(&self) -> usize;

    /// Defragment the memory pool
    fn defragment(&mut self) -> Result<DefragmentationResult>;

    /// Check if the pool needs defragmentation
    fn needs_defragmentation(&self) -> bool;

    /// Get pool fragmentation information
    fn fragmentation_info(&self) -> FragmentationInfo;

    /// Compact allocated blocks in the pool
    fn compact(&mut self) -> Result<CompactionResult>;
}
110
/// Memory usage statistics
///
/// All byte counts refer to the device served by the reporting manager/pool.
/// `Default` yields an all-zero snapshot (the previous hand-written `Default`
/// impl was identical to the derived one, so it is derived now).
#[derive(Debug, Clone, Default)]
pub struct MemoryStats {
    /// Total device memory in bytes
    pub total_memory: usize,

    /// Currently allocated memory in bytes
    pub allocated_memory: usize,

    /// Available memory in bytes
    pub available_memory: usize,

    /// Peak memory usage in bytes (high-water mark)
    pub peak_memory: usize,

    /// Number of active allocations
    pub active_allocations: usize,

    /// Total number of allocations made
    pub total_allocations: usize,

    /// Total number of deallocations made
    pub total_deallocations: usize,

    /// Memory fragmentation ratio (0.0 to 1.0)
    pub fragmentation: f32,

    /// Allocation efficiency (allocated / total, 0.0 to 1.0)
    pub efficiency: f32,
}

impl MemoryStats {
    /// Calculate utilization as a percentage (0.0 to 100.0).
    ///
    /// Returns 0.0 when `total_memory` is zero to avoid division by zero.
    pub fn utilization(&self) -> f32 {
        if self.total_memory == 0 {
            0.0
        } else {
            (self.allocated_memory as f32 / self.total_memory as f32) * 100.0
        }
    }

    /// Check if memory pressure is high: more than 90% utilized, or more
    /// than half of the free space lies outside the largest free block.
    pub fn is_under_pressure(&self) -> bool {
        self.utilization() > 90.0 || self.fragmentation > 0.5
    }
}
173
/// Memory pool statistics
///
/// Block-size fields (`largest_free_block`, `smallest_free_block`,
/// `average_free_block`) are 0 when the pool has no free blocks.
#[derive(Debug, Clone, Default)]
pub struct PoolStats {
    /// Total pool capacity in bytes
    pub capacity: usize,

    /// Currently allocated bytes from pool
    pub allocated: usize,

    /// Available bytes in pool
    pub available: usize,

    /// Number of free blocks
    pub free_blocks: usize,

    /// Number of allocated blocks
    pub allocated_blocks: usize,

    /// Largest free block size in bytes
    pub largest_free_block: usize,

    /// Smallest free block size in bytes
    pub smallest_free_block: usize,

    /// Average free block size in bytes
    pub average_free_block: usize,
}
201
// Note: the FreeListPool below is a simple offset-based free-list allocator
// that performs pointer arithmetic only (it never dereferences the base
// pointer). Backends may still provide their own pools using proper memory
// allocation for their respective devices.
205
/// FreeListPool memory allocator implementation
///
/// Tracks sub-allocations of a caller-provided memory region as
/// `(offset, size)` pairs. The pool only does pointer arithmetic on
/// `base_ptr` and never dereferences it, so it can bookkeep device memory
/// as well as host memory.
#[derive(Debug)]
pub struct FreeListPool {
    /// Base pointer to the memory region (not owned by the pool)
    base_ptr: *mut u8,
    /// Total size of the memory pool in bytes
    total_size: usize,
    /// List of free blocks as (offset, size) pairs; not kept sorted except
    /// during coalescing
    free_blocks: Vec<(usize, usize)>,
    /// List of allocated blocks as (offset, size) pairs
    allocated_blocks: Vec<(usize, usize)>,
    /// Cached statistics, refreshed by `update_stats`
    stats: MemoryStats,
}
220
221impl FreeListPool {
222    /// Create a new FreeListPool with the given base pointer and size
223    pub fn new(base_ptr: *mut u8, total_size: usize) -> Self {
224        let mut pool = Self {
225            base_ptr,
226            total_size,
227            free_blocks: vec![(0, total_size)],
228            allocated_blocks: Vec::new(),
229            stats: MemoryStats::default(),
230        };
231        pool.update_stats();
232        pool
233    }
234
235    /// Find a suitable free block for the given size and alignment using first-fit strategy
236    fn find_free_block(&self, size: usize, alignment: usize) -> Option<usize> {
237        self.find_free_block_with_strategy(size, alignment, AllocationStrategy::FirstFit)
238    }
239
240    /// Find a suitable free block using the specified allocation strategy
241    fn find_free_block_with_strategy(
242        &self,
243        size: usize,
244        alignment: usize,
245        strategy: AllocationStrategy,
246    ) -> Option<usize> {
247        match strategy {
248            AllocationStrategy::FirstFit => self
249                .free_blocks
250                .iter()
251                .enumerate()
252                .find(|(_, &(offset, block_size))| {
253                    let aligned_offset = (offset + alignment - 1) & !(alignment - 1);
254                    let padding = aligned_offset - offset;
255                    padding + size <= block_size
256                })
257                .map(|(idx, _)| idx),
258            AllocationStrategy::BestFit => {
259                let mut best_idx = None;
260                let mut best_size = usize::MAX;
261
262                for (idx, &(offset, block_size)) in self.free_blocks.iter().enumerate() {
263                    let aligned_offset = (offset + alignment - 1) & !(alignment - 1);
264                    let padding = aligned_offset - offset;
265
266                    if padding + size <= block_size && block_size < best_size {
267                        best_idx = Some(idx);
268                        best_size = block_size;
269                    }
270                }
271
272                best_idx
273            }
274            AllocationStrategy::WorstFit => {
275                let mut worst_idx = None;
276                let mut worst_size = 0;
277
278                for (idx, &(offset, block_size)) in self.free_blocks.iter().enumerate() {
279                    let aligned_offset = (offset + alignment - 1) & !(alignment - 1);
280                    let padding = aligned_offset - offset;
281
282                    if padding + size <= block_size && block_size > worst_size {
283                        worst_idx = Some(idx);
284                        worst_size = block_size;
285                    }
286                }
287
288                worst_idx
289            }
290            AllocationStrategy::NextFit => {
291                // For simplicity, fall back to first fit
292                // In a real implementation, we'd maintain a cursor for next fit
293                self.find_free_block_with_strategy(size, alignment, AllocationStrategy::FirstFit)
294            }
295        }
296    }
297
298    /// Update memory statistics
299    fn update_stats(&mut self) {
300        let allocated: usize = self.allocated_blocks.iter().map(|(_, size)| size).sum();
301        let available: usize = self.free_blocks.iter().map(|(_, size)| size).sum();
302
303        self.stats.allocated_memory = allocated;
304        self.stats.available_memory = available;
305        self.stats.active_allocations = self.allocated_blocks.len();
306        self.stats.total_memory = self.total_size;
307        self.stats.efficiency = if self.total_size > 0 {
308            allocated as f32 / self.total_size as f32
309        } else {
310            0.0
311        };
312        self.stats.fragmentation = if available > 0 {
313            1.0 - (self
314                .free_blocks
315                .iter()
316                .map(|(_, size)| *size)
317                .max()
318                .unwrap_or(0) as f32
319                / available as f32)
320        } else {
321            0.0
322        };
323    }
324
325    /// Get the total capacity of the memory pool
326    pub fn capacity(&self) -> usize {
327        self.total_size
328    }
329
330    /// Coalesce adjacent free blocks to reduce fragmentation
331    fn coalesce_free_blocks(&mut self) {
332        if self.free_blocks.len() <= 1 {
333            return;
334        }
335
336        // Sort free blocks by offset
337        self.free_blocks.sort_by_key(|(offset, _)| *offset);
338
339        // Coalesce adjacent blocks
340        let mut i = 0;
341        while i < self.free_blocks.len().saturating_sub(1) {
342            let (offset1, size1) = self.free_blocks[i];
343            let (offset2, size2) = self.free_blocks[i + 1];
344
345            // Check if blocks are adjacent
346            if offset1 + size1 == offset2 {
347                // Merge blocks
348                self.free_blocks[i] = (offset1, size1 + size2);
349                self.free_blocks.remove(i + 1);
350                // Don't increment i to check if this merged block can be further coalesced
351            } else {
352                i += 1;
353            }
354        }
355    }
356
357    /// Detect potential memory leaks by finding long-lived allocations
358    pub fn detect_leaks(&self) -> Vec<LeakReport> {
359        // In a real implementation, we'd track allocation timestamps
360        // For now, report allocations that seem suspiciously large or numerous
361        let mut reports = Vec::new();
362
363        if self.allocated_blocks.len() > 1000 {
364            reports.push(LeakReport {
365                leak_type: LeakType::TooManyAllocations,
366                block_count: self.allocated_blocks.len(),
367                total_size: self.stats.allocated_memory,
368                severity: LeakSeverity::High,
369                description: format!(
370                    "Too many active allocations: {}",
371                    self.allocated_blocks.len()
372                ),
373            });
374        }
375
376        // Check for very large allocations that might be leaks
377        for &(offset, size) in &self.allocated_blocks {
378            if size > self.total_size / 4 {
379                // More than 25% of total memory
380                reports.push(LeakReport {
381                    leak_type: LeakType::LargeAllocation,
382                    block_count: 1,
383                    total_size: size,
384                    severity: LeakSeverity::Medium,
385                    description: format!("Large allocation at offset {}: {} bytes", offset, size),
386                });
387            }
388        }
389
390        reports
391    }
392
393    /// Validate internal consistency of the memory pool
394    pub fn validate_consistency(&self) -> Result<()> {
395        // Check for overlapping allocated blocks
396        for i in 0..self.allocated_blocks.len() {
397            for j in (i + 1)..self.allocated_blocks.len() {
398                let (offset1, size1) = self.allocated_blocks[i];
399                let (offset2, size2) = self.allocated_blocks[j];
400
401                let end1 = offset1 + size1;
402                let end2 = offset2 + size2;
403
404                if offset1 < end2 && offset2 < end1 {
405                    return Err(TorshError::AllocationError(format!(
406                        "Overlapping allocations detected: [{}, {}) and [{}, {})",
407                        offset1, end1, offset2, end2
408                    )));
409                }
410            }
411        }
412
413        // Check for overlapping free blocks
414        for i in 0..self.free_blocks.len() {
415            for j in (i + 1)..self.free_blocks.len() {
416                let (offset1, size1) = self.free_blocks[i];
417                let (offset2, size2) = self.free_blocks[j];
418
419                let end1 = offset1 + size1;
420                let end2 = offset2 + size2;
421
422                if offset1 < end2 && offset2 < end1 {
423                    return Err(TorshError::AllocationError(format!(
424                        "Overlapping free blocks detected: [{}, {}) and [{}, {})",
425                        offset1, end1, offset2, end2
426                    )));
427                }
428            }
429        }
430
431        // Check for out-of-bounds blocks
432        for &(offset, size) in &self.allocated_blocks {
433            if offset + size > self.total_size {
434                return Err(TorshError::AllocationError(format!(
435                    "Allocated block extends beyond pool: offset={}, size={}, pool_size={}",
436                    offset, size, self.total_size
437                )));
438            }
439        }
440
441        for &(offset, size) in &self.free_blocks {
442            if offset + size > self.total_size {
443                return Err(TorshError::AllocationError(format!(
444                    "Free block extends beyond pool: offset={}, size={}, pool_size={}",
445                    offset, size, self.total_size
446                )));
447            }
448        }
449
450        Ok(())
451    }
452}
453
454impl MemoryPool for FreeListPool {
455    fn allocate(&mut self, size: usize, alignment: usize) -> Result<*mut u8> {
456        // Input validation
457        if size == 0 {
458            return Err(TorshError::InvalidArgument(
459                "Allocation size cannot be zero".to_string(),
460            ));
461        }
462
463        if alignment == 0 || !alignment.is_power_of_two() {
464            return Err(TorshError::InvalidArgument(format!(
465                "Alignment must be a power of two and non-zero, got: {}",
466                alignment
467            )));
468        }
469
470        // Check for potential overflow in size + alignment
471        if size > self.total_size || alignment > self.total_size {
472            return Err(TorshError::AllocationError(format!(
473                "Requested size ({}) or alignment ({}) exceeds pool capacity ({})",
474                size, alignment, self.total_size
475            )));
476        }
477
478        if let Some(block_idx) = self.find_free_block(size, alignment) {
479            let (offset, block_size) = self.free_blocks[block_idx];
480            let aligned_offset = (offset + alignment - 1) & !(alignment - 1);
481            let padding = aligned_offset - offset;
482            let required_size = padding + size;
483
484            // Remove the free block
485            self.free_blocks.remove(block_idx);
486
487            // Add padding as a new free block if needed
488            if padding > 0 {
489                self.free_blocks.push((offset, padding));
490            }
491
492            // Add remaining space as a new free block if any
493            if required_size < block_size {
494                let remaining_offset = offset + required_size;
495                let remaining_size = block_size - required_size;
496                self.free_blocks.push((remaining_offset, remaining_size));
497            }
498
499            // Record the allocation
500            self.allocated_blocks.push((aligned_offset, size));
501
502            // Update statistics
503            self.update_stats();
504
505            // Return aligned pointer (simplified - real implementation would use actual memory)
506            Ok(unsafe { self.base_ptr.add(aligned_offset) })
507        } else {
508            Err(TorshError::AllocationError(format!(
509                "Out of memory: requested {} bytes, available memory is {} bytes",
510                size, self.stats.available_memory
511            )))
512        }
513    }
514
515    #[allow(clippy::not_unsafe_ptr_arg_deref)]
516    fn deallocate(&mut self, ptr: *mut u8, size: usize) -> Result<()> {
517        // Input validation
518        if ptr.is_null() {
519            return Err(TorshError::InvalidArgument(
520                "Cannot deallocate null pointer".to_string(),
521            ));
522        }
523
524        if size == 0 {
525            return Err(TorshError::InvalidArgument(
526                "Cannot deallocate zero-sized block".to_string(),
527            ));
528        }
529
530        // Safety check: ensure pointer is within our memory range
531        if ptr < self.base_ptr || ptr >= unsafe { self.base_ptr.add(self.total_size) } {
532            return Err(TorshError::InvalidArgument(
533                "Pointer outside of memory pool range".to_string(),
534            ));
535        }
536
537        // Calculate offset from base pointer
538        // Safety: This is unsafe because it operates on raw pointers, but we've validated the bounds
539        let offset = unsafe { ptr.offset_from(self.base_ptr) } as usize;
540
541        // Find and remove the allocation
542        if let Some(pos) = self
543            .allocated_blocks
544            .iter()
545            .position(|&(off, sz)| off == offset && sz == size)
546        {
547            self.allocated_blocks.remove(pos);
548
549            // Add back to free list
550            self.free_blocks.push((offset, size));
551
552            // Coalesce adjacent free blocks
553            self.coalesce_free_blocks();
554
555            // Update statistics
556            self.update_stats();
557
558            Ok(())
559        } else {
560            Err(TorshError::InvalidArgument(
561                "Invalid deallocation: block not found".to_string(),
562            ))
563        }
564    }
565
566    fn stats(&self) -> PoolStats {
567        PoolStats {
568            capacity: self.total_size,
569            allocated: self.stats.allocated_memory,
570            available: self.stats.available_memory,
571            free_blocks: self.free_blocks.len(),
572            allocated_blocks: self.allocated_blocks.len(),
573            largest_free_block: self
574                .free_blocks
575                .iter()
576                .map(|(_, size)| *size)
577                .max()
578                .unwrap_or(0),
579            smallest_free_block: self
580                .free_blocks
581                .iter()
582                .map(|(_, size)| *size)
583                .min()
584                .unwrap_or(0),
585            average_free_block: if self.free_blocks.is_empty() {
586                0
587            } else {
588                self.stats.available_memory / self.free_blocks.len()
589            },
590        }
591    }
592
593    fn reset(&mut self) -> Result<()> {
594        self.free_blocks.clear();
595        self.allocated_blocks.clear();
596        self.free_blocks.push((0, self.total_size));
597        self.update_stats();
598        Ok(())
599    }
600
601    fn capacity(&self) -> usize {
602        self.total_size
603    }
604
605    fn available(&self) -> usize {
606        self.stats.available_memory
607    }
608
609    fn defragment(&mut self) -> Result<DefragmentationResult> {
610        // Simple stub implementation for FreeListPool
611        Ok(DefragmentationResult {
612            blocks_moved: 0,
613            memory_compacted: 0,
614            duration_ms: 0.0,
615            fragmentation_before: 0.0,
616            fragmentation_after: 0.0,
617            efficiency_improvement: 0.0,
618            success: true,
619        })
620    }
621
622    fn needs_defragmentation(&self) -> bool {
623        // Simple heuristic: check if we have many small free blocks
624        self.free_blocks.len() > 10
625    }
626
627    fn fragmentation_info(&self) -> FragmentationInfo {
628        let free_blocks = self.free_blocks.len();
629        let allocated_blocks = self.allocated_blocks.len();
630        let total_free = self.stats.available_memory;
631        let total_allocated = self.stats.allocated_memory;
632
633        let largest_free = self
634            .free_blocks
635            .iter()
636            .map(|(_, size)| *size)
637            .max()
638            .unwrap_or(0);
639
640        let smallest_free = self
641            .free_blocks
642            .iter()
643            .map(|(_, size)| *size)
644            .min()
645            .unwrap_or(0);
646
647        let average_free = if free_blocks > 0 {
648            total_free / free_blocks
649        } else {
650            0
651        };
652
653        let fragmentation = if self.capacity() > 0 {
654            free_blocks as f32 / (free_blocks + allocated_blocks) as f32
655        } else {
656            0.0
657        };
658
659        FragmentationInfo {
660            overall_fragmentation: fragmentation,
661            external_fragmentation: fragmentation * 0.8,
662            internal_fragmentation: fragmentation * 0.2,
663            free_blocks,
664            allocated_blocks,
665            largest_free_block: largest_free,
666            smallest_free_block: smallest_free,
667            average_free_block: average_free,
668            total_free_memory: total_free,
669            total_allocated_memory: total_allocated,
670            utilization_efficiency: if self.capacity() > 0 {
671                total_allocated as f32 / self.capacity() as f32
672            } else {
673                0.0
674            },
675            allocation_efficiency: if self.capacity() > 0 {
676                total_allocated as f32 / self.capacity() as f32
677            } else {
678                0.0
679            },
680        }
681    }
682
683    fn compact(&mut self) -> Result<CompactionResult> {
684        // Simple stub implementation for FreeListPool
685        let free_blocks_before = self.free_blocks.len();
686
687        // Sort free blocks by offset to help with coalescing
688        self.free_blocks.sort_by_key(|(offset, _)| *offset);
689
690        // Try to coalesce adjacent free blocks
691        let mut i = 0;
692        while i < self.free_blocks.len().saturating_sub(1) {
693            let (offset1, size1) = self.free_blocks[i];
694            let (offset2, size2) = self.free_blocks[i + 1];
695
696            if offset1 + size1 == offset2 {
697                // Adjacent blocks, coalesce them
698                self.free_blocks[i] = (offset1, size1 + size2);
699                self.free_blocks.remove(i + 1);
700            } else {
701                i += 1;
702            }
703        }
704
705        let free_blocks_after = self.free_blocks.len();
706
707        Ok(CompactionResult {
708            allocations_moved: 0,
709            bytes_moved: 0,
710            duration_ms: 0.0,
711            largest_free_before: self
712                .free_blocks
713                .iter()
714                .map(|(_, size)| *size)
715                .max()
716                .unwrap_or(0),
717            largest_free_after: self
718                .free_blocks
719                .iter()
720                .map(|(_, size)| *size)
721                .max()
722                .unwrap_or(0),
723            free_blocks_before,
724            free_blocks_after,
725            success: true,
726        })
727    }
728}
729
// SAFETY: FreeListPool stores only the raw base pointer plus (offset, size)
// bookkeeping; it performs pointer arithmetic but never dereferences the
// pointer. All mutation goes through &mut self, so Sync grants shared access
// to read-only methods only. NOTE(review): callers providing the base pointer
// must ensure the underlying region itself is safe to use across threads.
unsafe impl Send for FreeListPool {}
unsafe impl Sync for FreeListPool {}
732
/// Memory allocation strategy
///
/// Controls how a free block is chosen from the free list when serving an
/// allocation request.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum AllocationStrategy {
    /// First fit - use the first block that's large enough
    FirstFit,

    /// Best fit - use the smallest block that's large enough
    BestFit,

    /// Worst fit - use the largest available block
    WorstFit,

    /// Next fit - like first fit but start from the last allocation
    /// (note: `FreeListPool` currently falls back to first fit)
    NextFit,
}
748
/// Memory allocation hint
///
/// Advisory information a caller can attach to an allocation request;
/// backends may use it to pick a pool or placement. Purely a hint — it does
/// not change allocation semantics.
#[derive(Debug, Clone)]
pub struct AllocationHint {
    /// Expected lifetime of the allocation
    pub lifetime: AllocationLifetime,

    /// Access pattern hint
    pub access_pattern: AccessPattern,

    /// Preferred allocation strategy
    pub strategy: AllocationStrategy,

    /// Whether to satisfy the request from a memory pool
    pub use_pool: bool,
}
764
/// Expected allocation lifetime
///
/// Rough duration buckets; the time ranges are advisory, not enforced.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum AllocationLifetime {
    /// Very short-lived (microseconds to milliseconds)
    Temporary,

    /// Short-lived (milliseconds to seconds)
    Short,

    /// Medium-lived (seconds to minutes)
    Medium,

    /// Long-lived (minutes to hours)
    Long,

    /// Persistent (hours to application lifetime)
    Persistent,
}
783
/// Memory access pattern hint
///
/// Describes how the caller expects to touch the allocation; backends may
/// use it for placement or caching decisions.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum AccessPattern {
    /// Random access
    Random,

    /// Sequential access
    Sequential,

    /// Mostly read operations
    ReadMostly,

    /// Mostly write operations
    WriteMostly,

    /// Streaming (write once, read sequentially)
    Streaming,
}
802
803impl Default for AllocationHint {
804    fn default() -> Self {
805        Self {
806            lifetime: AllocationLifetime::Medium,
807            access_pattern: AccessPattern::Random,
808            strategy: AllocationStrategy::FirstFit,
809            use_pool: true,
810        }
811    }
812}
813
/// Memory advice for unified memory optimization
///
/// Paired Set/Unset variants toggle each hint. These resemble unified-memory
/// advice flags such as CUDA's `cudaMemAdvise` — NOTE(review): the exact
/// mapping is backend-specific; confirm per backend.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum MemoryAdvice {
    /// Set preferred location for memory
    SetPreferredLocation,
    /// Unset preferred location
    UnsetPreferredLocation,
    /// Set which device can access this memory
    SetAccessedBy,
    /// Unset device access
    UnsetAccessedBy,
    /// Mark memory as read-mostly
    SetReadMostly,
    /// Unmark memory as read-mostly
    UnsetReadMostly,
}
830
/// Extended memory manager factory for creating backend-specific managers
pub trait MemoryManagerFactory: Send + Sync {
    /// Create a memory manager for the given device
    ///
    /// # Errors
    ///
    /// Returns an error when the device is unsupported or construction fails.
    fn create_manager(&self, device: &Device) -> Result<Box<dyn MemoryManager>>;

    /// Get the backend type this factory supports
    fn backend_type(&self) -> crate::BackendType;

    /// Check if factory supports the given device
    fn supports_device(&self, device: &Device) -> bool;
}
842
/// Memory pool configuration
///
/// Built via [`MemoryPoolConfig::new`] plus the `with_*` builder methods,
/// or via `Default` (64 MiB, unbounded, first-fit).
#[derive(Debug, Clone)]
pub struct MemoryPoolConfig {
    /// Initial pool size in bytes
    pub initial_size: usize,

    /// Maximum pool size in bytes (None for unlimited)
    pub max_size: Option<usize>,

    /// Growth factor when pool needs to expand
    pub growth_factor: f32,

    /// Allocation strategy to use
    pub strategy: AllocationStrategy,

    /// Whether to enable free-block coalescing
    pub enable_coalescing: bool,

    /// Minimum block size for allocations, in bytes
    pub min_block_size: usize,

    /// Memory alignment requirement, in bytes
    pub alignment: usize,

    /// NUMA allocation strategy (CPU backend only)
    pub numa_strategy: Option<crate::cpu::memory::NumaAllocationStrategy>,
}
870
871impl Default for MemoryPoolConfig {
872    fn default() -> Self {
873        Self {
874            initial_size: 64 * 1024 * 1024, // 64MB
875            max_size: None,
876            growth_factor: 1.5,
877            strategy: AllocationStrategy::FirstFit,
878            enable_coalescing: true,
879            min_block_size: 256,
880            alignment: 16,
881            numa_strategy: None,
882        }
883    }
884}
885
886impl MemoryPoolConfig {
887    /// Create a new memory pool configuration
888    pub fn new(initial_size: usize) -> Self {
889        Self {
890            initial_size,
891            ..Default::default()
892        }
893    }
894
895    /// Set maximum pool size
896    pub fn with_max_size(mut self, max_size: usize) -> Self {
897        self.max_size = Some(max_size);
898        self
899    }
900
901    /// Set growth factor
902    pub fn with_growth_factor(mut self, growth_factor: f32) -> Self {
903        self.growth_factor = growth_factor;
904        self
905    }
906
907    /// Set allocation strategy
908    pub fn with_strategy(mut self, strategy: AllocationStrategy) -> Self {
909        self.strategy = strategy;
910        self
911    }
912
913    /// Set alignment requirement
914    pub fn with_alignment(mut self, alignment: usize) -> Self {
915        self.alignment = alignment;
916        self
917    }
918}
919
/// Defragmentation result information
///
/// Summarizes what one defragmentation pass accomplished.
#[derive(Debug, Clone)]
pub struct DefragmentationResult {
    /// Number of blocks moved during defragmentation
    pub blocks_moved: usize,

    /// Amount of memory compacted in bytes
    pub memory_compacted: usize,

    /// Time taken for defragmentation in milliseconds
    pub duration_ms: f64,

    /// Fragmentation level before defragmentation (0.0 to 1.0)
    pub fragmentation_before: f32,

    /// Fragmentation level after defragmentation (0.0 to 1.0)
    pub fragmentation_after: f32,

    /// Memory efficiency improvement (0.0 to 1.0)
    pub efficiency_improvement: f32,

    /// Whether defragmentation was successful
    pub success: bool,
}

impl DefragmentationResult {
    /// True when the pass succeeded AND improved efficiency by more than
    /// the 10% significance threshold.
    pub fn is_improvement_significant(&self) -> bool {
        let beats_threshold = self.efficiency_improvement > 0.1;
        self.success && beats_threshold
    }

    /// Fraction of `total_memory` that was compacted; 0.0 for an empty
    /// (zero-byte) total to avoid dividing by zero.
    pub fn compaction_ratio(&self, total_memory: usize) -> f32 {
        match total_memory {
            0 => 0.0,
            total => self.memory_compacted as f32 / total as f32,
        }
    }
}
960
/// Outcome report for a memory compaction pass.
#[derive(Debug, Clone)]
pub struct CompactionResult {
    /// How many live allocations were relocated
    pub allocations_moved: usize,

    /// Total bytes copied while relocating allocations
    pub bytes_moved: usize,

    /// Wall-clock duration of the pass, in milliseconds
    pub duration_ms: f64,

    /// Largest contiguous free block size before the pass
    pub largest_free_before: usize,

    /// Largest contiguous free block size after the pass
    pub largest_free_after: usize,

    /// Count of free blocks before the pass
    pub free_blocks_before: usize,

    /// Count of free blocks after the pass
    pub free_blocks_after: usize,

    /// Whether the pass completed successfully
    pub success: bool,
}

impl CompactionResult {
    /// How much the pass consolidated free space: 0.0 means the free-block
    /// count did not drop, 1.0 means all free blocks merged into none.
    /// With no free blocks beforehand, consolidation is trivially complete.
    pub fn consolidation_improvement(&self) -> f32 {
        let before = self.free_blocks_before;
        if before == 0 {
            return 1.0;
        }
        1.0 - self.free_blocks_after as f32 / before as f32
    }

    /// Ratio of largest free block after vs. before the pass.
    /// Returns `f32::INFINITY` when a free block appeared where there was
    /// none, and 0.0 when there was none before or after.
    pub fn largest_block_improvement(&self) -> f32 {
        match (self.largest_free_before, self.largest_free_after) {
            (0, 0) => 0.0,
            (0, _) => f32::INFINITY,
            (before, after) => after as f32 / before as f32,
        }
    }
}
1012
/// Detailed snapshot of memory fragmentation state.
#[derive(Debug, Clone, Default)]
pub struct FragmentationInfo {
    /// Combined fragmentation level (0.0 = none, 1.0 = maximum)
    pub overall_fragmentation: f32,

    /// External fragmentation: unusable gaps between allocations
    pub external_fragmentation: f32,

    /// Internal fragmentation: wasted space inside allocated blocks
    pub internal_fragmentation: f32,

    /// Count of free blocks
    pub free_blocks: usize,

    /// Count of allocated blocks
    pub allocated_blocks: usize,

    /// Size of the largest free block
    pub largest_free_block: usize,

    /// Size of the smallest free block
    pub smallest_free_block: usize,

    /// Mean free block size
    pub average_free_block: usize,

    /// Total free memory in bytes
    pub total_free_memory: usize,

    /// Total allocated memory in bytes
    pub total_allocated_memory: usize,

    /// Memory utilization efficiency (0.0 to 1.0)
    pub utilization_efficiency: f32,

    /// Efficiency of the observed allocation/deallocation pattern
    pub allocation_efficiency: f32,
}

impl FragmentationInfo {
    /// Severe fragmentation: overall above 70% or external above 60%.
    pub fn is_severely_fragmented(&self) -> bool {
        self.external_fragmentation > 0.6 || self.overall_fragmentation > 0.7
    }

    /// Defragmentation pays off when fragmentation is severe, or when the
    /// pool is scattered (many free blocks) yet poorly utilized.
    pub fn would_benefit_from_defragmentation(&self) -> bool {
        if self.is_severely_fragmented() {
            return true;
        }
        self.free_blocks > 10 && self.utilization_efficiency < 0.8
    }

    /// Bucket the overall fragmentation level into a severity band.
    pub fn severity_level(&self) -> FragmentationSeverity {
        match self.overall_fragmentation {
            f if f < 0.2 => FragmentationSeverity::Low,
            f if f < 0.5 => FragmentationSeverity::Medium,
            f if f < 0.8 => FragmentationSeverity::High,
            _ => FragmentationSeverity::Critical,
        }
    }
}

/// Severity bands for fragmentation, ordered from least to most severe.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub enum FragmentationSeverity {
    Low,
    Medium,
    High,
    Critical,
}
1087
/// Policy controlling when and how defragmentation runs.
#[derive(Debug, Clone)]
pub struct DefragmentationPolicy {
    /// Fragmentation level (0.0 to 1.0) that triggers automatic defragmentation
    pub auto_trigger_threshold: f32,

    /// Minimum milliseconds between automatic defragmentation runs
    pub min_interval_ms: u64,

    /// Maximum milliseconds a defragmentation run may take
    pub max_duration_ms: u64,

    /// Which defragmentation strategy to apply
    pub strategy: DefragmentationStrategy,

    /// Whether defragmentation may run in the background
    pub enable_background: bool,

    /// Scheduling priority of the defragmentation process
    pub priority: DefragmentationPriority,

    /// Whether new allocations are paused while defragmenting
    pub pause_allocations: bool,

    /// Memory-pressure level (0.0 to 1.0) that triggers emergency defragmentation
    pub emergency_threshold: f32,
}

impl Default for DefragmentationPolicy {
    /// Defaults favor low-impact housekeeping: incremental background runs at
    /// low priority, triggered at 60% fragmentation (90% under pressure),
    /// at most every 10 s and capped at 5 s each, without pausing allocations.
    fn default() -> Self {
        Self {
            auto_trigger_threshold: 0.6,
            emergency_threshold: 0.9,
            min_interval_ms: 10_000,
            max_duration_ms: 5_000,
            strategy: DefragmentationStrategy::Incremental,
            priority: DefragmentationPriority::Low,
            enable_background: true,
            pause_allocations: false,
        }
    }
}

/// Available defragmentation strategies.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum DefragmentationStrategy {
    /// Compact all memory in one operation
    FullCompaction,

    /// Incremental defragmentation over time
    Incremental,

    /// Only move smaller allocations
    SmallBlocksOnly,

    /// Focus on largest free blocks
    LargeBlocksFirst,

    /// Minimize movement, focus on coalescing
    CoalesceOnly,

    /// Use generational approach (move old allocations)
    Generational,
}

/// Priority levels for the defragmentation process, ordered low to high.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub enum DefragmentationPriority {
    Low,
    Normal,
    High,
    Critical,
}
1161
/// Memory leak detection report
///
/// Produced by leak-detection heuristics; describes one suspected leak site.
// NOTE(review): under `no_std` this relies on `String` being in scope via
// `alloc` — the visible import list only brings in `Box` and `Vec`; confirm
// `alloc::string::String` is imported elsewhere in this file.
#[derive(Debug, Clone)]
pub struct LeakReport {
    /// Type of potential leak detected
    pub leak_type: LeakType,
    /// Number of blocks involved
    pub block_count: usize,
    /// Total size of potentially leaked memory
    pub total_size: usize,
    /// Severity of the potential leak
    pub severity: LeakSeverity,
    /// Human-readable description
    pub description: String,
}
1176
/// Types of memory leaks that can be detected
///
/// Classifies the heuristic that flagged a [`LeakReport`].
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum LeakType {
    /// Too many small allocations that haven't been freed
    TooManyAllocations,
    /// Large allocation that might be a leak
    LargeAllocation,
    /// Long-lived allocation that might be forgotten
    LongLivedAllocation,
    /// Fragmentation causing inefficient memory use
    Fragmentation,
}
1189
/// Severity levels for memory leaks
///
/// Ordered least to most severe, so `Ord` comparisons rank reports.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub enum LeakSeverity {
    /// Minor, likely benign finding
    Low,
    /// Worth investigating when convenient
    Medium,
    /// Likely a real leak; should be investigated soon
    High,
    /// Severe leak threatening memory exhaustion
    Critical,
}
1198
#[cfg(test)]
mod tests {
    //! Unit tests covering the stats types, pool configuration builder, and
    //! the `FreeListPool` allocate/deallocate/reset lifecycle.
    use super::*;
    use crate::device::{Device, DeviceInfo};
    use torsh_core::device::DeviceType;

    /// Build a minimal CPU device; pool tests don't touch real device state.
    fn create_test_device() -> Device {
        let info = DeviceInfo::default();
        Device::new(0, DeviceType::Cpu, "Test CPU".to_string(), info)
    }

    /// A default `MemoryStats` must report all-zero counters and ratios.
    #[test]
    fn test_memory_stats_default() {
        let stats = MemoryStats::default();

        assert_eq!(stats.total_memory, 0);
        assert_eq!(stats.allocated_memory, 0);
        assert_eq!(stats.available_memory, 0);
        assert_eq!(stats.peak_memory, 0);
        assert_eq!(stats.active_allocations, 0);
        assert_eq!(stats.total_allocations, 0);
        assert_eq!(stats.total_deallocations, 0);
        assert_eq!(stats.fragmentation, 0.0);
        assert_eq!(stats.efficiency, 0.0);
    }

    /// `utilization()` is a percentage, and guards the zero-total case.
    #[test]
    fn test_memory_stats_utilization() {
        let mut stats = MemoryStats {
            total_memory: 1000,
            allocated_memory: 300,
            ..Default::default()
        };

        assert!((stats.utilization() - 30.0).abs() < 0.001);

        stats.total_memory = 0;
        assert_eq!(stats.utilization(), 0.0);
    }

    /// Pressure triggers on >90% utilization or high fragmentation.
    #[test]
    fn test_memory_stats_pressure() {
        let mut stats = MemoryStats {
            total_memory: 1000,
            allocated_memory: 850, // 85% utilization
            fragmentation: 0.3,    // 30% fragmentation
            ..Default::default()
        };

        assert!(!stats.is_under_pressure()); // Not quite at 90%

        stats.allocated_memory = 950; // 95% utilization
        assert!(stats.is_under_pressure()); // Over 90%

        stats.allocated_memory = 500; // 50% utilization
        stats.fragmentation = 0.6; // 60% fragmentation
        assert!(stats.is_under_pressure()); // High fragmentation
    }

    /// A default `PoolStats` must report all-zero block accounting.
    #[test]
    fn test_pool_stats_default() {
        let stats = PoolStats::default();

        assert_eq!(stats.capacity, 0);
        assert_eq!(stats.allocated, 0);
        assert_eq!(stats.available, 0);
        assert_eq!(stats.free_blocks, 0);
        assert_eq!(stats.allocated_blocks, 0);
        assert_eq!(stats.largest_free_block, 0);
        assert_eq!(stats.smallest_free_block, 0);
        assert_eq!(stats.average_free_block, 0);
    }

    #[test]
    fn test_allocation_strategy_variants() {
        let strategies = [
            AllocationStrategy::FirstFit,
            AllocationStrategy::BestFit,
            AllocationStrategy::WorstFit,
            AllocationStrategy::NextFit,
        ];

        // Ensure all strategies are distinct
        for (i, strategy1) in strategies.iter().enumerate() {
            for (j, strategy2) in strategies.iter().enumerate() {
                if i != j {
                    assert_ne!(strategy1, strategy2);
                }
            }
        }
    }

    #[test]
    fn test_allocation_lifetime_variants() {
        let lifetimes = [
            AllocationLifetime::Temporary,
            AllocationLifetime::Short,
            AllocationLifetime::Medium,
            AllocationLifetime::Long,
            AllocationLifetime::Persistent,
        ];

        // Ensure all lifetimes are distinct
        for (i, lifetime1) in lifetimes.iter().enumerate() {
            for (j, lifetime2) in lifetimes.iter().enumerate() {
                if i != j {
                    assert_ne!(lifetime1, lifetime2);
                }
            }
        }
    }

    #[test]
    fn test_access_pattern_variants() {
        let patterns = [
            AccessPattern::Random,
            AccessPattern::Sequential,
            AccessPattern::ReadMostly,
            AccessPattern::WriteMostly,
            AccessPattern::Streaming,
        ];

        // Ensure all patterns are distinct
        for (i, pattern1) in patterns.iter().enumerate() {
            for (j, pattern2) in patterns.iter().enumerate() {
                if i != j {
                    assert_ne!(pattern1, pattern2);
                }
            }
        }
    }

    /// Default hint: medium lifetime, random access, first-fit, pooled.
    #[test]
    fn test_allocation_hint_default() {
        let hint = AllocationHint::default();

        assert_eq!(hint.lifetime, AllocationLifetime::Medium);
        assert_eq!(hint.access_pattern, AccessPattern::Random);
        assert_eq!(hint.strategy, AllocationStrategy::FirstFit);
        assert!(hint.use_pool);
    }

    /// Pins the documented `MemoryPoolConfig::default` values.
    #[test]
    fn test_memory_pool_config_default() {
        let config = MemoryPoolConfig::default();

        assert_eq!(config.initial_size, 64 * 1024 * 1024); // 64MB
        assert_eq!(config.max_size, None);
        assert_eq!(config.growth_factor, 1.5);
        assert_eq!(config.strategy, AllocationStrategy::FirstFit);
        assert!(config.enable_coalescing);
        assert_eq!(config.min_block_size, 256);
        assert_eq!(config.alignment, 16);
    }

    /// Each `with_*` builder method must set exactly its own field.
    #[test]
    fn test_memory_pool_config_builder() {
        let config = MemoryPoolConfig::new(128 * 1024 * 1024) // 128MB
            .with_max_size(1024 * 1024 * 1024) // 1GB
            .with_growth_factor(2.0)
            .with_strategy(AllocationStrategy::BestFit)
            .with_alignment(64);

        assert_eq!(config.initial_size, 128 * 1024 * 1024);
        assert_eq!(config.max_size, Some(1024 * 1024 * 1024));
        assert_eq!(config.growth_factor, 2.0);
        assert_eq!(config.strategy, AllocationStrategy::BestFit);
        assert_eq!(config.alignment, 64);
    }

    /// A fresh pool reports one free block covering the full capacity.
    #[test]
    fn test_free_list_pool_creation() {
        let _device = create_test_device();
        let capacity = 1024 * 1024; // 1MB

        // Allocate actual memory for the test
        let layout = std::alloc::Layout::from_size_align(capacity, 8).unwrap();
        let ptr = unsafe { std::alloc::alloc(layout) };
        assert!(!ptr.is_null());

        let pool = FreeListPool::new(ptr, capacity);
        assert_eq!(pool.capacity(), capacity);
        assert_eq!(pool.available(), capacity);

        let stats = pool.stats();
        assert_eq!(stats.capacity, capacity);
        assert_eq!(stats.available, capacity);
        assert_eq!(stats.allocated, 0);
        assert_eq!(stats.free_blocks, 1);
        assert_eq!(stats.allocated_blocks, 0);
        assert_eq!(stats.largest_free_block, capacity);

        // Clean up allocated memory
        unsafe {
            std::alloc::dealloc(ptr, layout);
        }
    }

    /// Allocations update the accounting; oversized requests must fail.
    #[test]
    fn test_free_list_pool_allocation() {
        let _device = create_test_device();
        let capacity = 1024;

        // Allocate actual memory for the test
        let layout = std::alloc::Layout::from_size_align(capacity, 8).unwrap();
        let ptr = unsafe { std::alloc::alloc(layout) };
        assert!(!ptr.is_null());

        let mut pool = FreeListPool::new(ptr, capacity);

        // Allocate some memory
        let ptr1 = pool.allocate(256, 16);
        assert!(ptr1.is_ok());

        let stats = pool.stats();
        assert_eq!(stats.allocated, 256);
        assert!(stats.available < capacity); // Should be less due to alignment
        assert_eq!(stats.allocated_blocks, 1);

        // Allocate more memory
        let ptr2 = pool.allocate(128, 16);
        assert!(ptr2.is_ok());

        let stats = pool.stats();
        assert_eq!(stats.allocated, 256 + 128);
        assert_eq!(stats.allocated_blocks, 2);

        // Try to allocate more than available
        let ptr3 = pool.allocate(1024, 16);
        assert!(ptr3.is_err());

        // Clean up allocated memory
        unsafe {
            std::alloc::dealloc(ptr, layout);
        }
    }

    /// Deallocating returns bytes and block counts to the pool.
    #[test]
    fn test_free_list_pool_deallocation() {
        let _device = create_test_device();
        let capacity = 1024;

        // Allocate actual memory for the test
        let layout = std::alloc::Layout::from_size_align(capacity, 8).unwrap();
        let ptr = unsafe { std::alloc::alloc(layout) };
        assert!(!ptr.is_null());

        let mut pool = FreeListPool::new(ptr, capacity);

        // Allocate some memory
        let ptr1 = pool.allocate(256, 16).unwrap();
        let ptr2 = pool.allocate(128, 16).unwrap();

        assert_eq!(pool.stats().allocated_blocks, 2);

        // Deallocate first allocation
        let result = pool.deallocate(ptr1, 256);
        assert!(result.is_ok());

        let stats = pool.stats();
        assert_eq!(stats.allocated, 128);
        assert_eq!(stats.allocated_blocks, 1);

        // Deallocate second allocation
        let result = pool.deallocate(ptr2, 128);
        assert!(result.is_ok());

        let stats = pool.stats();
        assert_eq!(stats.allocated, 0);
        assert_eq!(stats.allocated_blocks, 0);

        // Clean up allocated memory
        unsafe {
            std::alloc::dealloc(ptr, layout);
        }
    }

    /// `reset()` discards all allocations and restores the initial state.
    #[test]
    fn test_free_list_pool_reset() {
        let _device = create_test_device();
        let capacity = 1024;

        // Allocate actual memory for the test
        let layout = std::alloc::Layout::from_size_align(capacity, 8).unwrap();
        let ptr = unsafe { std::alloc::alloc(layout) };
        assert!(!ptr.is_null());

        let mut pool = FreeListPool::new(ptr, capacity);

        // Allocate some memory
        let _ptr1 = pool.allocate(256, 16).unwrap();
        let _ptr2 = pool.allocate(128, 16).unwrap();

        assert_eq!(pool.stats().allocated_blocks, 2);

        // Reset the pool
        let result = pool.reset();
        assert!(result.is_ok());

        let stats = pool.stats();
        assert_eq!(stats.allocated, 0);
        assert_eq!(stats.allocated_blocks, 0);
        assert_eq!(stats.free_blocks, 1);
        assert_eq!(stats.available, capacity);
        assert_eq!(stats.largest_free_block, capacity);

        // Clean up allocated memory
        unsafe {
            std::alloc::dealloc(ptr, layout);
        }
    }

    /// `find_free_block` locates a fitting block and rejects oversized asks.
    #[test]
    fn test_free_list_pool_find_free_block() {
        let _device = create_test_device();
        let capacity = 1024;

        // Allocate actual memory for the test
        let layout = std::alloc::Layout::from_size_align(capacity, 8).unwrap();
        let ptr = unsafe { std::alloc::alloc(layout) };
        assert!(!ptr.is_null());

        let pool = FreeListPool::new(ptr, capacity);

        // Should find a block for reasonable allocation
        let block_idx = pool.find_free_block(256, 16);
        assert!(block_idx.is_some());
        assert_eq!(block_idx.unwrap(), 0); // First (and only) block

        // Should not find a block for oversized allocation
        let block_idx = pool.find_free_block(2048, 16);
        assert!(block_idx.is_none());

        // Clean up allocated memory
        unsafe {
            std::alloc::dealloc(ptr, layout);
        }
    }
}