// oxigdal_core/memory/allocator.rs
//! Custom Memory Allocators for Geospatial Data
//!
//! This module provides specialized allocators optimized for geospatial workloads:
//! - Slab allocator for fixed-size blocks (tiles)
//! - Buddy allocator for variable-size blocks
//! - Thread-local allocation pools
//! - Allocation tracking and statistics
//! - Memory leak detection in debug mode

// Unsafe code is necessary for custom allocators
#![allow(unsafe_code)]
// Default impls use expect() for configuration errors - acceptable here
#![allow(clippy::expect_used)]
// Arc with custom allocator types that have unsafe Send+Sync impls
#![allow(clippy::arc_with_non_send_sync)]
use crate::error::{OxiGdalError, Result};
use parking_lot::{Mutex, RwLock};
use std::alloc::{Layout, alloc, dealloc};
use std::collections::{BTreeMap, HashMap};
use std::ptr::NonNull;
use std::sync::Arc;
use std::sync::atomic::{AtomicU64, AtomicUsize, Ordering};
24
/// Minimum alignment (in bytes) guaranteed for every allocation.
pub const MIN_ALIGNMENT: usize = 16;

/// Maximum block size handled by the buddy allocator (16 MiB).
pub const MAX_BLOCK_SIZE: usize = 16 * 1024 * 1024;

/// Slab size classes covering common geospatial tile dimensions.
///
/// Sorted ascending; a request is served from the smallest class that fits.
pub const SLAB_SIZES: &[usize] = &[
    256,       // 16x16 bytes
    1024,      // 32x32 bytes
    4096,      // 64x64 bytes or 4KB page
    16_384,    // 128x128 bytes
    65_536,    // 256x256 bytes or 64KB
    262_144,   // 512x512 bytes or 256KB
    1_048_576, // 1024x1024 bytes or 1MB
    4_194_304, // 2048x2048 bytes or 4MB
];
42
/// Statistics for allocator performance tracking.
///
/// All counters are relaxed atomics, so the struct can be shared across
/// threads behind an `Arc` without extra locking. Each counter is exact,
/// but values read from several counters together are only approximately
/// consistent under concurrency.
#[derive(Debug, Default)]
pub struct AllocatorStats {
    /// Total number of allocations
    pub total_allocations: AtomicU64,
    /// Total number of deallocations
    pub total_deallocations: AtomicU64,
    /// Current bytes allocated
    pub bytes_allocated: AtomicUsize,
    /// Peak bytes allocated
    pub peak_bytes_allocated: AtomicUsize,
    /// Number of allocation failures
    pub allocation_failures: AtomicU64,
    /// Number of slab hits
    pub slab_hits: AtomicU64,
    /// Number of slab misses
    pub slab_misses: AtomicU64,
}

impl AllocatorStats {
    /// Create new statistics with all counters at zero.
    #[must_use]
    pub fn new() -> Self {
        Self::default()
    }

    /// Record an allocation of `size` bytes and update the peak watermark.
    pub fn record_allocation(&self, size: usize) {
        self.total_allocations.fetch_add(1, Ordering::Relaxed);
        let new_allocated = self.bytes_allocated.fetch_add(size, Ordering::Relaxed) + size;

        // CAS loop: raise the peak only if the total we observed is higher;
        // retry with the freshly-read peak if another thread won the race.
        let mut peak = self.peak_bytes_allocated.load(Ordering::Relaxed);
        while new_allocated > peak {
            match self.peak_bytes_allocated.compare_exchange_weak(
                peak,
                new_allocated,
                Ordering::Relaxed,
                Ordering::Relaxed,
            ) {
                Ok(_) => break,
                Err(x) => peak = x,
            }
        }
    }

    /// Record a deallocation of `size` bytes.
    pub fn record_deallocation(&self, size: usize) {
        self.total_deallocations.fetch_add(1, Ordering::Relaxed);
        self.bytes_allocated.fetch_sub(size, Ordering::Relaxed);
    }

    /// Record an allocation failure.
    pub fn record_failure(&self) {
        self.allocation_failures.fetch_add(1, Ordering::Relaxed);
    }

    /// Record a slab hit (a request satisfied from a free list).
    pub fn record_slab_hit(&self) {
        self.slab_hits.fetch_add(1, Ordering::Relaxed);
    }

    /// Record a slab miss (no recycled block was available).
    pub fn record_slab_miss(&self) {
        self.slab_misses.fetch_add(1, Ordering::Relaxed);
    }

    /// Get the number of currently outstanding allocations.
    #[must_use]
    pub fn current_allocations(&self) -> u64 {
        // BUG FIX: saturating_sub instead of `-`. The two loads are not
        // atomic as a pair, so a deallocation landing between them could
        // briefly make deallocations > allocations and underflow the u64
        // subtraction (panicking in debug builds).
        self.total_allocations
            .load(Ordering::Relaxed)
            .saturating_sub(self.total_deallocations.load(Ordering::Relaxed))
    }

    /// Get the fraction of slab requests served from a free list.
    ///
    /// Returns 0.0 when no hits or misses have been recorded yet.
    #[must_use]
    pub fn slab_hit_rate(&self) -> f64 {
        let hits = self.slab_hits.load(Ordering::Relaxed);
        let misses = self.slab_misses.load(Ordering::Relaxed);
        let total = hits + misses;
        if total == 0 {
            0.0
        } else {
            hits as f64 / total as f64
        }
    }
}
128
129/// A block in the slab allocator
130#[allow(dead_code)]
131struct SlabBlock {
132    ptr: NonNull<u8>,
133    size: usize,
134}
135
136impl Drop for SlabBlock {
137    fn drop(&mut self) {
138        // SAFETY: The layout matches the one used during allocation.
139        // The pointer is valid and aligned, and we have exclusive ownership.
140        unsafe {
141            let layout = Layout::from_size_align_unchecked(self.size, MIN_ALIGNMENT);
142            dealloc(self.ptr.as_ptr(), layout);
143        }
144    }
145}
146
/// Slab allocator for fixed-size blocks
///
/// Keeps one free list per size class in [`SLAB_SIZES`]; freed blocks are
/// recycled from the free list instead of going back to the system
/// allocator. All blocks are aligned to [`MIN_ALIGNMENT`].
pub struct SlabAllocator {
    /// Available blocks for each size class, keyed by slab size in bytes
    free_lists: Arc<RwLock<HashMap<usize, Vec<NonNull<u8>>>>>,
    /// Statistics shared with callers via `stats()`
    stats: Arc<AllocatorStats>,
    /// Block tracking for leak detection in debug mode
    /// (maps slab size -> currently outstanding pointers)
    #[cfg(debug_assertions)]
    allocated_blocks: Arc<Mutex<HashMap<usize, Vec<NonNull<u8>>>>>,
}

// SAFETY: SlabAllocator is Send because all NonNull pointers are protected
// by Arc<RwLock> or Arc<Mutex>, providing proper synchronization
unsafe impl Send for SlabAllocator {}

// SAFETY: SlabAllocator is Sync because all NonNull pointers are protected
// by Arc<RwLock> or Arc<Mutex>, providing proper synchronization
unsafe impl Sync for SlabAllocator {}
165
166impl SlabAllocator {
167    /// Create a new slab allocator
168    #[must_use]
169    pub fn new() -> Self {
170        let mut free_lists = HashMap::new();
171        for &size in SLAB_SIZES {
172            free_lists.insert(size, Vec::new());
173        }
174
175        Self {
176            free_lists: Arc::new(RwLock::new(free_lists)),
177            stats: Arc::new(AllocatorStats::new()),
178            #[cfg(debug_assertions)]
179            allocated_blocks: Arc::new(Mutex::new(HashMap::new())),
180        }
181    }
182
183    /// Allocate a block of the given size
184    pub fn allocate(&self, size: usize) -> Result<NonNull<u8>> {
185        // Find the appropriate slab size
186        let slab_size = SLAB_SIZES
187            .iter()
188            .find(|&&s| s >= size)
189            .copied()
190            .ok_or_else(|| {
191                self.stats.record_slab_miss();
192                OxiGdalError::invalid_parameter(
193                    "parameter",
194                    format!(
195                        "Size {} exceeds maximum slab size {}",
196                        size,
197                        SLAB_SIZES.last().copied().unwrap_or(0)
198                    ),
199                )
200            })?;
201
202        // Try to get a block from the free list
203        {
204            let mut free_lists = self.free_lists.write();
205            if let Some(blocks) = free_lists.get_mut(&slab_size) {
206                if let Some(ptr) = blocks.pop() {
207                    self.stats.record_slab_hit();
208                    self.stats.record_allocation(slab_size);
209
210                    #[cfg(debug_assertions)]
211                    {
212                        let mut allocated = self.allocated_blocks.lock();
213                        allocated.entry(slab_size).or_default().push(ptr);
214                    }
215
216                    return Ok(ptr);
217                }
218            }
219        }
220
221        // No free block available, allocate a new one
222        self.stats.record_slab_miss();
223        let layout = Layout::from_size_align(slab_size, MIN_ALIGNMENT)
224            .map_err(|e| OxiGdalError::allocation_error(e.to_string()))?;
225
226        // SAFETY: We've validated the layout and check for null after allocation.
227        // NonNull::new_unchecked is safe because we've verified the pointer is non-null.
228        let ptr = unsafe {
229            let raw_ptr = alloc(layout);
230            if raw_ptr.is_null() {
231                self.stats.record_failure();
232                return Err(OxiGdalError::allocation_error(
233                    "Failed to allocate memory".to_string(),
234                ));
235            }
236            NonNull::new_unchecked(raw_ptr)
237        };
238
239        self.stats.record_allocation(slab_size);
240
241        #[cfg(debug_assertions)]
242        {
243            let mut allocated = self.allocated_blocks.lock();
244            allocated.entry(slab_size).or_default().push(ptr);
245        }
246
247        Ok(ptr)
248    }
249
250    /// Deallocate a block
251    pub fn deallocate(&self, ptr: NonNull<u8>, size: usize) -> Result<()> {
252        // Find the appropriate slab size
253        let slab_size = SLAB_SIZES
254            .iter()
255            .find(|&&s| s >= size)
256            .copied()
257            .ok_or_else(|| {
258                OxiGdalError::invalid_parameter(
259                    "parameter",
260                    format!("Size {size} exceeds maximum slab size"),
261                )
262            })?;
263
264        #[cfg(debug_assertions)]
265        {
266            let mut allocated = self.allocated_blocks.lock();
267            if let Some(blocks) = allocated.get_mut(&slab_size) {
268                if let Some(pos) = blocks.iter().position(|&p| p == ptr) {
269                    blocks.swap_remove(pos);
270                } else {
271                    return Err(OxiGdalError::invalid_parameter(
272                        "parameter",
273                        "Block not found in allocated list".to_string(),
274                    ));
275                }
276            } else {
277                return Err(OxiGdalError::invalid_parameter(
278                    "parameter",
279                    "Invalid slab size for deallocation".to_string(),
280                ));
281            }
282        }
283
284        // Return the block to the free list
285        let mut free_lists = self.free_lists.write();
286        free_lists.entry(slab_size).or_default().push(ptr);
287
288        self.stats.record_deallocation(slab_size);
289        Ok(())
290    }
291
292    /// Get statistics
293    #[must_use]
294    pub fn stats(&self) -> Arc<AllocatorStats> {
295        Arc::clone(&self.stats)
296    }
297
298    /// Check for memory leaks (debug mode only)
299    #[cfg(debug_assertions)]
300    pub fn check_leaks(&self) -> Result<()> {
301        let allocated = self.allocated_blocks.lock();
302        let mut total_leaks = 0;
303        for (size, blocks) in allocated.iter() {
304            if !blocks.is_empty() {
305                eprintln!(
306                    "Memory leak detected: {} blocks of size {} bytes",
307                    blocks.len(),
308                    size
309                );
310                total_leaks += blocks.len();
311            }
312        }
313
314        if total_leaks > 0 {
315            Err(OxiGdalError::invalid_state(format!(
316                "Detected {total_leaks} memory leaks"
317            )))
318        } else {
319            Ok(())
320        }
321    }
322}
323
324impl Default for SlabAllocator {
325    fn default() -> Self {
326        Self::new()
327    }
328}
329
/// Buddy allocator for variable-size blocks
///
/// Requested sizes are rounded up to a power of two; freed blocks are kept
/// on per-size free lists for reuse. NOTE(review): freed blocks are never
/// coalesced with their buddy and there is no `Drop` impl, so free-listed
/// memory is retained until process exit.
pub struct BuddyAllocator {
    /// Free lists for each order (power of 2), keyed by block size in bytes
    free_lists: Arc<Mutex<BTreeMap<usize, Vec<NonNull<u8>>>>>,
    /// Minimum block size (power of 2)
    min_block_size: usize,
    /// Maximum block size (power of 2)
    max_block_size: usize,
    /// Statistics shared with callers via `stats()`
    stats: Arc<AllocatorStats>,
}

// SAFETY: BuddyAllocator is Send because all NonNull pointers are protected
// by Arc<Mutex>, providing proper synchronization
unsafe impl Send for BuddyAllocator {}

// SAFETY: BuddyAllocator is Sync because all NonNull pointers are protected
// by Arc<Mutex>, providing proper synchronization
unsafe impl Sync for BuddyAllocator {}
349
350impl BuddyAllocator {
351    /// Create a new buddy allocator
352    pub fn new(min_block_size: usize, max_block_size: usize) -> Result<Self> {
353        if !min_block_size.is_power_of_two() || !max_block_size.is_power_of_two() {
354            return Err(OxiGdalError::invalid_parameter(
355                "parameter",
356                "Block sizes must be powers of 2".to_string(),
357            ));
358        }
359
360        if min_block_size > max_block_size {
361            return Err(OxiGdalError::invalid_parameter(
362                "parameter",
363                "Min block size must be <= max block size".to_string(),
364            ));
365        }
366
367        Ok(Self {
368            free_lists: Arc::new(Mutex::new(BTreeMap::new())),
369            min_block_size,
370            max_block_size,
371            stats: Arc::new(AllocatorStats::new()),
372        })
373    }
374
375    /// Create with default sizes
376    ///
377    /// # Errors
378    ///
379    /// Returns an error if allocation fails
380    pub fn with_defaults() -> Result<Self> {
381        Self::new(MIN_ALIGNMENT, MAX_BLOCK_SIZE)
382    }
383
384    /// Round up to next power of 2
385    fn next_power_of_two(&self, size: usize) -> usize {
386        if size <= self.min_block_size {
387            self.min_block_size
388        } else {
389            size.next_power_of_two().min(self.max_block_size)
390        }
391    }
392
393    /// Allocate a block
394    pub fn allocate(&self, size: usize) -> Result<NonNull<u8>> {
395        let block_size = self.next_power_of_two(size);
396
397        if block_size > self.max_block_size {
398            self.stats.record_failure();
399            return Err(OxiGdalError::allocation_error(format!(
400                "Requested size {} exceeds maximum block size {}",
401                size, self.max_block_size
402            )));
403        }
404
405        // Try to find a free block
406        let mut free_lists = self.free_lists.lock();
407
408        // Look for a block of the exact size or larger
409        let mut found_size = None;
410        for (&list_size, blocks) in free_lists.range(block_size..) {
411            if !blocks.is_empty() {
412                found_size = Some(list_size);
413                break;
414            }
415        }
416
417        if let Some(found_size) = found_size {
418            // Take a block from the free list
419            let blocks = free_lists
420                .get_mut(&found_size)
421                .ok_or_else(|| OxiGdalError::invalid_state("Free list disappeared".to_string()))?;
422            let ptr = blocks
423                .pop()
424                .ok_or_else(|| OxiGdalError::invalid_state("Block disappeared".to_string()))?;
425
426            // Split the block if it's larger than needed
427            let mut current_size = found_size;
428            while current_size > block_size {
429                current_size /= 2;
430                if current_size >= block_size {
431                    // Create buddy block
432                    // SAFETY: The pointer arithmetic is within the allocated block bounds.
433                    // current_size is guaranteed to be less than the original allocation size.
434                    let buddy_ptr =
435                        unsafe { NonNull::new_unchecked(ptr.as_ptr().add(current_size)) };
436                    free_lists.entry(current_size).or_default().push(buddy_ptr);
437                }
438            }
439
440            self.stats.record_allocation(block_size);
441            Ok(ptr)
442        } else {
443            // No free block available, allocate a new one
444            drop(free_lists);
445
446            let layout = Layout::from_size_align(block_size, block_size)
447                .map_err(|e| OxiGdalError::allocation_error(e.to_string()))?;
448
449            // SAFETY: Layout is valid and we check for null before creating NonNull.
450            // The pointer will be properly aligned according to the layout.
451            let ptr = unsafe {
452                let raw_ptr = alloc(layout);
453                if raw_ptr.is_null() {
454                    self.stats.record_failure();
455                    return Err(OxiGdalError::allocation_error(
456                        "Failed to allocate memory".to_string(),
457                    ));
458                }
459                NonNull::new_unchecked(raw_ptr)
460            };
461
462            self.stats.record_allocation(block_size);
463            Ok(ptr)
464        }
465    }
466
467    /// Deallocate a block
468    pub fn deallocate(&self, ptr: NonNull<u8>, size: usize) -> Result<()> {
469        let block_size = self.next_power_of_two(size);
470
471        let mut free_lists = self.free_lists.lock();
472        free_lists.entry(block_size).or_default().push(ptr);
473
474        self.stats.record_deallocation(block_size);
475        Ok(())
476    }
477
478    /// Get statistics
479    #[must_use]
480    pub fn stats(&self) -> Arc<AllocatorStats> {
481        Arc::clone(&self.stats)
482    }
483}
484
/// Thread-local allocator pool
///
/// Routes small requests (up to the largest [`SLAB_SIZES`] class) to a
/// [`SlabAllocator`] and larger ones to a [`BuddyAllocator`].
pub struct ThreadLocalAllocator {
    /// Slab allocator for small fixed-size allocations
    slab: SlabAllocator,
    /// Buddy allocator for larger variable-size allocations
    buddy: BuddyAllocator,
    /// Threshold for using slab vs buddy (largest slab size class)
    slab_threshold: usize,
}

// SAFETY: ThreadLocalAllocator is Send because it contains SlabAllocator and
// BuddyAllocator which are both Send
// NOTE(review): these unsafe impls appear redundant — both fields are
// already Send + Sync, so the auto traits should apply; consider removing.
unsafe impl Send for ThreadLocalAllocator {}

// SAFETY: ThreadLocalAllocator is Sync because it contains SlabAllocator and
// BuddyAllocator which are both Sync
unsafe impl Sync for ThreadLocalAllocator {}
502
503impl ThreadLocalAllocator {
504    /// Create a new thread-local allocator
505    ///
506    /// # Errors
507    ///
508    /// Returns an error if allocator initialization fails
509    pub fn new() -> Result<Self> {
510        Ok(Self {
511            slab: SlabAllocator::new(),
512            buddy: BuddyAllocator::with_defaults()?,
513            slab_threshold: *SLAB_SIZES.last().ok_or_else(|| OxiGdalError::Internal {
514                message: "SLAB_SIZES is empty".to_string(),
515            })?,
516        })
517    }
518
519    /// Allocate memory
520    pub fn allocate(&self, size: usize, alignment: usize) -> Result<NonNull<u8>> {
521        if alignment > MIN_ALIGNMENT && alignment > size {
522            return Err(OxiGdalError::invalid_parameter(
523                "parameter",
524                "Alignment requirements not supported".to_string(),
525            ));
526        }
527
528        if size <= self.slab_threshold {
529            self.slab.allocate(size)
530        } else {
531            self.buddy.allocate(size)
532        }
533    }
534
535    /// Deallocate memory
536    pub fn deallocate(&self, ptr: NonNull<u8>, size: usize) -> Result<()> {
537        if size <= self.slab_threshold {
538            self.slab.deallocate(ptr, size)
539        } else {
540            self.buddy.deallocate(ptr, size)
541        }
542    }
543
544    /// Get combined statistics
545    #[must_use]
546    pub fn stats(&self) -> (Arc<AllocatorStats>, Arc<AllocatorStats>) {
547        (self.slab.stats(), self.buddy.stats())
548    }
549}
550
/// Generic allocator trait
///
/// Common interface implemented by [`SlabAllocator`], [`BuddyAllocator`]
/// and [`ThreadLocalAllocator`] so callers can be generic over the
/// allocation strategy.
pub trait Allocator: Send + Sync {
    /// Allocate memory of at least `size` bytes.
    ///
    /// NOTE: the slab and buddy implementations ignore the `alignment`
    /// hint; only `ThreadLocalAllocator` validates it.
    fn allocate(&self, size: usize, alignment: usize) -> Result<NonNull<u8>>;

    /// Deallocate memory previously returned by `allocate`.
    ///
    /// `size` must match the value used at allocation time.
    fn deallocate(&self, ptr: NonNull<u8>, size: usize) -> Result<()>;

    /// Get statistics
    fn stats(&self) -> Arc<AllocatorStats>;
}
562
563impl Allocator for SlabAllocator {
564    fn allocate(&self, size: usize, _alignment: usize) -> Result<NonNull<u8>> {
565        self.allocate(size)
566    }
567
568    fn deallocate(&self, ptr: NonNull<u8>, size: usize) -> Result<()> {
569        self.deallocate(ptr, size)
570    }
571
572    fn stats(&self) -> Arc<AllocatorStats> {
573        self.stats()
574    }
575}
576
577impl Allocator for BuddyAllocator {
578    fn allocate(&self, size: usize, _alignment: usize) -> Result<NonNull<u8>> {
579        self.allocate(size)
580    }
581
582    fn deallocate(&self, ptr: NonNull<u8>, size: usize) -> Result<()> {
583        self.deallocate(ptr, size)
584    }
585
586    fn stats(&self) -> Arc<AllocatorStats> {
587        self.stats()
588    }
589}
590
591impl Allocator for ThreadLocalAllocator {
592    fn allocate(&self, size: usize, alignment: usize) -> Result<NonNull<u8>> {
593        self.allocate(size, alignment)
594    }
595
596    fn deallocate(&self, ptr: NonNull<u8>, size: usize) -> Result<()> {
597        self.deallocate(ptr, size)
598    }
599
600    fn stats(&self) -> Arc<AllocatorStats> {
601        // Return slab stats for now
602        self.slab.stats()
603    }
604}
605
#[cfg(test)]
mod tests {
    use super::*;

    /// True if `ptr` satisfies `align`-byte alignment.
    fn is_aligned(ptr: NonNull<u8>, align: usize) -> bool {
        (ptr.as_ptr() as usize) % align == 0
    }

    #[test]
    fn test_slab_allocator() {
        let allocator = SlabAllocator::new();

        // NonNull can never be null, so the old `!is_null()` asserts (and
        // the #[allow(useless_ptr_null_checks)]) were tautologies; check the
        // documented alignment contract instead.
        let ptr1 = allocator
            .allocate(100)
            .expect("Slab allocator should allocate 100 bytes");
        assert!(is_aligned(ptr1, MIN_ALIGNMENT));

        let ptr2 = allocator
            .allocate(200)
            .expect("Slab allocator should allocate 200 bytes");
        assert!(is_aligned(ptr2, MIN_ALIGNMENT));
        assert_ne!(ptr1, ptr2);

        // Deallocate
        allocator
            .deallocate(ptr1, 100)
            .expect("Deallocation should succeed");
        allocator
            .deallocate(ptr2, 200)
            .expect("Deallocation should succeed");

        // Check stats
        let stats = allocator.stats();
        assert_eq!(stats.total_allocations.load(Ordering::Relaxed), 2);
        assert_eq!(stats.total_deallocations.load(Ordering::Relaxed), 2);
    }

    #[test]
    fn test_buddy_allocator() {
        let allocator =
            BuddyAllocator::with_defaults().expect("Default buddy allocator should be created");

        // Fresh buddy blocks are aligned to their power-of-two block size.
        let ptr1 = allocator
            .allocate(1000)
            .expect("Buddy allocator should allocate 1000 bytes");
        let ptr2 = allocator
            .allocate(2000)
            .expect("Buddy allocator should allocate 2000 bytes");

        assert!(is_aligned(ptr1, 1024));
        assert!(is_aligned(ptr2, 2048));

        // Deallocate
        allocator
            .deallocate(ptr1, 1000)
            .expect("Buddy deallocation should succeed");
        allocator
            .deallocate(ptr2, 2000)
            .expect("Buddy deallocation should succeed");
    }

    #[test]
    fn test_thread_local_allocator() {
        let allocator =
            ThreadLocalAllocator::new().expect("Thread-local allocator should be created");

        // Small allocation: routed to the slab allocator.
        let ptr1 = allocator
            .allocate(256, MIN_ALIGNMENT)
            .expect("Thread-local allocator should allocate small block");
        assert!(is_aligned(ptr1, MIN_ALIGNMENT));

        // Large allocation: 8 MiB exceeds the 4 MiB slab threshold, so this
        // actually exercises the buddy path. (The old test used 1 MiB, which
        // is a slab size class and never reached the buddy allocator.)
        let large = 8 * 1024 * 1024;
        let ptr2 = allocator
            .allocate(large, MIN_ALIGNMENT)
            .expect("Thread-local allocator should allocate large block");
        assert!(is_aligned(ptr2, MIN_ALIGNMENT));

        // Deallocate
        allocator
            .deallocate(ptr1, 256)
            .expect("Thread-local deallocation should succeed");
        allocator
            .deallocate(ptr2, large)
            .expect("Thread-local deallocation should succeed");
    }

    #[test]
    fn test_allocator_stats() {
        let stats = AllocatorStats::new();

        stats.record_allocation(1024);
        stats.record_allocation(2048);

        assert_eq!(stats.total_allocations.load(Ordering::Relaxed), 2);
        assert_eq!(stats.bytes_allocated.load(Ordering::Relaxed), 3072);
        assert_eq!(stats.peak_bytes_allocated.load(Ordering::Relaxed), 3072);

        stats.record_deallocation(1024);
        assert_eq!(stats.bytes_allocated.load(Ordering::Relaxed), 2048);
        assert_eq!(stats.current_allocations(), 1);
    }
}