1#![allow(unsafe_code)]
12#![allow(clippy::expect_used)]
14#![allow(clippy::arc_with_non_send_sync)]
16
17use crate::error::{OxiGdalError, Result};
18use parking_lot::{Mutex, RwLock};
19use std::alloc::{Layout, alloc, dealloc};
20use std::collections::{BTreeMap, HashMap};
21use std::ptr::NonNull;
22use std::sync::Arc;
23use std::sync::atomic::{AtomicU64, AtomicUsize, Ordering};
24
/// Minimum alignment (bytes) guaranteed for slab allocations and used as the
/// default smallest buddy block size.
pub const MIN_ALIGNMENT: usize = 16;

/// Largest block size (bytes) the default buddy allocator hands out (16 MiB).
pub const MAX_BLOCK_SIZE: usize = 16 * 1024 * 1024;

/// Slab size classes in bytes, ascending (256 B .. 4 MiB); requests are
/// rounded up to the smallest class that fits.
pub const SLAB_SIZES: &[usize] = &[
    256, 1024, 4096, 16384, 65536, 262_144, 1_048_576, 4_194_304, ];
42
/// Lock-free counters describing allocator activity.
///
/// All counters use `Ordering::Relaxed`: they are statistics, not
/// synchronization, so no cross-thread ordering guarantees are needed.
#[derive(Debug, Default)]
pub struct AllocatorStats {
    /// Total allocations handed out.
    pub total_allocations: AtomicU64,
    /// Total blocks returned.
    pub total_deallocations: AtomicU64,
    /// Bytes currently handed out to callers.
    pub bytes_allocated: AtomicUsize,
    /// High-water mark of `bytes_allocated`.
    pub peak_bytes_allocated: AtomicUsize,
    /// Allocation attempts that failed.
    pub allocation_failures: AtomicU64,
    /// Requests served from a slab free list.
    pub slab_hits: AtomicU64,
    /// Requests that missed the slab cache.
    pub slab_misses: AtomicU64,
}

impl AllocatorStats {
    /// Creates a zeroed statistics block.
    #[must_use]
    pub fn new() -> Self {
        Self::default()
    }

    /// Records one allocation of `size` bytes and raises the peak if needed.
    pub fn record_allocation(&self, size: usize) {
        self.total_allocations.fetch_add(1, Ordering::Relaxed);
        let now_allocated = self.bytes_allocated.fetch_add(size, Ordering::Relaxed) + size;

        // `fetch_max` atomically raises the high-water mark — equivalent to a
        // compare-exchange loop, expressed in a single call.
        self.peak_bytes_allocated
            .fetch_max(now_allocated, Ordering::Relaxed);
    }

    /// Records one deallocation of `size` bytes.
    pub fn record_deallocation(&self, size: usize) {
        self.total_deallocations.fetch_add(1, Ordering::Relaxed);
        self.bytes_allocated.fetch_sub(size, Ordering::Relaxed);
    }

    /// Records a failed allocation attempt.
    pub fn record_failure(&self) {
        self.allocation_failures.fetch_add(1, Ordering::Relaxed);
    }

    /// Records a request served from a slab free list.
    pub fn record_slab_hit(&self) {
        self.slab_hits.fetch_add(1, Ordering::Relaxed);
    }

    /// Records a request that missed the slab cache.
    pub fn record_slab_miss(&self) {
        self.slab_misses.fetch_add(1, Ordering::Relaxed);
    }

    /// Allocations not yet matched by a deallocation.
    pub fn current_allocations(&self) -> u64 {
        let allocs = self.total_allocations.load(Ordering::Relaxed);
        let frees = self.total_deallocations.load(Ordering::Relaxed);
        allocs - frees
    }

    /// Fraction of slab requests served from the cache, in `[0.0, 1.0]`;
    /// `0.0` when no slab activity has been recorded.
    pub fn slab_hit_rate(&self) -> f64 {
        let hits = self.slab_hits.load(Ordering::Relaxed);
        let total = hits + self.slab_misses.load(Ordering::Relaxed);
        match total {
            0 => 0.0,
            t => hits as f64 / t as f64,
        }
    }
}
128
/// Owned heap block that frees itself on drop.
///
/// NOTE(review): currently unreferenced (hence the `dead_code` allow) — the
/// slab free lists store raw `NonNull<u8>` instead of this RAII wrapper.
/// Confirm whether it is planned for future use or should be removed.
#[allow(dead_code)]
struct SlabBlock {
    // Start of the allocation.
    ptr: NonNull<u8>,
    // Size in bytes of the allocation's layout.
    size: usize,
}

impl Drop for SlabBlock {
    fn drop(&mut self) {
        // SAFETY: assumes `ptr` was produced by `alloc` with a layout of
        // exactly (`size`, `MIN_ALIGNMENT`). Nothing in this file constructs
        // a `SlabBlock`, so future call sites must uphold this invariant.
        unsafe {
            let layout = Layout::from_size_align_unchecked(self.size, MIN_ALIGNMENT);
            dealloc(self.ptr.as_ptr(), layout);
        }
    }
}
146
/// Pool allocator that serves the fixed [`SLAB_SIZES`] classes and caches
/// freed blocks in per-class free lists for fast reuse.
pub struct SlabAllocator {
    /// Per-slab-size cache of freed blocks awaiting reuse.
    free_lists: Arc<RwLock<HashMap<usize, Vec<NonNull<u8>>>>>,
    /// Shared counters, also exposed via `stats()`.
    stats: Arc<AllocatorStats>,
    /// Debug-only ledger of outstanding blocks, consumed by `check_leaks`.
    #[cfg(debug_assertions)]
    allocated_blocks: Arc<Mutex<HashMap<usize, Vec<NonNull<u8>>>>>,
}

// SAFETY: required because `NonNull<u8>` is neither `Send` nor `Sync`. All
// pointer collections are guarded by `RwLock`/`Mutex`, and the pointed-to
// heap bytes have no thread affinity.
unsafe impl Send for SlabAllocator {}

// SAFETY: see the `Send` impl above — all shared access goes through locks.
unsafe impl Sync for SlabAllocator {}
165
166impl SlabAllocator {
167 #[must_use]
169 pub fn new() -> Self {
170 let mut free_lists = HashMap::new();
171 for &size in SLAB_SIZES {
172 free_lists.insert(size, Vec::new());
173 }
174
175 Self {
176 free_lists: Arc::new(RwLock::new(free_lists)),
177 stats: Arc::new(AllocatorStats::new()),
178 #[cfg(debug_assertions)]
179 allocated_blocks: Arc::new(Mutex::new(HashMap::new())),
180 }
181 }
182
183 pub fn allocate(&self, size: usize) -> Result<NonNull<u8>> {
185 let slab_size = SLAB_SIZES
187 .iter()
188 .find(|&&s| s >= size)
189 .copied()
190 .ok_or_else(|| {
191 self.stats.record_slab_miss();
192 OxiGdalError::invalid_parameter(
193 "parameter",
194 format!(
195 "Size {} exceeds maximum slab size {}",
196 size,
197 SLAB_SIZES.last().copied().unwrap_or(0)
198 ),
199 )
200 })?;
201
202 {
204 let mut free_lists = self.free_lists.write();
205 if let Some(blocks) = free_lists.get_mut(&slab_size) {
206 if let Some(ptr) = blocks.pop() {
207 self.stats.record_slab_hit();
208 self.stats.record_allocation(slab_size);
209
210 #[cfg(debug_assertions)]
211 {
212 let mut allocated = self.allocated_blocks.lock();
213 allocated.entry(slab_size).or_default().push(ptr);
214 }
215
216 return Ok(ptr);
217 }
218 }
219 }
220
221 self.stats.record_slab_miss();
223 let layout = Layout::from_size_align(slab_size, MIN_ALIGNMENT)
224 .map_err(|e| OxiGdalError::allocation_error(e.to_string()))?;
225
226 let ptr = unsafe {
229 let raw_ptr = alloc(layout);
230 if raw_ptr.is_null() {
231 self.stats.record_failure();
232 return Err(OxiGdalError::allocation_error(
233 "Failed to allocate memory".to_string(),
234 ));
235 }
236 NonNull::new_unchecked(raw_ptr)
237 };
238
239 self.stats.record_allocation(slab_size);
240
241 #[cfg(debug_assertions)]
242 {
243 let mut allocated = self.allocated_blocks.lock();
244 allocated.entry(slab_size).or_default().push(ptr);
245 }
246
247 Ok(ptr)
248 }
249
250 pub fn deallocate(&self, ptr: NonNull<u8>, size: usize) -> Result<()> {
252 let slab_size = SLAB_SIZES
254 .iter()
255 .find(|&&s| s >= size)
256 .copied()
257 .ok_or_else(|| {
258 OxiGdalError::invalid_parameter(
259 "parameter",
260 format!("Size {size} exceeds maximum slab size"),
261 )
262 })?;
263
264 #[cfg(debug_assertions)]
265 {
266 let mut allocated = self.allocated_blocks.lock();
267 if let Some(blocks) = allocated.get_mut(&slab_size) {
268 if let Some(pos) = blocks.iter().position(|&p| p == ptr) {
269 blocks.swap_remove(pos);
270 } else {
271 return Err(OxiGdalError::invalid_parameter(
272 "parameter",
273 "Block not found in allocated list".to_string(),
274 ));
275 }
276 } else {
277 return Err(OxiGdalError::invalid_parameter(
278 "parameter",
279 "Invalid slab size for deallocation".to_string(),
280 ));
281 }
282 }
283
284 let mut free_lists = self.free_lists.write();
286 free_lists.entry(slab_size).or_default().push(ptr);
287
288 self.stats.record_deallocation(slab_size);
289 Ok(())
290 }
291
292 #[must_use]
294 pub fn stats(&self) -> Arc<AllocatorStats> {
295 Arc::clone(&self.stats)
296 }
297
298 #[cfg(debug_assertions)]
300 pub fn check_leaks(&self) -> Result<()> {
301 let allocated = self.allocated_blocks.lock();
302 let mut total_leaks = 0;
303 for (size, blocks) in allocated.iter() {
304 if !blocks.is_empty() {
305 eprintln!(
306 "Memory leak detected: {} blocks of size {} bytes",
307 blocks.len(),
308 size
309 );
310 total_leaks += blocks.len();
311 }
312 }
313
314 if total_leaks > 0 {
315 Err(OxiGdalError::invalid_state(format!(
316 "Detected {total_leaks} memory leaks"
317 )))
318 } else {
319 Ok(())
320 }
321 }
322}
323
impl Default for SlabAllocator {
    /// Equivalent to [`SlabAllocator::new`].
    fn default() -> Self {
        Self::new()
    }
}
329
/// Power-of-two block allocator that splits larger cached blocks on demand.
///
/// NOTE(review): freed blocks are cached per size but buddies are never
/// merged back together, and cached blocks are never returned to the system —
/// confirm this fragmentation/retention behavior is acceptable.
pub struct BuddyAllocator {
    /// Free blocks keyed by power-of-two block size (BTreeMap enables the
    /// ordered `range` scan for the smallest fitting block).
    free_lists: Arc<Mutex<BTreeMap<usize, Vec<NonNull<u8>>>>>,
    /// Smallest block size handed out (power of two).
    min_block_size: usize,
    /// Largest block size handed out (power of two).
    max_block_size: usize,
    /// Shared counters, also exposed via `stats()`.
    stats: Arc<AllocatorStats>,
}

// SAFETY: required because `NonNull<u8>` is neither `Send` nor `Sync`. The
// pointer collections are only reachable through the `Mutex`-guarded free
// lists, and the pointed-to heap bytes have no thread affinity.
unsafe impl Send for BuddyAllocator {}

// SAFETY: see the `Send` impl above.
unsafe impl Sync for BuddyAllocator {}
349
350impl BuddyAllocator {
351 pub fn new(min_block_size: usize, max_block_size: usize) -> Result<Self> {
353 if !min_block_size.is_power_of_two() || !max_block_size.is_power_of_two() {
354 return Err(OxiGdalError::invalid_parameter(
355 "parameter",
356 "Block sizes must be powers of 2".to_string(),
357 ));
358 }
359
360 if min_block_size > max_block_size {
361 return Err(OxiGdalError::invalid_parameter(
362 "parameter",
363 "Min block size must be <= max block size".to_string(),
364 ));
365 }
366
367 Ok(Self {
368 free_lists: Arc::new(Mutex::new(BTreeMap::new())),
369 min_block_size,
370 max_block_size,
371 stats: Arc::new(AllocatorStats::new()),
372 })
373 }
374
375 pub fn with_defaults() -> Result<Self> {
381 Self::new(MIN_ALIGNMENT, MAX_BLOCK_SIZE)
382 }
383
384 fn next_power_of_two(&self, size: usize) -> usize {
386 if size <= self.min_block_size {
387 self.min_block_size
388 } else {
389 size.next_power_of_two().min(self.max_block_size)
390 }
391 }
392
393 pub fn allocate(&self, size: usize) -> Result<NonNull<u8>> {
395 let block_size = self.next_power_of_two(size);
396
397 if block_size > self.max_block_size {
398 self.stats.record_failure();
399 return Err(OxiGdalError::allocation_error(format!(
400 "Requested size {} exceeds maximum block size {}",
401 size, self.max_block_size
402 )));
403 }
404
405 let mut free_lists = self.free_lists.lock();
407
408 let mut found_size = None;
410 for (&list_size, blocks) in free_lists.range(block_size..) {
411 if !blocks.is_empty() {
412 found_size = Some(list_size);
413 break;
414 }
415 }
416
417 if let Some(found_size) = found_size {
418 let blocks = free_lists
420 .get_mut(&found_size)
421 .ok_or_else(|| OxiGdalError::invalid_state("Free list disappeared".to_string()))?;
422 let ptr = blocks
423 .pop()
424 .ok_or_else(|| OxiGdalError::invalid_state("Block disappeared".to_string()))?;
425
426 let mut current_size = found_size;
428 while current_size > block_size {
429 current_size /= 2;
430 if current_size >= block_size {
431 let buddy_ptr =
435 unsafe { NonNull::new_unchecked(ptr.as_ptr().add(current_size)) };
436 free_lists.entry(current_size).or_default().push(buddy_ptr);
437 }
438 }
439
440 self.stats.record_allocation(block_size);
441 Ok(ptr)
442 } else {
443 drop(free_lists);
445
446 let layout = Layout::from_size_align(block_size, block_size)
447 .map_err(|e| OxiGdalError::allocation_error(e.to_string()))?;
448
449 let ptr = unsafe {
452 let raw_ptr = alloc(layout);
453 if raw_ptr.is_null() {
454 self.stats.record_failure();
455 return Err(OxiGdalError::allocation_error(
456 "Failed to allocate memory".to_string(),
457 ));
458 }
459 NonNull::new_unchecked(raw_ptr)
460 };
461
462 self.stats.record_allocation(block_size);
463 Ok(ptr)
464 }
465 }
466
467 pub fn deallocate(&self, ptr: NonNull<u8>, size: usize) -> Result<()> {
469 let block_size = self.next_power_of_two(size);
470
471 let mut free_lists = self.free_lists.lock();
472 free_lists.entry(block_size).or_default().push(ptr);
473
474 self.stats.record_deallocation(block_size);
475 Ok(())
476 }
477
478 #[must_use]
480 pub fn stats(&self) -> Arc<AllocatorStats> {
481 Arc::clone(&self.stats)
482 }
483}
484
/// Hybrid allocator: slab classes for small requests, buddy blocks for large.
pub struct ThreadLocalAllocator {
    /// Serves requests up to `slab_threshold` bytes.
    slab: SlabAllocator,
    /// Serves requests larger than `slab_threshold` bytes.
    buddy: BuddyAllocator,
    /// Largest slab class — the routing boundary between the two allocators.
    slab_threshold: usize,
}

// SAFETY: NOTE(review): both members are already (unsafely) declared
// Send/Sync above, so these impls appear redundant — the auto traits should
// apply. Confirm and consider removing them.
unsafe impl Send for ThreadLocalAllocator {}

// SAFETY: see the `Send` impl above.
unsafe impl Sync for ThreadLocalAllocator {}
502
503impl ThreadLocalAllocator {
504 pub fn new() -> Result<Self> {
510 Ok(Self {
511 slab: SlabAllocator::new(),
512 buddy: BuddyAllocator::with_defaults()?,
513 slab_threshold: *SLAB_SIZES.last().ok_or_else(|| OxiGdalError::Internal {
514 message: "SLAB_SIZES is empty".to_string(),
515 })?,
516 })
517 }
518
519 pub fn allocate(&self, size: usize, alignment: usize) -> Result<NonNull<u8>> {
521 if alignment > MIN_ALIGNMENT && alignment > size {
522 return Err(OxiGdalError::invalid_parameter(
523 "parameter",
524 "Alignment requirements not supported".to_string(),
525 ));
526 }
527
528 if size <= self.slab_threshold {
529 self.slab.allocate(size)
530 } else {
531 self.buddy.allocate(size)
532 }
533 }
534
535 pub fn deallocate(&self, ptr: NonNull<u8>, size: usize) -> Result<()> {
537 if size <= self.slab_threshold {
538 self.slab.deallocate(ptr, size)
539 } else {
540 self.buddy.deallocate(ptr, size)
541 }
542 }
543
544 #[must_use]
546 pub fn stats(&self) -> (Arc<AllocatorStats>, Arc<AllocatorStats>) {
547 (self.slab.stats(), self.buddy.stats())
548 }
549}
550
/// Common interface implemented by every allocator in this module.
pub trait Allocator: Send + Sync {
    /// Allocates at least `size` bytes; `alignment` is a hint that concrete
    /// implementations may ignore or reject.
    fn allocate(&self, size: usize, alignment: usize) -> Result<NonNull<u8>>;

    /// Returns a block previously obtained from `allocate`; `size` must match
    /// the size passed at allocation time.
    fn deallocate(&self, ptr: NonNull<u8>, size: usize) -> Result<()>;

    /// Shared handle to the allocator's statistics counters.
    fn stats(&self) -> Arc<AllocatorStats>;
}
562
563impl Allocator for SlabAllocator {
564 fn allocate(&self, size: usize, _alignment: usize) -> Result<NonNull<u8>> {
565 self.allocate(size)
566 }
567
568 fn deallocate(&self, ptr: NonNull<u8>, size: usize) -> Result<()> {
569 self.deallocate(ptr, size)
570 }
571
572 fn stats(&self) -> Arc<AllocatorStats> {
573 self.stats()
574 }
575}
576
577impl Allocator for BuddyAllocator {
578 fn allocate(&self, size: usize, _alignment: usize) -> Result<NonNull<u8>> {
579 self.allocate(size)
580 }
581
582 fn deallocate(&self, ptr: NonNull<u8>, size: usize) -> Result<()> {
583 self.deallocate(ptr, size)
584 }
585
586 fn stats(&self) -> Arc<AllocatorStats> {
587 self.stats()
588 }
589}
590
impl Allocator for ThreadLocalAllocator {
    /// Forwards to the inherent `allocate`, including its alignment check.
    fn allocate(&self, size: usize, alignment: usize) -> Result<NonNull<u8>> {
        self.allocate(size, alignment)
    }

    /// Forwards to the inherent `deallocate` (routes by size).
    fn deallocate(&self, ptr: NonNull<u8>, size: usize) -> Result<()> {
        self.deallocate(ptr, size)
    }

    /// NOTE(review): only the slab statistics are exposed here — buddy-path
    /// activity is invisible through the trait. Callers that need both should
    /// use the inherent `ThreadLocalAllocator::stats()`; confirm this
    /// asymmetry is intended.
    fn stats(&self) -> Arc<AllocatorStats> {
        self.slab.stats()
    }
}
605
#[cfg(test)]
// `NonNull::as_ptr` can never return null; the null asserts below are kept as
// explicit sanity checks, so the lint is silenced.
#[allow(useless_ptr_null_checks)]
mod tests {
    use super::*;

    /// Round-trips two slab allocations and checks the stats counters.
    #[test]
    fn test_slab_allocator() {
        let allocator = SlabAllocator::new();

        let ptr1 = allocator
            .allocate(100)
            .expect("Slab allocator should allocate 100 bytes");
        assert!(!ptr1.as_ptr().is_null());

        let ptr2 = allocator
            .allocate(200)
            .expect("Slab allocator should allocate 200 bytes");
        assert!(!ptr2.as_ptr().is_null());
        // Distinct live allocations must not alias.
        assert_ne!(ptr1, ptr2);

        allocator
            .deallocate(ptr1, 100)
            .expect("Deallocation should succeed");
        allocator
            .deallocate(ptr2, 200)
            .expect("Deallocation should succeed");

        let stats = allocator.stats();
        assert_eq!(stats.total_allocations.load(Ordering::Relaxed), 2);
        assert_eq!(stats.total_deallocations.load(Ordering::Relaxed), 2);
    }

    /// Round-trips two buddy allocations of non-power-of-two sizes.
    #[test]
    fn test_buddy_allocator() {
        let allocator =
            BuddyAllocator::with_defaults().expect("Default buddy allocator should be created");

        let ptr1 = allocator
            .allocate(1000)
            .expect("Buddy allocator should allocate 1000 bytes");
        let ptr2 = allocator
            .allocate(2000)
            .expect("Buddy allocator should allocate 2000 bytes");

        assert!(!ptr1.as_ptr().is_null());
        assert!(!ptr2.as_ptr().is_null());

        allocator
            .deallocate(ptr1, 1000)
            .expect("Buddy deallocation should succeed");
        allocator
            .deallocate(ptr2, 2000)
            .expect("Buddy deallocation should succeed");
    }

    /// Exercises both routing paths of the hybrid allocator:
    /// 256 B (slab) and 1 MiB (within the slab threshold / large path).
    #[test]
    fn test_thread_local_allocator() {
        let allocator =
            ThreadLocalAllocator::new().expect("Thread-local allocator should be created");

        let ptr1 = allocator
            .allocate(256, MIN_ALIGNMENT)
            .expect("Thread-local allocator should allocate small block");
        assert!(!ptr1.as_ptr().is_null());

        let ptr2 = allocator
            .allocate(1024 * 1024, MIN_ALIGNMENT)
            .expect("Thread-local allocator should allocate large block");
        assert!(!ptr2.as_ptr().is_null());

        allocator
            .deallocate(ptr1, 256)
            .expect("Thread-local deallocation should succeed");
        allocator
            .deallocate(ptr2, 1024 * 1024)
            .expect("Thread-local deallocation should succeed");
    }

    /// Verifies counter bookkeeping, including the peak high-water mark.
    #[test]
    fn test_allocator_stats() {
        let stats = AllocatorStats::new();

        stats.record_allocation(1024);
        stats.record_allocation(2048);

        assert_eq!(stats.total_allocations.load(Ordering::Relaxed), 2);
        assert_eq!(stats.bytes_allocated.load(Ordering::Relaxed), 3072);
        assert_eq!(stats.peak_bytes_allocated.load(Ordering::Relaxed), 3072);

        stats.record_deallocation(1024);
        assert_eq!(stats.bytes_allocated.load(Ordering::Relaxed), 2048);
        assert_eq!(stats.current_allocations(), 1);
    }
}