1#[cfg(not(feature = "no-std"))]
7use std::alloc::{GlobalAlloc, Layout, System};
8#[cfg(not(feature = "no-std"))]
9use std::ptr::{self, NonNull};
10#[cfg(not(feature = "no-std"))]
11use std::sync::atomic::{AtomicUsize, Ordering};
12#[cfg(not(feature = "no-std"))]
13use std::{mem, slice};
14
15#[cfg(feature = "no-std")]
16use core::alloc::{GlobalAlloc, Layout};
17#[cfg(feature = "no-std")]
18use core::ptr::{self, NonNull};
19#[cfg(feature = "no-std")]
20use core::sync::atomic::{AtomicUsize, Ordering};
21#[cfg(feature = "no-std")]
22use core::{mem, slice};
23#[cfg(feature = "no-std")]
24extern crate alloc;
25#[cfg(feature = "no-std")]
26use alloc::alloc as global_alloc;
27#[cfg(feature = "no-std")]
28use alloc::vec::Vec;
29
/// Thread-safe counters describing allocator activity.
///
/// All counters grow monotonically; the derived `current_memory_usage`
/// is computed from the allocated/deallocated byte totals.
#[derive(Debug, Default)]
pub struct AllocatorStats {
    /// Number of successful allocations recorded.
    pub total_allocations: AtomicUsize,
    /// Number of deallocations recorded.
    pub total_deallocations: AtomicUsize,
    /// Total bytes ever allocated (monotonic).
    pub bytes_allocated: AtomicUsize,
    /// Total bytes ever deallocated (monotonic).
    pub bytes_deallocated: AtomicUsize,
    /// Allocations that satisfied the requested SIMD alignment.
    pub aligned_allocations: AtomicUsize,
    /// Best-effort high-water mark of `current_memory_usage`.
    pub peak_memory_usage: AtomicUsize,
}

impl AllocatorStats {
    /// Creates a zeroed statistics block.
    pub fn new() -> Self {
        Self::default()
    }

    /// Records one allocation of `size` bytes; `aligned` marks whether it
    /// met the allocator's SIMD alignment requirement.
    pub fn record_allocation(&self, size: usize, aligned: bool) {
        self.total_allocations.fetch_add(1, Ordering::Relaxed);
        self.bytes_allocated.fetch_add(size, Ordering::Relaxed);

        if aligned {
            self.aligned_allocations.fetch_add(1, Ordering::Relaxed);
        }

        // Atomic max replaces the previous hand-rolled
        // `compare_exchange_weak` retry loop with the equivalent single
        // operation. The peak remains a best-effort snapshot either way:
        // `current_memory_usage` reads two counters non-atomically, so
        // concurrent updates may slightly mis-report the true peak.
        let current_usage = self.current_memory_usage();
        self.peak_memory_usage
            .fetch_max(current_usage, Ordering::Relaxed);
    }

    /// Records one deallocation of `size` bytes.
    pub fn record_deallocation(&self, size: usize) {
        self.total_deallocations.fetch_add(1, Ordering::Relaxed);
        self.bytes_deallocated.fetch_add(size, Ordering::Relaxed);
    }

    /// Bytes currently live: allocated minus deallocated, saturating at
    /// zero in case the two relaxed loads race.
    pub fn current_memory_usage(&self) -> usize {
        let allocated = self.bytes_allocated.load(Ordering::Relaxed);
        let deallocated = self.bytes_deallocated.load(Ordering::Relaxed);
        allocated.saturating_sub(deallocated)
    }

    /// Fraction of allocations that were SIMD-aligned, in `[0.0, 1.0]`.
    /// Returns 1.0 when nothing has been allocated yet.
    pub fn allocation_efficiency(&self) -> f64 {
        let total_allocs = self.total_allocations.load(Ordering::Relaxed);
        let aligned_allocs = self.aligned_allocations.load(Ordering::Relaxed);

        if total_allocs == 0 {
            1.0
        } else {
            aligned_allocs as f64 / total_allocs as f64
        }
    }
}
92
93pub struct SimdAllocator {
95 stats: AllocatorStats,
96 default_alignment: usize,
97}
98
99impl SimdAllocator {
100 pub const fn new() -> Self {
102 Self::with_alignment(32)
103 }
104
105 pub const fn with_alignment(alignment: usize) -> Self {
107 Self {
108 stats: AllocatorStats {
109 total_allocations: AtomicUsize::new(0),
110 total_deallocations: AtomicUsize::new(0),
111 bytes_allocated: AtomicUsize::new(0),
112 bytes_deallocated: AtomicUsize::new(0),
113 aligned_allocations: AtomicUsize::new(0),
114 peak_memory_usage: AtomicUsize::new(0),
115 },
116 default_alignment: alignment,
117 }
118 }
119
120 pub fn stats(&self) -> &AllocatorStats {
122 &self.stats
123 }
124
125 pub fn allocate_simd<T>(&self, count: usize) -> Option<NonNull<T>> {
127 let size = count * mem::size_of::<T>();
128 let align = self.default_alignment.max(mem::align_of::<T>());
129
130 let layout = Layout::from_size_align(size, align).ok()?;
131
132 #[cfg(not(feature = "no-std"))]
134 let ptr = unsafe { System.alloc(layout) };
135 #[cfg(feature = "no-std")]
136 let ptr = unsafe { global_alloc::alloc(layout) };
137
138 if ptr.is_null() {
139 None
140 } else {
141 self.stats.record_allocation(size, true);
142 NonNull::new(ptr.cast())
143 }
144 }
145
146 pub unsafe fn deallocate_simd<T>(&self, ptr: NonNull<T>, count: usize) {
148 let size = count * mem::size_of::<T>();
149 let align = self.default_alignment.max(mem::align_of::<T>());
150
151 if let Ok(layout) = Layout::from_size_align(size, align) {
152 #[cfg(not(feature = "no-std"))]
153 System.dealloc(ptr.cast().as_ptr(), layout);
154 #[cfg(feature = "no-std")]
155 global_alloc::dealloc(ptr.cast().as_ptr(), layout);
156 self.stats.record_deallocation(size);
157 }
158 }
159
160 pub fn allocate_zeroed_simd<T>(&self, count: usize) -> Option<NonNull<T>>
162 where
163 T: Copy,
164 {
165 let size = count * mem::size_of::<T>();
166 let align = self.default_alignment.max(mem::align_of::<T>());
167
168 let layout = Layout::from_size_align(size, align).ok()?;
169
170 #[cfg(not(feature = "no-std"))]
171 let ptr = unsafe { System.alloc_zeroed(layout) };
172 #[cfg(feature = "no-std")]
173 let ptr = unsafe { global_alloc::alloc_zeroed(layout) };
174
175 if ptr.is_null() {
176 None
177 } else {
178 self.stats.record_allocation(size, true);
179 NonNull::new(ptr.cast())
180 }
181 }
182
183 pub unsafe fn reallocate_simd<T>(
185 &self,
186 ptr: NonNull<T>,
187 old_count: usize,
188 new_count: usize,
189 ) -> Option<NonNull<T>> {
190 let old_size = old_count * mem::size_of::<T>();
191 let new_size = new_count * mem::size_of::<T>();
192 let align = self.default_alignment.max(mem::align_of::<T>());
193
194 let old_layout = Layout::from_size_align(old_size, align).ok()?;
195
196 #[cfg(not(feature = "no-std"))]
197 let new_ptr = System.realloc(ptr.cast().as_ptr(), old_layout, new_size);
198 #[cfg(feature = "no-std")]
199 let new_ptr = global_alloc::realloc(ptr.cast().as_ptr(), old_layout, new_size);
200
201 if new_ptr.is_null() {
202 None
203 } else {
204 self.stats.record_deallocation(old_size);
205 self.stats.record_allocation(new_size, true);
206 NonNull::new(new_ptr.cast())
207 }
208 }
209}
210
211impl Default for SimdAllocator {
212 fn default() -> Self {
213 Self::new()
214 }
215}
216
217unsafe impl GlobalAlloc for SimdAllocator {
218 unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
219 #[cfg(not(feature = "no-std"))]
220 let ptr = System.alloc(layout);
221 #[cfg(feature = "no-std")]
222 let ptr = global_alloc::alloc(layout);
223 if !ptr.is_null() {
224 let is_aligned = layout.align() >= self.default_alignment;
225 self.stats.record_allocation(layout.size(), is_aligned);
226 }
227 ptr
228 }
229
230 unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
231 #[cfg(not(feature = "no-std"))]
232 System.dealloc(ptr, layout);
233 #[cfg(feature = "no-std")]
234 global_alloc::dealloc(ptr, layout);
235 self.stats.record_deallocation(layout.size());
236 }
237
238 unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 {
239 #[cfg(not(feature = "no-std"))]
240 let ptr = System.alloc_zeroed(layout);
241 #[cfg(feature = "no-std")]
242 let ptr = global_alloc::alloc_zeroed(layout);
243 if !ptr.is_null() {
244 let is_aligned = layout.align() >= self.default_alignment;
245 self.stats.record_allocation(layout.size(), is_aligned);
246 }
247 ptr
248 }
249
250 unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 {
251 #[cfg(not(feature = "no-std"))]
252 let new_ptr = System.realloc(ptr, layout, new_size);
253 #[cfg(feature = "no-std")]
254 let new_ptr = global_alloc::realloc(ptr, layout, new_size);
255 if !new_ptr.is_null() {
256 self.stats.record_deallocation(layout.size());
257 self.stats
258 .record_allocation(new_size, layout.align() >= self.default_alignment);
259 }
260 new_ptr
261 }
262}
263
/// Growable vector whose backing buffer is allocated through a
/// [`SimdAllocator`], so element storage is SIMD-aligned.
pub struct SimdVec<T> {
    // `None` until the first allocation (an empty vector owns no buffer).
    ptr: Option<NonNull<T>>,
    // Number of initialized elements (always <= capacity).
    len: usize,
    // Number of elements the current buffer can hold.
    capacity: usize,
    // Owning allocator; also the source of allocation statistics.
    allocator: SimdAllocator,
}
271
impl<T> SimdVec<T> {
    /// Creates an empty vector using a default (32-byte aligned)
    /// allocator. No memory is allocated until the first push/reserve.
    pub fn new() -> Self {
        Self::with_allocator(SimdAllocator::new())
    }

    /// Creates an empty vector that will allocate through `allocator`.
    pub fn with_allocator(allocator: SimdAllocator) -> Self {
        Self {
            ptr: None,
            len: 0,
            capacity: 0,
            allocator,
        }
    }

    /// Creates an empty vector with room for at least `capacity` elements.
    pub fn with_capacity(capacity: usize) -> Self {
        let mut vec = Self::new();
        vec.reserve(capacity);
        vec
    }

    /// Ensures capacity for at least `len + additional` elements, growing
    /// to the next power of two (minimum 4) so repeated pushes stay
    /// amortized O(1).
    ///
    /// # Panics
    /// Panics if `len + additional` overflows `usize` or if the
    /// underlying (re)allocation fails.
    ///
    /// NOTE(review): a zero-sized `T` would make `allocate_simd` request
    /// zero bytes — confirm ZST elements are out of scope for this type.
    pub fn reserve(&mut self, additional: usize) {
        let new_capacity = self.len.checked_add(additional).expect("Capacity overflow");

        if new_capacity <= self.capacity {
            return;
        }

        // Power-of-two growth; 4 is the minimum initial capacity.
        let new_capacity = new_capacity.next_power_of_two().max(4);

        if let Some(old_ptr) = self.ptr {
            // Grow (or move) the existing buffer; `reallocate_simd`
            // copies the contents when the block has to move.
            let new_ptr = unsafe {
                self.allocator
                    .reallocate_simd(old_ptr, self.capacity, new_capacity)
            };

            if let Some(new_ptr) = new_ptr {
                self.ptr = Some(new_ptr);
                self.capacity = new_capacity;
            } else {
                panic!("Failed to reallocate SIMD memory");
            }
        } else {
            // First allocation for this vector.
            let new_ptr = self.allocator.allocate_simd::<T>(new_capacity);

            if let Some(new_ptr) = new_ptr {
                self.ptr = Some(new_ptr);
                self.capacity = new_capacity;
            } else {
                panic!("Failed to allocate SIMD memory");
            }
        }
    }

    /// Appends `value` at the end, growing the buffer if it is full.
    pub fn push(&mut self, value: T) {
        if self.len == self.capacity {
            self.reserve(1);
        }

        // SAFETY: reserve() above guarantees `ptr` is Some and that slot
        // `len` is within the allocated (uninitialized) capacity.
        unsafe {
            let ptr = self
                .ptr
                .expect("Vector should have allocated memory")
                .as_ptr();
            ptr::write(ptr.add(self.len), value);
        }

        // Length is bumped only after the write succeeds.
        self.len += 1;
    }

    /// Removes and returns the last element, or `None` when empty.
    pub fn pop(&mut self) -> Option<T> {
        if self.len == 0 {
            None
        } else {
            self.len -= 1;
            // SAFETY: len was > 0, so slot `len` (after the decrement)
            // holds an initialized element; `ptr::read` moves it out and
            // the shrunken length keeps it from being dropped again.
            unsafe {
                let ptr = self
                    .ptr
                    .expect("Vector should have allocated memory")
                    .as_ptr();
                Some(ptr::read(ptr.add(self.len)))
            }
        }
    }

    /// Number of initialized elements.
    pub fn len(&self) -> usize {
        self.len
    }

    /// `true` when the vector holds no elements.
    pub fn is_empty(&self) -> bool {
        self.len == 0
    }

    /// Number of elements the buffer can hold without growing.
    pub fn capacity(&self) -> usize {
        self.capacity
    }

    /// Borrows the initialized elements as a slice.
    pub fn as_slice(&self) -> &[T] {
        if let Some(ptr) = self.ptr {
            // SAFETY: the first `len` slots are initialized.
            unsafe { slice::from_raw_parts(ptr.as_ptr(), self.len) }
        } else {
            &[]
        }
    }

    /// Borrows the initialized elements as a mutable slice.
    pub fn as_mut_slice(&mut self) -> &mut [T] {
        if let Some(ptr) = self.ptr {
            // SAFETY: the first `len` slots are initialized, and
            // `&mut self` guarantees exclusive access.
            unsafe { slice::from_raw_parts_mut(ptr.as_ptr(), self.len) }
        } else {
            &mut []
        }
    }

    /// Drops every element and resets the length to zero; the buffer and
    /// its capacity are kept for reuse.
    pub fn clear(&mut self) {
        // Skip the drop loop entirely for trivially-droppable types.
        if mem::needs_drop::<T>() {
            for i in 0..self.len {
                // SAFETY: slots 0..len are initialized; each is dropped
                // exactly once before `len` is reset below.
                unsafe {
                    let ptr = self
                        .ptr
                        .expect("Vector should have allocated memory")
                        .as_ptr();
                    ptr::drop_in_place(ptr.add(i));
                }
            }
        }
        self.len = 0;
    }

    /// Statistics of the allocator backing this vector.
    pub fn allocator_stats(&self) -> &AllocatorStats {
        self.allocator.stats()
    }

    /// `true` when the buffer start meets the allocator's default
    /// alignment; vacuously true while no buffer is allocated.
    pub fn is_simd_aligned(&self) -> bool {
        if let Some(ptr) = self.ptr {
            let addr = ptr.as_ptr() as usize;
            addr % self.allocator.default_alignment == 0
        } else {
            true
        }
    }
}
428
429impl<T> Default for SimdVec<T> {
430 fn default() -> Self {
431 Self::new()
432 }
433}
434
impl<T> Drop for SimdVec<T> {
    fn drop(&mut self) {
        // Run element destructors first...
        self.clear();

        // ...then hand the now element-free buffer back to the allocator.
        // `take()` clears the field so the pointer cannot be freed twice.
        if let Some(ptr) = self.ptr.take() {
            unsafe {
                self.allocator.deallocate_simd(ptr, self.capacity);
            }
        }
    }
}
446
447impl<T: Clone> Clone for SimdVec<T> {
448 fn clone(&self) -> Self {
449 let mut new_vec = Self::with_allocator(SimdAllocator::with_alignment(
450 self.allocator.default_alignment,
451 ));
452
453 new_vec.reserve(self.len);
454
455 for item in self.as_slice() {
456 new_vec.push(item.clone());
457 }
458
459 new_vec
460 }
461}
462
/// Simple free-list pool of SIMD-aligned blocks of `T`.
pub struct SimdMemoryPool<T> {
    // Blocks available for reuse: (pointer, capacity in elements).
    free_blocks: Vec<(NonNull<T>, usize)>,
    // Backing allocator used when no pooled block is large enough.
    allocator: SimdAllocator,
    // Minimum capacity (in elements) for freshly allocated blocks.
    block_size: usize,
}
469
470impl<T> SimdMemoryPool<T> {
471 pub fn new(block_size: usize) -> Self {
472 Self {
473 free_blocks: Vec::new(),
474 allocator: SimdAllocator::new(),
475 block_size,
476 }
477 }
478
479 pub fn acquire(&mut self, min_capacity: usize) -> Option<(NonNull<T>, usize)> {
480 for (i, (_ptr, capacity)) in self.free_blocks.iter().enumerate() {
482 if *capacity >= min_capacity {
483 return Some(self.free_blocks.swap_remove(i));
484 }
485 }
486
487 let capacity = min_capacity.max(self.block_size);
489 let ptr = self.allocator.allocate_simd(capacity)?;
490 Some((ptr, capacity))
491 }
492
493 pub fn release(&mut self, ptr: NonNull<T>, capacity: usize) {
494 self.free_blocks.push((ptr, capacity));
495 }
496
497 pub fn clear(&mut self) {
498 for (ptr, capacity) in self.free_blocks.drain(..) {
499 unsafe {
500 self.allocator.deallocate_simd(ptr, capacity);
501 }
502 }
503 }
504
505 pub fn stats(&self) -> &AllocatorStats {
506 self.allocator.stats()
507 }
508}
509
impl<T> Drop for SimdMemoryPool<T> {
    fn drop(&mut self) {
        // Free every block still sitting in the pool. Blocks currently
        // checked out via `acquire` are the caller's responsibility.
        self.clear();
    }
}
515
// The former `#[allow(non_snake_case)]` was dropped (no identifier here
// needs it), and the `#[cfg(feature = "no-std")] use alloc::...` import
// was removed: this module only compiles when `no-std` is disabled, so
// that import was unreachable dead code.
#[cfg(all(test, not(feature = "no-std")))]
mod tests {
    use super::*;

    #[test]
    fn test_simd_allocator_basic() {
        let allocator = SimdAllocator::new();

        let ptr = allocator.allocate_simd::<f32>(16);
        assert!(ptr.is_some());

        if let Some(ptr) = ptr {
            let addr = ptr.as_ptr() as usize;
            assert_eq!(addr % 32, 0, "Memory should be 32-byte aligned");

            unsafe {
                allocator.deallocate_simd(ptr, 16);
            }
        }

        let stats = allocator.stats();
        assert_eq!(stats.total_allocations.load(Ordering::Relaxed), 1);
        assert_eq!(stats.total_deallocations.load(Ordering::Relaxed), 1);
    }

    #[test]
    fn test_simd_vec_basic_operations() {
        let mut vec = SimdVec::<i32>::new();

        assert!(vec.is_empty());
        assert_eq!(vec.len(), 0);
        assert!(vec.is_simd_aligned());

        vec.push(1);
        vec.push(2);
        vec.push(3);

        assert_eq!(vec.len(), 3);
        assert!(!vec.is_empty());
        assert_eq!(vec.as_slice(), &[1, 2, 3]);

        assert_eq!(vec.pop(), Some(3));
        assert_eq!(vec.len(), 2);

        vec.clear();
        assert!(vec.is_empty());
    }

    #[test]
    fn test_simd_vec_capacity_growth() {
        let mut vec = SimdVec::<u64>::new();

        for i in 0..100 {
            vec.push(i);
        }

        assert_eq!(vec.len(), 100);
        assert!(vec.capacity() >= 100);
        assert!(vec.is_simd_aligned());

        for (i, &value) in vec.as_slice().iter().enumerate() {
            assert_eq!(value, i as u64);
        }
    }

    #[test]
    fn test_simd_vec_with_capacity() {
        let vec = SimdVec::<f64>::with_capacity(50);

        assert_eq!(vec.len(), 0);
        assert!(vec.capacity() >= 50);
        assert!(vec.is_simd_aligned());
    }

    #[test]
    fn test_allocator_stats() {
        let allocator = SimdAllocator::new();

        let ptr1 = allocator
            .allocate_simd::<f32>(16)
            .expect("operation should succeed");
        let ptr2 = allocator
            .allocate_simd::<f64>(8)
            .expect("operation should succeed");

        let stats = allocator.stats();
        assert_eq!(stats.total_allocations.load(Ordering::Relaxed), 2);
        assert_eq!(stats.aligned_allocations.load(Ordering::Relaxed), 2);
        assert!(stats.current_memory_usage() > 0);
        assert_eq!(stats.allocation_efficiency(), 1.0);

        unsafe {
            allocator.deallocate_simd(ptr1, 16);
            allocator.deallocate_simd(ptr2, 8);
        }

        assert_eq!(stats.total_deallocations.load(Ordering::Relaxed), 2);
    }

    #[test]
    fn test_memory_pool() {
        let mut pool = SimdMemoryPool::<i32>::new(64);

        let (ptr1, cap1) = pool.acquire(32).expect("operation should succeed");
        assert!(cap1 >= 32);

        let (ptr2, cap2) = pool.acquire(16).expect("operation should succeed");
        assert!(cap2 >= 16);

        pool.release(ptr1, cap1);

        // The released block should be reused for a fitting request.
        let (ptr3, cap3) = pool.acquire(30).expect("operation should succeed");
        assert_eq!(ptr3, ptr1);
        assert_eq!(cap3, cap1);

        pool.release(ptr2, cap2);
        pool.release(ptr3, cap3);
    }

    #[test]
    fn test_zeroed_allocation() {
        let allocator = SimdAllocator::new();

        let ptr = allocator
            .allocate_zeroed_simd::<u32>(16)
            .expect("operation should succeed");

        unsafe {
            let slice = slice::from_raw_parts(ptr.as_ptr(), 16);
            for &value in slice {
                assert_eq!(value, 0);
            }

            allocator.deallocate_simd(ptr, 16);
        }
    }

    #[test]
    fn test_custom_alignment() {
        let allocator = SimdAllocator::with_alignment(64);

        let ptr = allocator.allocate_simd::<f32>(16);
        assert!(ptr.is_some());

        if let Some(ptr) = ptr {
            let addr = ptr.as_ptr() as usize;
            assert_eq!(addr % 64, 0, "Memory should be 64-byte aligned");

            unsafe {
                allocator.deallocate_simd(ptr, 16);
            }
        }
    }

    #[test]
    fn test_simd_vec_clone() {
        let mut vec1 = SimdVec::<i32>::new();
        vec1.push(1);
        vec1.push(2);
        vec1.push(3);

        let vec2 = vec1.clone();

        assert_eq!(vec1.as_slice(), vec2.as_slice());
        assert!(vec2.is_simd_aligned());
    }
}