#![allow(dead_code)]
/// Describes how a region of memory is laid out: required alignment,
/// logical size, and element-to-element stride, all in bytes.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct MemoryLayout {
    /// Required alignment in bytes; 0 means "no alignment constraint".
    pub alignment: usize,
    /// Logical size of the region in bytes.
    pub size: usize,
    /// Distance between consecutive elements in bytes.
    pub stride: usize,
}

impl MemoryLayout {
    /// Constructs a layout; a `stride` of 0 is treated as "tightly packed"
    /// and replaced by `size`.
    #[must_use]
    pub fn new(alignment: usize, size: usize, stride: usize) -> Self {
        Self {
            alignment,
            size,
            stride: if stride == 0 { size } else { stride },
        }
    }

    /// Whether `ptr` satisfies this layout's alignment.
    /// A zero alignment accepts every address.
    #[must_use]
    pub fn is_aligned(&self, ptr: usize) -> bool {
        match self.alignment {
            0 => true,
            align => ptr % align == 0,
        }
    }

    /// `size` rounded up to the next multiple of `alignment`
    /// (returned unchanged when the alignment is zero).
    #[must_use]
    pub fn padded_size(&self) -> usize {
        match self.alignment {
            0 => self.size,
            align => self.size.div_ceil(align) * align,
        }
    }
}
/// Fixed-size, zero-initialized byte buffer paired with layout metadata.
///
/// NOTE(review): the requested alignment is recorded in `layout` but is NOT
/// enforced on the heap allocation — `Vec<u8>` only guarantees byte
/// alignment. Confirm whether callers rely on the data pointer actually
/// being aligned.
#[derive(Debug)]
pub struct AlignedBuffer {
    // Backing storage; zero-filled at construction.
    data: Vec<u8>,
    /// Layout metadata describing this buffer.
    pub layout: MemoryLayout,
}

impl AlignedBuffer {
    /// Creates a zero-filled buffer of `size` bytes whose layout records the
    /// given `align` and a stride equal to `size`.
    #[must_use]
    pub fn new(size: usize, align: usize) -> Self {
        Self {
            data: vec![0u8; size],
            layout: MemoryLayout::new(align, size, size),
        }
    }

    /// Read-only view of the whole buffer.
    #[must_use]
    pub fn as_slice(&self) -> &[u8] {
        self.data.as_slice()
    }

    /// Mutable view of the whole buffer.
    #[must_use]
    pub fn as_mut_slice(&mut self) -> &mut [u8] {
        self.data.as_mut_slice()
    }

    /// Total byte capacity (the layout's recorded `size`).
    #[must_use]
    pub fn capacity(&self) -> usize {
        self.layout.size
    }
}
/// Fixed pool of equally-sized `AlignedBuffer`s managed through a free list.
pub struct MemoryPool {
    // All buffers, created up front; indices stay stable for the pool's lifetime.
    buffers: Vec<AlignedBuffer>,
    // Indices of buffers currently available for allocation.
    free_list: Vec<usize>,
}

impl MemoryPool {
    /// Builds a pool of `count` buffers, each `size` bytes with layout
    /// alignment `align`. Every buffer starts out free.
    #[must_use]
    pub fn new(count: usize, size: usize, align: usize) -> Self {
        let mut buffers = Vec::with_capacity(count);
        let mut free_list = Vec::with_capacity(count);
        for idx in 0..count {
            buffers.push(AlignedBuffer::new(size, align));
            free_list.push(idx);
        }
        Self { buffers, free_list }
    }

    /// Hands out the index of the first free buffer whose capacity is at
    /// least `size`, or `None` when no free buffer is large enough.
    pub fn allocate(&mut self, size: usize) -> Option<usize> {
        let buffers = &self.buffers;
        let pos = self
            .free_list
            .iter()
            .position(|&candidate| buffers[candidate].capacity() >= size)?;
        Some(self.free_list.remove(pos))
    }

    /// Returns buffer `idx` to the free list. Out-of-range indices and
    /// double-frees are silently ignored.
    pub fn deallocate(&mut self, idx: usize) {
        let in_range = idx < self.buffers.len();
        if in_range && !self.free_list.contains(&idx) {
            self.free_list.push(idx);
        }
    }

    /// Number of buffers currently free.
    #[must_use]
    pub fn available(&self) -> usize {
        self.free_list.len()
    }

    /// Shared access to buffer `idx`, if it exists.
    #[must_use]
    pub fn buffer(&self, idx: usize) -> Option<&AlignedBuffer> {
        self.buffers.get(idx)
    }

    /// Exclusive access to buffer `idx`, if it exists.
    #[must_use]
    pub fn buffer_mut(&mut self, idx: usize) -> Option<&mut AlignedBuffer> {
        self.buffers.get_mut(idx)
    }
}
/// FIFO ring allocator handing out contiguous byte offsets into a fixed
/// buffer. Callers are expected to `free` regions in the same order and
/// with the same sizes as they were allocated.
#[derive(Debug)]
pub struct RingAllocator {
    buffer: Vec<u8>,
    // Offset of the oldest live allocation.
    head: usize,
    // Offset where the next allocation starts.
    tail: usize,
    // Bytes currently unavailable: live allocations plus wrap padding.
    used: usize,
    // Bytes skipped at the end of the buffer when an allocation wrapped;
    // counted in `used` until `head` crosses the padded region.
    padding: usize,
}

impl RingAllocator {
    /// Creates an allocator over a zeroed buffer of `capacity` bytes.
    #[must_use]
    pub fn new(capacity: usize) -> Self {
        Self {
            buffer: vec![0u8; capacity],
            head: 0,
            tail: 0,
            used: 0,
            padding: 0,
        }
    }

    /// Reserves `size` contiguous bytes and returns their offset.
    ///
    /// Returns `None` for empty requests, requests larger than the whole
    /// buffer, or when not enough space is free. The returned span
    /// `offset..offset + size` always lies fully inside the buffer.
    pub fn allocate(&mut self, size: usize) -> Option<usize> {
        let capacity = self.buffer.len();
        if size == 0 || size > capacity {
            return None;
        }
        // Bug fix: the previous version let a span straddle the end of the
        // buffer (offset + size > capacity), so the caller received a
        // non-contiguous / out-of-bounds region. If the request does not fit
        // before the end, wrap to offset 0 and account for the skipped tail
        // bytes so every returned span is contiguous.
        let skip = if self.tail + size > capacity {
            capacity - self.tail
        } else {
            0
        };
        if size + skip > self.free_space() {
            return None;
        }
        if skip > 0 {
            if self.used == 0 {
                // Nothing live: simply restart from the beginning.
                self.head = 0;
            } else {
                // The skipped tail stays "used" until `head` passes it.
                self.padding += skip;
                self.used += skip;
            }
            self.tail = 0;
        }
        let offset = self.tail;
        self.tail = (self.tail + size) % capacity;
        self.used += size;
        Some(offset)
    }

    /// Number of bytes still available for allocation.
    #[must_use]
    pub fn free_space(&self) -> usize {
        self.buffer.len().saturating_sub(self.used)
    }

    /// Clears all allocations and zeroes the buffer contents.
    pub fn reset(&mut self) {
        self.head = 0;
        self.tail = 0;
        self.used = 0;
        self.padding = 0;
        self.buffer.fill(0);
    }

    /// Total buffer size in bytes.
    #[must_use]
    pub fn capacity(&self) -> usize {
        self.buffer.len()
    }

    /// Releases `size` bytes from the oldest end (FIFO discipline assumed:
    /// sizes must match earlier `allocate` calls, in order).
    pub fn free(&mut self, size: usize) {
        let capacity = self.buffer.len();
        // Bug fix: guard the modulo below — the old code panicked with a
        // divide-by-zero on a zero-capacity allocator.
        if capacity == 0 {
            return;
        }
        self.head = (self.head + size) % capacity;
        self.used = self.used.saturating_sub(size);
        // Reclaim wrap padding once the oldest live data has moved past it.
        if self.padding > 0 && self.head == capacity - self.padding {
            self.used = self.used.saturating_sub(self.padding);
            self.padding = 0;
            self.head = 0;
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    // ----- MemoryLayout -----

    #[test]
    fn test_is_aligned_true() {
        let layout = MemoryLayout::new(16, 64, 64);
        // Multiples of 16 all satisfy the alignment.
        for addr in [0usize, 16, 32] {
            assert!(layout.is_aligned(addr));
        }
    }

    #[test]
    fn test_is_aligned_false() {
        let layout = MemoryLayout::new(16, 64, 64);
        for addr in [1usize, 15] {
            assert!(!layout.is_aligned(addr));
        }
    }

    #[test]
    fn test_padded_size_exact() {
        // Size already a multiple of the alignment: nothing to add.
        assert_eq!(MemoryLayout::new(16, 64, 64).padded_size(), 64);
    }

    #[test]
    fn test_padded_size_with_padding() {
        // 60 rounds up to 64, the next multiple of 16.
        assert_eq!(MemoryLayout::new(16, 60, 60).padded_size(), 64);
    }

    #[test]
    fn test_padded_size_zero_alignment() {
        // Zero alignment imposes no constraint, so the size passes through.
        assert_eq!(MemoryLayout::new(0, 60, 60).padded_size(), 60);
    }

    // ----- AlignedBuffer -----

    #[test]
    fn test_aligned_buffer_new() {
        let buf = AlignedBuffer::new(256, 16);
        assert_eq!(buf.capacity(), 256);
        let bytes = buf.as_slice();
        assert_eq!(bytes.len(), 256);
        // Fresh buffers are zero-filled.
        assert!(bytes.iter().all(|&b| b == 0));
    }

    #[test]
    fn test_aligned_buffer_write() {
        let mut buf = AlignedBuffer::new(16, 8);
        buf.as_mut_slice()[0] = 42;
        assert_eq!(buf.as_slice()[0], 42);
    }

    // ----- MemoryPool -----

    #[test]
    fn test_pool_allocate_success() {
        let mut pool = MemoryPool::new(4, 1024, 16);
        assert_eq!(pool.available(), 4);
        let idx = pool.allocate(512).expect("allocate should succeed");
        assert_eq!(pool.available(), 3);
        assert!(pool.buffer(idx).is_some());
    }

    #[test]
    fn test_pool_allocate_exhausted() {
        let mut pool = MemoryPool::new(1, 64, 8);
        let _ = pool.allocate(64).expect("allocate should succeed");
        // The single buffer is taken, so a second request must fail.
        assert!(pool.allocate(64).is_none());
    }

    #[test]
    fn test_pool_deallocate() {
        let mut pool = MemoryPool::new(2, 128, 16);
        let idx = pool.allocate(64).expect("allocate should succeed");
        assert_eq!(pool.available(), 1);
        pool.deallocate(idx);
        assert_eq!(pool.available(), 2);
    }

    #[test]
    fn test_pool_size_too_large() {
        // Request exceeds every buffer's capacity.
        let mut pool = MemoryPool::new(2, 128, 16);
        assert!(pool.allocate(256).is_none());
    }

    // ----- RingAllocator -----

    #[test]
    fn test_ring_sequential() {
        let mut ring = RingAllocator::new(256);
        // Back-to-back allocations advance linearly through the buffer.
        assert_eq!(ring.allocate(64), Some(0));
        assert_eq!(ring.allocate(64), Some(64));
        assert_eq!(ring.allocate(64), Some(128));
    }

    #[test]
    fn test_ring_free_space() {
        let mut ring = RingAllocator::new(256);
        let _ = ring.allocate(100);
        assert_eq!(ring.free_space(), 156);
    }

    #[test]
    fn test_ring_reset() {
        let mut ring = RingAllocator::new(128);
        let _ = ring.allocate(64);
        ring.reset();
        // After a reset the full capacity is available again from offset 0.
        assert_eq!(ring.free_space(), 128);
        assert_eq!(ring.allocate(128), Some(0));
    }

    #[test]
    fn test_ring_full() {
        let mut ring = RingAllocator::new(64);
        let _ = ring.allocate(64);
        assert!(ring.allocate(1).is_none());
    }

    #[test]
    fn test_ring_free_reclaims() {
        let mut ring = RingAllocator::new(128);
        let _ = ring.allocate(64);
        ring.free(64);
        assert_eq!(ring.free_space(), 128);
    }
}