use crate::core::types::Point2f;
use std::alloc::{Layout, alloc, dealloc};
use std::sync::{Arc, Mutex, OnceLock};
/// Central allocator facade: hands out pooled scratch buffers (`get_*_buffer`)
/// and raw aligned blocks (`allocate_block`), tracking usage in `stats`.
#[derive(Debug)]
pub struct MemoryManager {
    /// Typed free-lists of reusable `Vec`s, shared with recycler closures.
    buffer_pools: Arc<Mutex<BufferPools>>,
    /// Counters updated on acquisition and on `ManagedBuffer` drop.
    stats: Arc<Mutex<MemoryStats>>,
    /// Immutable after construction; read without locking.
    config: MemoryConfig,
}
/// One free-list per supported element type, plus the raw-block pool.
#[derive(Debug)]
struct BufferPools {
    f32_buffers: BufferPool<f32>,
    f64_buffers: BufferPool<f64>,
    u8_buffers: BufferPool<u8>,
    u32_buffers: BufferPool<u32>,
    point_buffers: BufferPool<Point2f>,
    // Behind its own Arc<Mutex<..>> so ManagedBlock::drop can return blocks
    // without going through the outer BufferPools lock.
    block_pool: Arc<Mutex<BlockPool>>,
}
/// Free-list of reusable `Vec<T>`s.
#[derive(Debug)]
struct BufferPool<T> {
    /// Spare buffers awaiting reuse.
    available: Vec<Vec<T>>,
    /// Count of buffers currently handed out (see `get_buffer`/`return_buffer`).
    allocated_count: usize,
    /// Cap on `available.len()`; excess returned buffers are dropped.
    max_pool_size: usize,
    /// Buffers below this capacity are neither pooled nor retained.
    min_capacity: usize,
}
/// Free-list of raw heap blocks used for large allocations.
#[derive(Debug)]
struct BlockPool {
    /// Blocks currently free for reuse (not the live blocks handed out).
    blocks: Vec<MemoryBlock>,
    stats: BlockStats,
}
/// A raw allocation as stored in the block pool's free list.
#[derive(Debug, Clone)]
struct MemoryBlock {
    /// Pointer returned by `alloc(layout)`; freed with `dealloc(ptr, layout)`.
    ptr: *mut u8,
    /// Usable size in bytes (equals `layout.size()` at allocation time).
    size: usize,
    /// Layout of the original allocation; required for deallocation.
    layout: Layout,
}
// SAFETY: the raw pointer denotes a heap allocation exclusively owned by
// whichever pool/ManagedBlock currently holds this value, so moving it to
// another thread is sound. `Sync` is deliberately not implemented.
unsafe impl Send for MemoryBlock {}
/// Snapshot of allocator activity, returned by `MemoryManager::get_stats`.
#[derive(Debug, Clone)]
pub struct MemoryStats {
    // NOTE(review): total_allocated/total_deallocated/current_usage/peak_usage
    // are never written anywhere in this file — they stay 0; confirm whether
    // they are updated elsewhere or are dead fields.
    pub total_allocated: usize,
    pub total_deallocated: usize,
    pub current_usage: usize,
    pub peak_usage: usize,
    /// Buffers currently handed out (incremented on get, decremented on drop).
    pub active_allocations: usize,
    /// Exponential moving average; updated only on pool hits (misses do not
    /// decay it — see `update_pool_hit`).
    pub pool_hit_rate: f32,
    /// Populated from the live pools at `get_stats` time.
    pub pool_stats: PoolStats,
}
/// Per-pool free-list sizes and the total bytes currently held in pools.
#[derive(Debug, Clone)]
pub struct PoolStats {
    pub f32_pool_size: usize,
    pub f64_pool_size: usize,
    pub u8_pool_size: usize,
    pub u32_pool_size: usize,
    pub point_pool_size: usize,
    /// Number of free blocks in the block pool (not live blocks).
    pub block_pool_size: usize,
    /// Sum of capacities (in bytes) across all free lists and free blocks.
    pub total_pool_memory: usize,
}
/// Internal counters for the raw-block pool.
#[derive(Debug, Clone)]
struct BlockStats {
    /// Fresh blocks allocated via `alloc` (excludes reuses).
    total_blocks_allocated: usize,
    /// Blocks satisfied from the free list.
    total_blocks_reused: usize,
    /// High-water mark of the free-list length observed at allocation time.
    peak_block_count: usize,
}
/// Tunables for `MemoryManager`. See `Default` for the stock values.
#[derive(Debug, Clone)]
pub struct MemoryConfig {
    /// When false, `get_*_buffer` always returns a fresh unmanaged Vec.
    pub enable_pooling: bool,
    /// Max spare buffers retained per typed pool.
    pub max_pool_size: usize,
    /// Buffers below this capacity are never pooled.
    pub min_pool_capacity: usize,
    /// Enables `pre_allocate_common_sizes` seeding.
    pub pre_allocate_common_sizes: bool,
    // NOTE(review): track_usage and max_pool_memory are not read anywhere in
    // this file — either wire them up or document them as reserved.
    pub track_usage: bool,
    /// Allocations at or above this size go through the block pool.
    pub large_alloc_threshold: usize,
    pub max_pool_memory: usize,
}
/// RAII wrapper around a `Vec<T>`: on drop, the Vec is handed to `recycler`
/// (returning it to its pool) if one is set, and `active_allocations` is
/// decremented.
pub struct ManagedBuffer<T> {
    /// `None` only after `into_inner` has taken the Vec.
    buffer: Option<Vec<T>>,
    /// Returns the Vec to its originating pool; `None` for unmanaged buffers.
    recycler: Option<Arc<dyn Fn(Vec<T>) + Send + Sync>>,
    /// Shared with the owning manager (or a private dummy when unmanaged).
    stats: Arc<Mutex<MemoryStats>>,
}
impl<T: std::fmt::Debug> std::fmt::Debug for ManagedBuffer<T> {
    /// Shows only the buffer contents; the recycler closure and stats handle
    /// are elided (rendered as `..` by `finish_non_exhaustive`).
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut builder = f.debug_struct("ManagedBuffer");
        builder.field("buffer", &self.buffer);
        builder.finish_non_exhaustive()
    }
}
impl Default for MemoryManager {
fn default() -> Self {
Self::new()
}
}
impl MemoryManager {
pub fn new() -> Self {
Self::with_config(MemoryConfig::default())
}
pub fn with_config(config: MemoryConfig) -> Self {
let buffer_pools = BufferPools::new(&config);
Self {
buffer_pools: Arc::new(Mutex::new(buffer_pools)),
stats: Arc::new(Mutex::new(MemoryStats::new())),
config,
}
}
pub fn get_f32_buffer(&self, min_capacity: usize) -> ManagedBuffer<f32> {
if !self.config.enable_pooling {
return ManagedBuffer::new_unmanaged(Vec::with_capacity(min_capacity));
}
let mut pools = self.buffer_pools.lock().unwrap();
let mut stats = self.stats.lock().unwrap();
let (buffer, reused) = pools.f32_buffers.get_buffer(min_capacity);
let pool_arc = self.buffer_pools.clone();
stats.active_allocations += 1;
if reused {
stats.update_pool_hit();
}
drop(stats);
drop(pools);
ManagedBuffer {
buffer: Some(buffer),
recycler: Some(Arc::new(move |buffer: Vec<f32>| {
if let Ok(mut pools) = pool_arc.lock() {
pools.f32_buffers.return_buffer(buffer);
}
})),
stats: self.stats.clone(),
}
}
pub fn get_f64_buffer(&self, min_capacity: usize) -> ManagedBuffer<f64> {
if !self.config.enable_pooling {
return ManagedBuffer::new_unmanaged(Vec::with_capacity(min_capacity));
}
let mut pools = self.buffer_pools.lock().unwrap();
let mut stats = self.stats.lock().unwrap();
let (buffer, reused) = pools.f64_buffers.get_buffer(min_capacity);
let pool_arc = self.buffer_pools.clone();
stats.active_allocations += 1;
if reused {
stats.update_pool_hit();
}
drop(stats);
drop(pools);
ManagedBuffer {
buffer: Some(buffer),
recycler: Some(Arc::new(move |buffer: Vec<f64>| {
if let Ok(mut pools) = pool_arc.lock() {
pools.f64_buffers.return_buffer(buffer);
}
})),
stats: self.stats.clone(),
}
}
pub fn get_point_buffer(&self, min_capacity: usize) -> ManagedBuffer<Point2f> {
if !self.config.enable_pooling {
return ManagedBuffer::new_unmanaged(Vec::with_capacity(min_capacity));
}
let mut pools = self.buffer_pools.lock().unwrap();
let mut stats = self.stats.lock().unwrap();
let (buffer, reused) = pools.point_buffers.get_buffer(min_capacity);
let pool_arc = self.buffer_pools.clone();
stats.active_allocations += 1;
if reused {
stats.update_pool_hit();
}
drop(stats);
drop(pools);
ManagedBuffer {
buffer: Some(buffer),
recycler: Some(Arc::new(move |buffer: Vec<Point2f>| {
if let Ok(mut pools) = pool_arc.lock() {
pools.point_buffers.return_buffer(buffer);
}
})),
stats: self.stats.clone(),
}
}
pub fn get_u8_buffer(&self, min_capacity: usize) -> ManagedBuffer<u8> {
if !self.config.enable_pooling {
return ManagedBuffer::new_unmanaged(Vec::with_capacity(min_capacity));
}
let mut pools = self.buffer_pools.lock().unwrap();
let mut stats = self.stats.lock().unwrap();
let (buffer, reused) = pools.u8_buffers.get_buffer(min_capacity);
let pool_arc = self.buffer_pools.clone();
stats.active_allocations += 1;
if reused {
stats.update_pool_hit();
}
drop(stats);
drop(pools);
ManagedBuffer {
buffer: Some(buffer),
recycler: Some(Arc::new(move |buffer: Vec<u8>| {
if let Ok(mut pools) = pool_arc.lock() {
pools.u8_buffers.return_buffer(buffer);
}
})),
stats: self.stats.clone(),
}
}
pub fn allocate_block(
&self,
size: usize,
alignment: usize,
) -> Result<ManagedBlock, MemoryError> {
if size >= self.config.large_alloc_threshold {
let pool_arc = {
let pools = self.buffer_pools.lock().unwrap();
pools.block_pool.clone()
};
let mut pool = pool_arc.lock().unwrap();
return pool.allocate_block(size, alignment, pool_arc.clone());
}
let layout =
Layout::from_size_align(size, alignment).map_err(|_| MemoryError::InvalidLayout)?;
unsafe {
let ptr = alloc(layout);
if ptr.is_null() {
return Err(MemoryError::AllocationFailed);
}
Ok(ManagedBlock {
ptr,
size,
layout,
pool: None,
})
}
}
pub fn pre_allocate_common_sizes(&self) {
if !self.config.pre_allocate_common_sizes {
return;
}
let common_sizes = [
100, 1_000, 10_000, 100_000, ];
let mut pools = self.buffer_pools.lock().unwrap();
for &size in &common_sizes {
for _ in 0..3 {
pools.f32_buffers.pre_allocate(size);
pools.f64_buffers.pre_allocate(size);
pools.point_buffers.pre_allocate(size);
}
let pixel_size = size * 4; pools.u8_buffers.pre_allocate(pixel_size);
}
}
pub fn get_stats(&self) -> MemoryStats {
let stats = self.stats.lock().unwrap();
let pools = self.buffer_pools.lock().unwrap();
let mut stats_copy = stats.clone();
stats_copy.pool_stats = pools.get_pool_stats();
stats_copy
}
pub fn clear(&self) {
let mut pools = self.buffer_pools.lock().unwrap();
let mut stats = self.stats.lock().unwrap();
pools.clear();
stats.reset();
}
pub fn config(&self) -> &MemoryConfig {
&self.config
}
}
impl<T> BufferPool<T> {
    /// Creates an empty pool retaining at most `max_pool_size` spare buffers,
    /// each with capacity of at least `min_capacity`.
    fn new(max_pool_size: usize, min_capacity: usize) -> Self {
        Self {
            available: Vec::new(),
            allocated_count: 0,
            max_pool_size,
            min_capacity,
        }
    }

    /// Hands out a buffer with at least `min_capacity` capacity; returns
    /// `(buffer, reused)` where `reused` is true for a pool hit.
    ///
    /// Fixes two defects of the previous version:
    /// - `allocated_count` was incremented only on a pool miss yet decremented
    ///   on every return, so the outstanding-buffer count drifted downward.
    ///   It now counts every buffer handed out.
    /// - `swap_remove` destroyed the capacity ordering that `return_buffer`'s
    ///   binary search relied on. `available` is now kept sorted by capacity:
    ///   `partition_point` finds the smallest sufficient buffer (best fit)
    ///   and the order-preserving `remove` keeps the invariant. The list is
    ///   bounded by `max_pool_size`, so O(n) removal is negligible.
    fn get_buffer(&mut self, min_capacity: usize) -> (Vec<T>, bool) {
        self.allocated_count += 1;
        if min_capacity >= self.min_capacity {
            let pos = self
                .available
                .partition_point(|buf| buf.capacity() < min_capacity);
            if pos < self.available.len() {
                let mut buffer = self.available.remove(pos);
                buffer.clear();
                return (buffer, true);
            }
        }
        (Vec::with_capacity(min_capacity), false)
    }

    /// Returns a buffer for reuse. It is retained only if the pool has room
    /// and the capacity is worth keeping; the sorted-by-capacity invariant is
    /// maintained via `partition_point`.
    fn return_buffer(&mut self, mut buffer: Vec<T>) {
        buffer.clear();
        if self.available.len() < self.max_pool_size && buffer.capacity() >= self.min_capacity {
            let pos = self
                .available
                .partition_point(|buf| buf.capacity() < buffer.capacity());
            self.available.insert(pos, buffer);
        }
        self.allocated_count = self.allocated_count.saturating_sub(1);
    }

    /// Seeds the pool with an empty buffer of the given capacity, keeping the
    /// capacity ordering intact (the previous `push` broke it).
    fn pre_allocate(&mut self, capacity: usize) {
        if self.available.len() < self.max_pool_size {
            let pos = self
                .available
                .partition_point(|buf| buf.capacity() < capacity);
            self.available.insert(pos, Vec::with_capacity(capacity));
        }
    }

    /// Drops all spare buffers and resets the outstanding count.
    fn clear(&mut self) {
        self.available.clear();
        self.allocated_count = 0;
    }

    /// Bytes currently held by spare buffers (capacity, not length).
    fn memory_usage(&self) -> usize {
        self.available
            .iter()
            .map(|buf| buf.capacity() * std::mem::size_of::<T>())
            .sum()
    }
}
impl BufferPools {
    /// Builds one typed pool per element type, all sharing the size limits
    /// from `config`, plus an empty block pool.
    fn new(config: &MemoryConfig) -> Self {
        let (cap, min) = (config.max_pool_size, config.min_pool_capacity);
        Self {
            f32_buffers: BufferPool::new(cap, min),
            f64_buffers: BufferPool::new(cap, min),
            u8_buffers: BufferPool::new(cap, min),
            u32_buffers: BufferPool::new(cap, min),
            point_buffers: BufferPool::new(cap, min),
            block_pool: Arc::new(Mutex::new(BlockPool::new())),
        }
    }

    /// Empties every typed pool; the block pool is cleared best-effort (a
    /// poisoned lock is silently skipped, matching prior behavior).
    fn clear(&mut self) {
        self.f32_buffers.clear();
        self.f64_buffers.clear();
        self.u8_buffers.clear();
        self.u32_buffers.clear();
        self.point_buffers.clear();
        if let Ok(mut blocks) = self.block_pool.lock() {
            blocks.clear();
        }
    }

    /// Snapshot of free-list lengths and pooled bytes across every pool.
    fn get_pool_stats(&self) -> PoolStats {
        let (block_count, block_mem) = self
            .block_pool
            .lock()
            .map(|pool| (pool.blocks.len(), pool.memory_usage()))
            .unwrap_or((0, 0));
        let buffer_mem = self.f32_buffers.memory_usage()
            + self.f64_buffers.memory_usage()
            + self.u8_buffers.memory_usage()
            + self.u32_buffers.memory_usage()
            + self.point_buffers.memory_usage();
        PoolStats {
            f32_pool_size: self.f32_buffers.available.len(),
            f64_pool_size: self.f64_buffers.available.len(),
            u8_pool_size: self.u8_buffers.available.len(),
            u32_pool_size: self.u32_buffers.available.len(),
            point_pool_size: self.point_buffers.available.len(),
            block_pool_size: block_count,
            total_pool_memory: buffer_mem + block_mem,
        }
    }

    /// Total bytes currently held across all pools.
    fn total_memory_usage(&self) -> usize {
        let block_mem = self
            .block_pool
            .lock()
            .map(|pool| pool.memory_usage())
            .unwrap_or(0);
        self.f32_buffers.memory_usage()
            + self.f64_buffers.memory_usage()
            + self.u8_buffers.memory_usage()
            + self.u32_buffers.memory_usage()
            + self.point_buffers.memory_usage()
            + block_mem
    }
}
impl BlockPool {
    /// Creates an empty pool with zeroed statistics.
    fn new() -> Self {
        Self {
            blocks: Vec::new(),
            stats: BlockStats {
                total_blocks_allocated: 0,
                total_blocks_reused: 0,
                peak_block_count: 0,
            },
        }
    }

    /// Hands out a block of at least `size` bytes aligned to `alignment`,
    /// reusing a pooled block when possible.
    ///
    /// Bug fix: reuse previously matched on size alone, so a recycled block
    /// could violate the requested alignment. The layout is now validated up
    /// front (also rejecting invalid alignments on the reuse path) and a
    /// pooled block is accepted only when its original alignment satisfies
    /// the request.
    ///
    /// # Errors
    /// `InvalidLayout` for a zero size or invalid size/alignment pair;
    /// `AllocationFailed` when the allocator returns null.
    fn allocate_block(
        &mut self,
        size: usize,
        alignment: usize,
        pool: Arc<Mutex<BlockPool>>,
    ) -> Result<ManagedBlock, MemoryError> {
        let layout =
            Layout::from_size_align(size, alignment).map_err(|_| MemoryError::InvalidLayout)?;
        if layout.size() == 0 {
            // `alloc` with a zero-sized layout is undefined behavior.
            return Err(MemoryError::InvalidLayout);
        }
        if let Some(pos) = self
            .blocks
            .iter()
            .position(|block| block.size >= size && block.layout.align() >= layout.align())
        {
            let block = self.blocks.swap_remove(pos);
            self.stats.total_blocks_reused += 1;
            return Ok(ManagedBlock {
                ptr: block.ptr,
                size: block.size,
                layout: block.layout,
                pool: Some(pool),
            });
        }
        // SAFETY: `layout` is valid and non-zero-sized; the pointer is
        // null-checked before it is wrapped and is released with this same
        // layout either in `clear` or in ManagedBlock::drop.
        unsafe {
            let ptr = alloc(layout);
            if ptr.is_null() {
                return Err(MemoryError::AllocationFailed);
            }
            self.stats.total_blocks_allocated += 1;
            // NOTE(review): this records the free-list length, not the number
            // of live blocks; confirm the intended meaning of
            // `peak_block_count` before relying on it.
            self.stats.peak_block_count = self.stats.peak_block_count.max(self.blocks.len() + 1);
            Ok(ManagedBlock {
                ptr,
                size,
                layout,
                pool: Some(pool),
            })
        }
    }

    /// Accepts a block back into the free list for future reuse.
    fn return_block(&mut self, block: MemoryBlock) {
        self.blocks.push(block);
    }

    /// Frees every pooled block and resets the statistics.
    fn clear(&mut self) {
        for block in self.blocks.drain(..) {
            // SAFETY: each pooled block was allocated with exactly this
            // layout and is exclusively owned by the pool at this point.
            unsafe {
                dealloc(block.ptr, block.layout);
            }
        }
        self.stats = BlockStats {
            total_blocks_allocated: 0,
            total_blocks_reused: 0,
            peak_block_count: 0,
        };
    }

    /// Total bytes currently held in the free list.
    fn memory_usage(&self) -> usize {
        self.blocks.iter().map(|block| block.size).sum()
    }
}
impl MemoryStats {
fn new() -> Self {
Self {
total_allocated: 0,
total_deallocated: 0,
current_usage: 0,
peak_usage: 0,
active_allocations: 0,
pool_hit_rate: 0.0,
pool_stats: PoolStats {
f32_pool_size: 0,
f64_pool_size: 0,
u8_pool_size: 0,
u32_pool_size: 0,
point_pool_size: 0,
block_pool_size: 0,
total_pool_memory: 0,
},
}
}
fn update_pool_hit(&mut self) {
self.pool_hit_rate = self.pool_hit_rate * 0.9 + 0.1;
}
fn reset(&mut self) {
*self = Self::new();
}
}
impl Default for MemoryConfig {
    /// Stock configuration: pooling on, up to 10 spare buffers per pool with
    /// a 100-element minimum, pre-allocation and tracking enabled, 1 MiB
    /// large-allocation threshold, 100 MiB pool-memory cap.
    fn default() -> Self {
        Self {
            enable_pooling: true,
            max_pool_size: 10,
            min_pool_capacity: 100,
            pre_allocate_common_sizes: true,
            track_usage: true,
            large_alloc_threshold: 1024 * 1024,   // 1 MiB
            max_pool_memory: 100 * 1024 * 1024,   // 100 MiB
        }
    }
}
impl<T> ManagedBuffer<T> {
    /// Wraps a buffer that bypasses pooling: no recycler, and a private dummy
    /// stats record so `Drop` has something harmless to decrement.
    fn new_unmanaged(buffer: Vec<T>) -> Self {
        Self {
            buffer: Some(buffer),
            recycler: None,
            stats: Arc::new(Mutex::new(MemoryStats::new())),
        }
    }
    /// Mutable access to the underlying `Vec`.
    ///
    /// The `unwrap` cannot fire through safe use: `buffer` is only `None`
    /// after `into_inner`, which consumes `self`.
    pub fn get_mut(&mut self) -> &mut Vec<T> {
        self.buffer.as_mut().unwrap()
    }
    /// Shared access to the underlying `Vec` (same non-panicking invariant
    /// as `get_mut`).
    pub fn get(&self) -> &Vec<T> {
        self.buffer.as_ref().unwrap()
    }
    /// Takes ownership of the `Vec`, opting out of recycling: `Drop` then
    /// sees `None` and neither recycles nor touches the stats.
    pub fn into_inner(mut self) -> Vec<T> {
        self.buffer.take().unwrap()
    }
}
impl<T> Drop for ManagedBuffer<T> {
    /// Returns the buffer to its pool (when managed) and decrements the
    /// active-allocation counter. Skipped entirely if `into_inner` already
    /// took the buffer.
    ///
    /// Bug fix: the stats lock was `unwrap()`ed here; a poisoned mutex during
    /// unwinding would panic inside `drop` and abort the process. The guard
    /// is now recovered with `PoisonError::into_inner` instead.
    fn drop(&mut self) {
        if let Some(buffer) = self.buffer.take() {
            if let Some(recycler) = &self.recycler {
                recycler(buffer);
            }
            let mut stats = match self.stats.lock() {
                Ok(guard) => guard,
                Err(poisoned) => poisoned.into_inner(),
            };
            stats.active_allocations = stats.active_allocations.saturating_sub(1);
        }
    }
}
/// RAII handle for a raw aligned allocation: on drop it is returned to its
/// pool (large allocations) or deallocated (direct allocations).
pub struct ManagedBlock {
    ptr: *mut u8,
    size: usize,
    layout: Layout,
    /// `Some` when the block came from (and returns to) the block pool.
    pool: Option<Arc<Mutex<BlockPool>>>,
}

impl ManagedBlock {
    /// Raw pointer to the start of the block. Valid for `len()` bytes for
    /// the lifetime of this handle.
    ///
    /// (New: `ptr`/`size` were private with no accessors, so callers of the
    /// public `allocate_block` could not actually use the memory.)
    pub fn as_ptr(&self) -> *mut u8 {
        self.ptr
    }

    /// Usable size in bytes; may exceed the requested size when a larger
    /// pooled block was reused.
    pub fn len(&self) -> usize {
        self.size
    }

    /// True when the block has zero usable bytes.
    pub fn is_empty(&self) -> bool {
        self.size == 0
    }
}

// SAFETY: the pointer denotes a heap allocation exclusively owned by this
// handle (or by the pool once returned), so transferring the handle across
// threads is sound. `Sync` is deliberately not implemented.
unsafe impl Send for ManagedBlock {}
impl Drop for ManagedBlock {
    /// Returns the block to its pool when pooled, otherwise frees it.
    ///
    /// Bug fix: the pool lock was `unwrap()`ed; a poisoned mutex would panic
    /// inside `drop` (aborting the process if already unwinding) and leak the
    /// block. The guard is now recovered with `PoisonError::into_inner`, so
    /// the block is always either pooled or freed.
    fn drop(&mut self) {
        if let Some(pool_arc) = self.pool.take() {
            let mut pool = pool_arc.lock().unwrap_or_else(|poisoned| poisoned.into_inner());
            pool.return_block(MemoryBlock {
                ptr: self.ptr,
                size: self.size,
                layout: self.layout,
            });
        } else {
            // SAFETY: `ptr` was produced by `alloc(self.layout)` and is
            // exclusively owned by this handle.
            unsafe {
                dealloc(self.ptr, self.layout);
            }
        }
    }
}
/// Errors produced by block allocation and global-manager initialization.
///
/// Now derives `Copy`, `PartialEq`, `Eq` (backward compatible) so callers can
/// compare errors directly instead of matching on `Debug` output.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum MemoryError {
    /// The underlying allocator returned null.
    AllocationFailed,
    /// The requested size/alignment pair is not a valid `Layout`.
    InvalidLayout,
    /// Reserved for pool-capacity exhaustion.
    PoolExhausted,
}
impl std::fmt::Display for MemoryError {
    /// Human-readable description of the failure.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let message = match self {
            MemoryError::AllocationFailed => "Memory allocation failed",
            MemoryError::InvalidLayout => "Invalid memory layout",
            MemoryError::PoolExhausted => "Memory pool exhausted",
        };
        f.write_str(message)
    }
}

impl std::error::Error for MemoryError {}
/// Process-wide manager, initialized lazily on first access.
static GLOBAL_MEMORY_MANAGER: OnceLock<MemoryManager> = OnceLock::new();

/// Returns the global manager, creating it with the default configuration on
/// first use. The first caller pays the pre-allocation cost.
pub fn get_memory_manager() -> &'static MemoryManager {
    GLOBAL_MEMORY_MANAGER.get_or_init(|| {
        let manager = MemoryManager::with_config(MemoryConfig::default());
        manager.pre_allocate_common_sizes();
        manager
    })
}
/// Installs the global manager with a custom configuration.
///
/// Must be called before the first `get_memory_manager()` call; fails once
/// the global `OnceLock` is already set by either function.
///
/// NOTE(review): `InvalidLayout` is repurposed here to mean "already
/// initialized", which is misleading — a dedicated error variant would be
/// clearer; verify no caller matches on this value before changing it.
pub fn initialize_memory_manager(config: MemoryConfig) -> Result<(), MemoryError> {
    let manager = MemoryManager::with_config(config);
    manager.pre_allocate_common_sizes();
    GLOBAL_MEMORY_MANAGER
        .set(manager)
        .map_err(|_| MemoryError::InvalidLayout)
}
#[cfg(test)]
mod tests {
    use super::*;

    // A fresh manager starts with zeroed counters.
    #[test]
    fn test_memory_manager_creation() {
        let manager = MemoryManager::new();
        let stats = manager.get_stats();
        assert_eq!(stats.active_allocations, 0);
        assert_eq!(stats.current_usage, 0);
    }

    // Acquiring a buffer bumps active_allocations; dropping it restores zero.
    #[test]
    fn test_buffer_pooling() {
        let manager = MemoryManager::new();
        {
            let _buffer = manager.get_f32_buffer(1000);
            let stats = manager.get_stats();
            assert_eq!(stats.active_allocations, 1);
        }
        let stats = manager.get_stats();
        assert_eq!(stats.active_allocations, 0);
    }

    // A returned buffer should satisfy a later, smaller request.
    // NOTE(review): `pool_hit_rate >= 0.0` is trivially true and does not
    // actually verify reuse — a stronger assertion would check > 0.0.
    #[test]
    fn test_buffer_reuse() {
        let manager = MemoryManager::new();
        {
            let _buffer = manager.get_f32_buffer(1000);
        }
        {
            let _buffer = manager.get_f32_buffer(800);
            let stats = manager.get_stats();
            assert!(stats.pool_hit_rate >= 0.0);
        }
    }

    // Pre-allocation should leave measurable memory parked in the pools.
    #[test]
    fn test_pre_allocation() {
        let manager = MemoryManager::new();
        manager.pre_allocate_common_sizes();
        let stats = manager.get_stats();
        assert!(stats.pool_stats.total_pool_memory > 0);
    }

    // Custom configuration values are preserved verbatim.
    #[test]
    fn test_memory_config() {
        let config = MemoryConfig {
            enable_pooling: false,
            max_pool_size: 5,
            ..Default::default()
        };
        let manager = MemoryManager::with_config(config);
        assert!(!manager.config().enable_pooling);
        assert_eq!(manager.config().max_pool_size, 5);
    }

    // Small (below-threshold) block allocation succeeds and frees on drop.
    #[test]
    fn test_allocate_block_smoke() {
        let manager = MemoryManager::new();
        let block = manager.allocate_block(64, 8);
        assert!(block.is_ok());
        drop(block);
    }
}