use std::{
alloc::{Layout, alloc, dealloc},
collections::HashMap,
ptr::NonNull,
sync::atomic::{AtomicUsize, Ordering},
};
use typescript_types::TsValue;
/// Size classes (bytes) served from per-class free lists; requests above the
/// largest class are rounded up to multiples of `LARGE_BLOCK_SIZE` instead.
const BLOCK_SIZES: &[usize] = &[8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096];
/// Granularity used to round requests larger than the biggest size class.
const LARGE_BLOCK_SIZE: usize = 4096;
/// Size-class-based block cache over the system allocator: freed blocks are
/// kept on per-class free lists and reused by later `allocate` calls.
pub struct MemoryPool {
    /// Per-size-class stacks of recycled blocks, keyed by block size in bytes.
    free_blocks: HashMap<usize, Vec<NonNull<u8>>>,
    /// Bytes currently obtained from the system allocator (cached + in use).
    total_allocated: AtomicUsize,
    /// Bytes currently handed out to callers.
    total_used: AtomicUsize,
}
// SAFETY assumption: the cached `NonNull<u8>` pointers own heap blocks that
// are not aliased elsewhere, so moving the pool between threads is sound.
unsafe impl Send for MemoryPool {}
// SAFETY assumption: all mutation goes through `&mut self`, so shared `&`
// access can only read the atomics and cannot race on the free lists.
// NOTE(review): confirm no interior mutability is ever added to the map.
unsafe impl Sync for MemoryPool {}
impl MemoryPool {
    /// Creates an empty pool with a free list for each size class in
    /// `BLOCK_SIZES`.
    pub fn new() -> Self {
        let mut free_blocks = HashMap::new();
        for &size in BLOCK_SIZES {
            free_blocks.insert(size, Vec::new());
        }
        Self { free_blocks, total_allocated: AtomicUsize::new(0), total_used: AtomicUsize::new(0) }
    }

    /// Hands out a block of at least `size` bytes with 8-byte alignment,
    /// reusing a cached block of the matching size class when available.
    ///
    /// Returns `None` if the rounded size overflows `usize`, the layout is
    /// invalid, or the system allocator fails.
    pub fn allocate(&mut self, size: usize) -> Option<NonNull<u8>> {
        let block_size = self.round_up_block_size(size)?;
        if let Some(blocks) = self.free_blocks.get_mut(&block_size) {
            if let Some(block) = blocks.pop() {
                self.total_used.fetch_add(block_size, Ordering::Relaxed);
                return Some(block);
            }
        }
        // Cache miss: go to the system allocator.
        let layout = Layout::from_size_align(block_size, 8).ok()?;
        unsafe {
            // SAFETY: `layout` has non-zero size — every size class is >= 8.
            let ptr = alloc(layout);
            if ptr.is_null() {
                return None;
            }
            self.total_allocated.fetch_add(block_size, Ordering::Relaxed);
            self.total_used.fetch_add(block_size, Ordering::Relaxed);
            Some(NonNull::new_unchecked(ptr))
        }
    }

    /// Returns `ptr` — which must have come from `allocate(size)` on this
    /// pool — for reuse. Pooled size classes are cached; oversized blocks
    /// are released to the system allocator immediately.
    pub fn deallocate(&mut self, ptr: NonNull<u8>, size: usize) {
        let block_size = match self.round_up_block_size(size) {
            Some(b) => b,
            // A size this large can never have been allocated; ignore it
            // rather than feed a bogus layout to `dealloc`.
            None => return,
        };
        if let Some(blocks) = self.free_blocks.get_mut(&block_size) {
            blocks.push(ptr);
            self.total_used.fetch_sub(block_size, Ordering::Relaxed);
        } else {
            unsafe {
                // SAFETY: `ptr` was produced by `allocate` with exactly this
                // size/alignment, so the layout reconstruction matches.
                let layout = Layout::from_size_align(block_size, 8)
                    .expect("layout was valid when the block was allocated");
                dealloc(ptr.as_ptr(), layout);
                self.total_allocated.fetch_sub(block_size, Ordering::Relaxed);
                self.total_used.fetch_sub(block_size, Ordering::Relaxed);
            }
        }
    }

    /// Snapshot of the byte counters and per-class free-list lengths.
    pub fn stats(&self) -> MemoryStats {
        MemoryStats {
            total_allocated: self.total_allocated.load(Ordering::Relaxed),
            total_used: self.total_used.load(Ordering::Relaxed),
            free_blocks: self.free_blocks.iter().map(|(size, blocks)| (*size, blocks.len())).collect(),
        }
    }

    /// Frees every cached block back to the system allocator. Blocks still
    /// checked out by callers are untouched.
    pub fn cleanup(&mut self) {
        for (size, blocks) in &mut self.free_blocks {
            for block in blocks.drain(..) {
                unsafe {
                    // SAFETY: cached blocks were allocated with exactly this
                    // size-class layout in `allocate`.
                    let layout = Layout::from_size_align(*size, 8).unwrap();
                    dealloc(block.as_ptr(), layout);
                }
                self.total_allocated.fetch_sub(*size, Ordering::Relaxed);
            }
        }
    }

    /// Rounds `size` up to the smallest pooled class, or — beyond the
    /// largest class — to the next multiple of `LARGE_BLOCK_SIZE`.
    ///
    /// Returns `None` on overflow; the previous unchecked arithmetic
    /// (`size + LARGE_BLOCK_SIZE - 1`) could wrap for huge sizes and hand
    /// back an undersized block, corrupting the caller's heap.
    fn round_up_block_size(&self, size: usize) -> Option<usize> {
        for &block_size in BLOCK_SIZES {
            if size <= block_size {
                return Some(block_size);
            }
        }
        let blocks = size.checked_add(LARGE_BLOCK_SIZE - 1)? / LARGE_BLOCK_SIZE;
        blocks.checked_mul(LARGE_BLOCK_SIZE)
    }
}
impl Default for MemoryPool {
fn default() -> Self {
Self::new()
}
}
impl Drop for MemoryPool {
    /// Frees all cached blocks via `cleanup`. Blocks still held by callers
    /// at drop time are NOT freed here and will leak unless deallocated
    /// through the pool first.
    fn drop(&mut self) {
        self.cleanup();
    }
}
/// Generic object cache: `acquire` reuses a released object (after running
/// `reset` on it) or builds a new one with `factory`.
pub struct ObjectPool<T> {
    /// Objects waiting to be reused.
    free_objects: Vec<T>,
    /// Maximum number of cached objects; excess releases are dropped.
    capacity: usize,
    /// Builds a fresh object on cache miss.
    factory: Box<dyn Fn() -> T>,
    /// Scrubs a recycled object before it is handed out again.
    reset: Box<dyn Fn(&mut T)>,
}
impl<T> ObjectPool<T> {
    /// Creates a pool caching up to `capacity` released objects. `factory`
    /// builds a new object on cache miss; `reset` scrubs a cached object
    /// before it is handed out again.
    pub fn new<F, R>(capacity: usize, factory: F, reset: R) -> Self
    where
        F: Fn() -> T + 'static,
        R: Fn(&mut T) + 'static,
    {
        Self {
            free_objects: Vec::with_capacity(capacity),
            capacity,
            factory: Box::new(factory),
            reset: Box::new(reset),
        }
    }

    /// Hands out a cached object (after resetting it) or builds a fresh one.
    pub fn acquire(&mut self) -> T {
        match self.free_objects.pop() {
            Some(mut recycled) => {
                (self.reset)(&mut recycled);
                recycled
            }
            None => (self.factory)(),
        }
    }

    /// Returns `obj` to the cache; silently drops it when the cache is full.
    pub fn release(&mut self, obj: T) {
        if self.free_objects.len() < self.capacity {
            self.free_objects.push(obj);
        }
    }

    /// Number of objects currently cached.
    pub fn free_count(&self) -> usize {
        self.free_objects.len()
    }

    /// Drops every cached object.
    pub fn clear(&mut self) {
        self.free_objects.clear();
    }
}
/// Point-in-time snapshot of an allocator's byte counters and free lists.
#[derive(Debug, Clone)]
pub struct MemoryStats {
    /// Bytes currently obtained from the system allocator.
    pub total_allocated: usize,
    /// Bytes currently handed out to callers.
    pub total_used: usize,
    /// Free-block count per block size (bytes).
    pub free_blocks: HashMap<usize, usize>,
}
impl MemoryStats {
    /// Bytes obtained from the system but not currently handed out
    /// (saturates at zero if the counters are momentarily inconsistent).
    pub fn total_free(&self) -> usize {
        self.total_allocated.saturating_sub(self.total_used)
    }

    /// Share of allocated bytes currently in use, as a percentage;
    /// `0.0` when nothing has been allocated.
    pub fn usage_percentage(&self) -> f64 {
        match self.total_allocated {
            0 => 0.0,
            total => self.total_used as f64 / total as f64 * 100.0,
        }
    }
}
/// Common interface over the pooling allocators in this module.
pub trait Allocator: Send {
    /// Returns a block of at least `size` bytes, or `None` on failure.
    fn allocate(&mut self, size: usize) -> Option<NonNull<u8>>;
    /// Returns a block previously obtained from `allocate` with the same `size`.
    fn deallocate(&mut self, ptr: NonNull<u8>, size: usize);
    /// Current byte counters and free-list sizes.
    fn stats(&self) -> MemoryStats;
    /// Boxed duplicate. NOTE(review): both implementations in this file
    /// return a *fresh* allocator rather than copying cached state —
    /// confirm callers expect "clone" semantics to mean that.
    fn box_clone(&self) -> Box<dyn Allocator>;
}
/// Classic buddy allocator over a single power-of-two arena: blocks are
/// split in halves on demand and re-merged with their "buddy" on free.
pub struct BuddyAllocator {
    /// `free_lists[i]` holds free blocks of size `min_block_size << i`.
    free_lists: Vec<Vec<NonNull<u8>>>,
    /// Bytes reserved from the system (the whole arena, set once in `new`).
    total_allocated: AtomicUsize,
    /// Bytes currently handed out.
    total_used: AtomicUsize,
    /// Smallest block size (power of two) the arena splits down to.
    min_block_size: usize,
    /// Total arena size (power of two); also its alignment.
    max_block_size: usize,
}
// SAFETY assumption: the free-list pointers all reference the arena owned
// exclusively by this allocator, so moving it across threads is sound.
unsafe impl Send for BuddyAllocator {}
// SAFETY assumption: all mutation requires `&mut self`; shared references
// only read the atomics. NOTE(review): confirm no interior mutability is
// ever added to the free lists.
unsafe impl Sync for BuddyAllocator {}
impl BuddyAllocator {
    /// Creates a buddy allocator over a single contiguous region of
    /// `max_block_size` bytes (both sizes are rounded up to powers of two).
    /// The region is split on demand down to `min_block_size`; the
    /// allocator never grows beyond this initial arena.
    ///
    /// # Panics
    /// Panics if the rounded `min_block_size` exceeds the rounded
    /// `max_block_size`, or if the initial region cannot be allocated.
    pub fn new(min_block_size: usize, max_block_size: usize) -> Self {
        let min_block_size = Self::next_power_of_two(min_block_size);
        let max_block_size = Self::next_power_of_two(max_block_size);
        // Guard the level computation below against u32 underflow when the
        // arguments are passed in the wrong order.
        assert!(
            min_block_size <= max_block_size,
            "min_block_size ({}) must not exceed max_block_size ({})",
            min_block_size,
            max_block_size
        );
        let levels = (max_block_size.trailing_zeros() - min_block_size.trailing_zeros() + 1) as usize;
        let mut free_lists = vec![Vec::new(); levels];
        let initial_memory = unsafe {
            // The whole arena is one allocation aligned to its own size, so
            // buddy addresses can be derived by XOR-ing with the block size.
            let layout = Layout::from_size_align(max_block_size, max_block_size).unwrap();
            let ptr = alloc(layout);
            if ptr.is_null() {
                panic!("Failed to allocate initial memory for buddy allocator");
            }
            NonNull::new_unchecked(ptr)
        };
        // The untouched arena starts as one free block at the top level.
        let max_level = free_lists.len() - 1;
        free_lists[max_level].push(initial_memory);
        Self {
            free_lists,
            total_allocated: AtomicUsize::new(max_block_size),
            total_used: AtomicUsize::new(0),
            min_block_size,
            max_block_size,
        }
    }

    /// Rounds `size` up to the next power of two; `0` maps to `1`.
    ///
    /// Delegates to the standard library. The previous hand-rolled bit-smear
    /// underflowed for `size == 0` (panic in debug builds, `0` in release,
    /// which broke every downstream level computation).
    fn next_power_of_two(size: usize) -> usize {
        size.max(1).next_power_of_two()
    }

    /// Maps a block size to its free-list index: level 0 holds
    /// `min_block_size` blocks, the top level holds the full arena.
    fn get_level(&self, size: usize) -> usize {
        let normalized_size =
            if size < self.min_block_size { self.min_block_size } else { Self::next_power_of_two(size) };
        (normalized_size.trailing_zeros() - self.min_block_size.trailing_zeros()) as usize
    }

    /// Address of the sibling of `ptr` at block size `size`, computed by
    /// flipping the `size` bit of the address.
    fn find_buddy(&self, ptr: NonNull<u8>, size: usize) -> NonNull<u8> {
        let ptr_addr = ptr.as_ptr() as usize;
        let buddy_addr = ptr_addr ^ size;
        // The arena base is non-null and max-aligned, so flipping a bit
        // below the arena size can never produce address zero.
        NonNull::new(buddy_addr as *mut u8).expect("buddy address within the arena is non-null")
    }

    /// True when `buddy` currently sits on the free list for `level`.
    fn is_buddy_free(&self, buddy: NonNull<u8>, level: usize) -> bool {
        self.free_lists[level].contains(&buddy)
    }
}
impl Allocator for BuddyAllocator {
    /// Pops the smallest free block that fits `size`, splitting larger
    /// blocks down to the required power-of-two size and parking each
    /// split-off half (the "buddy") on its level's free list. Returns
    /// `None` when the arena is exhausted — this allocator never grows.
    fn allocate(&mut self, size: usize) -> Option<NonNull<u8>> {
        let required_size = if size < self.min_block_size { self.min_block_size } else { Self::next_power_of_two(size) };
        let level = self.get_level(required_size);
        // Scan from the exact-fit level upward for any free block.
        for i in level..self.free_lists.len() {
            if !self.free_lists[i].is_empty() {
                let block = self.free_lists[i].pop().unwrap();
                let mut current_size = self.min_block_size << i;
                let mut current_block = block;
                let mut current_level = i;
                // Split in halves until the block matches the requested
                // size; the other half (buddy, address XOR size) is freed
                // one level down at each step.
                while current_size > required_size {
                    current_size /= 2;
                    let buddy = self.find_buddy(current_block, current_size);
                    current_level -= 1;
                    self.free_lists[current_level].push(buddy);
                }
                self.total_used.fetch_add(required_size, Ordering::Relaxed);
                return Some(current_block);
            }
        }
        None
    }
    /// Returns a block and greedily re-merges it with its buddy while the
    /// buddy is also free, climbing one level per merge; the merged block
    /// keeps the lower of the two addresses.
    ///
    /// NOTE(review): `size` must match the size passed to `allocate` for
    /// the accounting and buddy math to hold — confirm callers uphold this.
    fn deallocate(&mut self, ptr: NonNull<u8>, size: usize) {
        let required_size = if size < self.min_block_size { self.min_block_size } else { Self::next_power_of_two(size) };
        let mut level = self.get_level(required_size);
        let mut current_block = ptr;
        let mut current_size = required_size;
        loop {
            let buddy = self.find_buddy(current_block, current_size);
            // Stop merging at the top level or when the buddy is still in use.
            if level >= self.free_lists.len() - 1 || !self.is_buddy_free(buddy, level) {
                self.free_lists[level].push(current_block);
                break;
            }
            // Buddy is free: remove it from its list and merge upward.
            let index = self.free_lists[level].iter().position(|&b| b == buddy).unwrap();
            self.free_lists[level].remove(index);
            current_block = if (current_block.as_ptr() as usize) < (buddy.as_ptr() as usize) { current_block } else { buddy };
            current_size *= 2;
            level += 1;
        }
        self.total_used.fetch_sub(required_size, Ordering::Relaxed);
    }
    /// Snapshot of the counters plus the free-block count per level size.
    fn stats(&self) -> MemoryStats {
        let mut free_blocks = HashMap::new();
        for (i, list) in self.free_lists.iter().enumerate() {
            let block_size = self.min_block_size << i;
            free_blocks.insert(block_size, list.len());
        }
        MemoryStats {
            total_allocated: self.total_allocated.load(Ordering::Relaxed),
            total_used: self.total_used.load(Ordering::Relaxed),
            free_blocks,
        }
    }
    /// Builds a *fresh* allocator with the same size parameters; cached
    /// arena state is not (and cannot safely be) copied.
    fn box_clone(&self) -> Box<dyn Allocator> {
        Box::new(BuddyAllocator::new(self.min_block_size, self.max_block_size))
    }
}
impl Drop for BuddyAllocator {
fn drop(&mut self) {
}
}
/// Selects which `Allocator` implementation `AllocatorFactory::create` builds.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum AllocationStrategy {
    /// Size-class pool backed by the system allocator (`DefaultAllocator`).
    Default,
    /// Buddy allocator (created by the factory with an 8-byte minimum and
    /// a 4096-byte arena).
    Buddy,
}
/// `Allocator` adapter around `MemoryPool`.
pub struct DefaultAllocator {
    /// Underlying size-class pool.
    pool: MemoryPool,
}
impl DefaultAllocator {
pub fn new() -> Self {
Self { pool: MemoryPool::new() }
}
}
impl Default for DefaultAllocator {
    /// Equivalent to `DefaultAllocator::new()`.
    fn default() -> Self {
        Self::new()
    }
}
impl Allocator for DefaultAllocator {
fn allocate(&mut self, size: usize) -> Option<NonNull<u8>> {
self.pool.allocate(size)
}
fn deallocate(&mut self, ptr: NonNull<u8>, size: usize) {
self.pool.deallocate(ptr, size);
}
fn stats(&self) -> MemoryStats {
self.pool.stats()
}
fn box_clone(&self) -> Box<dyn Allocator> {
Box::new(DefaultAllocator { pool: MemoryPool::new() })
}
}
/// Builds boxed `Allocator` instances from an `AllocationStrategy`.
pub struct AllocatorFactory;

impl AllocatorFactory {
    /// Instantiates a boxed allocator for the requested `strategy`.
    pub fn create(strategy: AllocationStrategy) -> Box<dyn Allocator> {
        match strategy {
            AllocationStrategy::Buddy => Box::new(BuddyAllocator::new(8, 4096)),
            AllocationStrategy::Default => Box::new(DefaultAllocator::new()),
        }
    }
}
/// Facade bundling the raw byte pool with typed object pools and a simple
/// allocation-count GC trigger.
pub struct GlobalMemoryManager {
    /// Raw byte allocations.
    pool: MemoryPool,
    /// Reusable `String` buffers (up to 1000 cached — see `new`).
    string_pool: ObjectPool<String>,
    /// Reusable `Vec<TsValue>` buffers (up to 500 cached).
    tsvalue_array_pool: ObjectPool<Vec<TsValue>>,
    /// Reusable key/value vectors (up to 500 cached).
    string_tsvalue_pool: ObjectPool<Vec<(String, TsValue)>>,
    /// Allocation count at which `should_gc` reports true.
    gc_threshold: usize,
    /// Allocations observed since the last `reset_gc_counter`.
    allocations_since_gc: AtomicUsize,
}
impl GlobalMemoryManager {
    /// Builds a manager with a fresh byte pool and pre-sized object pools;
    /// `gc_threshold` is the allocation count at which `should_gc` starts
    /// returning true.
    pub fn new(gc_threshold: usize) -> Self {
        let string_pool = ObjectPool::new(1000, String::new, |s: &mut String| s.clear());
        let tsvalue_array_pool =
            ObjectPool::new(500, || Vec::with_capacity(10), |v: &mut Vec<TsValue>| v.clear());
        let string_tsvalue_pool =
            ObjectPool::new(500, || Vec::with_capacity(10), |v: &mut Vec<(String, TsValue)>| v.clear());
        Self {
            pool: MemoryPool::new(),
            string_pool,
            tsvalue_array_pool,
            string_tsvalue_pool,
            gc_threshold,
            allocations_since_gc: AtomicUsize::new(0),
        }
    }

    /// Allocates `size` bytes from the byte pool, counting the allocation
    /// toward the GC trigger.
    pub fn allocate(&mut self, size: usize) -> Option<NonNull<u8>> {
        self.allocations_since_gc.fetch_add(1, Ordering::Relaxed);
        self.pool.allocate(size)
    }

    /// Returns `ptr` (of `size` bytes, obtained from `allocate`) to the pool.
    pub fn deallocate(&mut self, ptr: NonNull<u8>, size: usize) {
        self.pool.deallocate(ptr, size)
    }

    /// Checks out a reusable (cleared) `String`.
    pub fn acquire_string(&mut self) -> String {
        self.string_pool.acquire()
    }

    /// Returns a `String` to its pool for later reuse.
    pub fn release_string(&mut self, s: String) {
        self.string_pool.release(s)
    }

    /// Checks out a reusable (cleared) `Vec<TsValue>`.
    pub fn acquire_tsvalue_array(&mut self) -> Vec<TsValue> {
        self.tsvalue_array_pool.acquire()
    }

    /// Returns a `Vec<TsValue>` to its pool for later reuse.
    pub fn release_tsvalue_array(&mut self, v: Vec<TsValue>) {
        self.tsvalue_array_pool.release(v)
    }

    /// Checks out a reusable (cleared) key/value vector.
    pub fn acquire_string_tsvalue(&mut self) -> Vec<(String, TsValue)> {
        self.string_tsvalue_pool.acquire()
    }

    /// Returns a key/value vector to its pool for later reuse.
    pub fn release_string_tsvalue(&mut self, v: Vec<(String, TsValue)>) {
        self.string_tsvalue_pool.release(v)
    }

    /// True once the allocation counter has reached the GC threshold.
    pub fn should_gc(&self) -> bool {
        self.allocations_since_gc.load(Ordering::Relaxed) >= self.gc_threshold
    }

    /// Zeroes the allocation counter (call after a GC pass).
    pub fn reset_gc_counter(&self) {
        self.allocations_since_gc.store(0, Ordering::Relaxed)
    }

    /// Stats of the underlying byte pool (object pools are not included).
    pub fn stats(&self) -> MemoryStats {
        self.pool.stats()
    }

    /// Updates the GC trigger threshold.
    pub fn set_gc_threshold(&mut self, threshold: usize) {
        self.gc_threshold = threshold
    }

    /// Current GC trigger threshold.
    pub fn gc_threshold(&self) -> usize {
        self.gc_threshold
    }

    /// Frees all cached byte blocks and drops every cached object.
    pub fn cleanup(&mut self) {
        self.pool.cleanup();
        self.string_pool.clear();
        self.tsvalue_array_pool.clear();
        self.string_tsvalue_pool.clear()
    }
}
impl Default for GlobalMemoryManager {
    /// Manager with a default GC threshold of 10000 allocations.
    fn default() -> Self {
        Self::new(10000)
    }
}