use std::{
alloc::{GlobalAlloc, Layout, System},
mem::{self, ManuallyDrop},
ptr::NonNull,
sync::{
Mutex,
atomic::{AtomicBool, Ordering},
},
};
#[cfg(target_os = "windows")]
use std::sync::Condvar;
use oxc_ast_macros::ast;
use oxc_data_structures::stack::Stack;
use crate::{
Allocator,
generated::fixed_size_constants::{BLOCK_ALIGN, BLOCK_SIZE, RAW_METADATA_SIZE},
};
/// 2 GiB (`1 << 31` bytes) — also used as the backing allocation's alignment (`ALLOC_ALIGN`).
const TWO_GIB: usize = 1 << 31;
/// 4 GiB (`1 << 32` bytes) — modulus used in `FixedSizeAllocator::try_new` to round the
/// allocation pointer up to a 4-GiB boundary.
const FOUR_GIB: usize = 1 << 32;
/// A pool of pre-created fixed-size allocators, shared between threads.
pub struct FixedSizeAllocatorPool {
    /// Stack of allocators currently available for checkout via `get`.
    allocators: Mutex<Stack<FixedSizeAllocator>>,
    /// Signalled by `add` when an allocator is returned to the pool.
    /// Windows only: there `get` blocks on an empty pool instead of panicking.
    #[cfg(target_os = "windows")]
    available: Condvar,
}
impl FixedSizeAllocatorPool {
    /// Create a pool containing `thread_count` allocators.
    ///
    /// # Panics
    /// Panics if creating any of the allocators fails.
    #[cfg(not(target_os = "windows"))]
    pub fn new(thread_count: usize) -> Self {
        let mut stack = Stack::with_capacity(thread_count);
        for index in 0..thread_count {
            #[expect(clippy::cast_possible_truncation)]
            let id = index as u32;
            stack.push(FixedSizeAllocator::try_new(id).unwrap());
        }
        Self { allocators: Mutex::new(stack) }
    }

    /// Create a pool for `thread_count` threads (Windows variant).
    ///
    /// Attempts to create `thread_count + 1` allocators, stopping at the first
    /// failure. If more than one was created, one is discarded again.
    ///
    /// # Panics
    /// Panics if not even a single allocator could be created.
    #[cfg(target_os = "windows")]
    pub fn new(thread_count: usize) -> Self {
        // Try for one more allocator than requested; large virtual
        // allocations can fail on Windows, so stop on the first failure.
        let attempts = thread_count + 1;
        let mut stack = Stack::with_capacity(attempts);
        for index in 0..attempts {
            #[expect(clippy::cast_possible_truncation)]
            let id = index as u32;
            let Ok(allocator) = FixedSizeAllocator::try_new(id) else { break };
            stack.push(allocator);
        }

        if stack.len() == 0 {
            panic!("Insufficient memory to create fixed-size allocator pool");
        }
        // Drop the extra allocator again — unless only a single one could be
        // created, in which case keep it so the pool is never empty.
        if stack.len() > 1 {
            stack.pop();
        }

        Self { allocators: Mutex::new(stack), available: Condvar::new() }
    }

    /// Take an allocator out of the pool.
    ///
    /// # Panics
    /// Panics if the pool is currently empty.
    #[cfg(not(target_os = "windows"))]
    pub fn get(&self) -> Allocator {
        // Bind the result in a `let` so the `MutexGuard` temporary is dropped
        // at the end of this statement — before `into_inner` or the panic.
        let popped = self.allocators.lock().unwrap().pop();
        match popped {
            Some(allocator) => allocator.into_inner(),
            None => panic!("Tried to get an allocator from an empty `FixedSizeAllocatorPool`"),
        }
    }

    /// Take an allocator out of the pool, blocking until one is available
    /// (Windows variant).
    #[cfg(target_os = "windows")]
    pub fn get(&self) -> Allocator {
        let mut guard = self.allocators.lock().unwrap();
        loop {
            match guard.pop() {
                Some(allocator) => return allocator.into_inner(),
                // Pool is empty: sleep until `add` signals `available`.
                None => guard = self.available.wait(guard).unwrap(),
            }
        }
    }

    /// Return an [`Allocator`] to the pool, resetting it first.
    ///
    /// # Safety
    /// NOTE(review): the `unsafe` contract is not documented in this file —
    /// presumably `allocator` must originate from a `FixedSizeAllocator`
    /// (its metadata is read on drop); confirm against callers in the
    /// parent module.
    pub(super) unsafe fn add(&self, allocator: Allocator) {
        let mut returned = FixedSizeAllocator { allocator: ManuallyDrop::new(allocator) };
        // Reset the allocator before it can be handed out again.
        returned.reset();
        // Guard is a temporary: the mutex is released at the end of this
        // statement, before any waiter is woken.
        self.allocators.lock().unwrap().push(returned);
        // Wake one waiter (Windows `get` blocks on an empty pool).
        #[cfg(target_os = "windows")]
        self.available.notify_one();
    }
}
/// Metadata describing one fixed-size allocation.
/// Written by `FixedSizeAllocator::try_new` at `FIXED_METADATA_OFFSET` from
/// the start of the chunk, and located again via
/// `Allocator::fixed_size_metadata_ptr`.
#[ast]
pub struct FixedSizeAllocatorMetadata {
    /// Numeric ID assigned when the allocator was created.
    pub id: u32,
    /// Pointer to the start of the original system allocation — the pointer
    /// that must be passed back to `System.dealloc`.
    pub(crate) alloc_ptr: NonNull<u8>,
    /// `true` while the allocation has two owners. `free_fixed_size_allocator`
    /// swaps it to `false` and only deallocates if it was already `false`.
    pub is_double_owned: AtomicBool,
}
/// Size of the backing system allocation: one block plus 2 GiB of slack, so a
/// suitably-aligned chunk can always be carved out of it (see `try_new`).
const ALLOC_SIZE: usize = BLOCK_SIZE + TWO_GIB;
/// Alignment requested from the system allocator (2 GiB).
const ALLOC_ALIGN: usize = TWO_GIB;
/// Layout of the backing allocation. Evaluated in `const` context, so an
/// invalid size/align combination fails at compile time via `unreachable!`.
const ALLOC_LAYOUT: Layout = match Layout::from_size_align(ALLOC_SIZE, ALLOC_ALIGN) {
    Ok(layout) => layout,
    Err(_) => unreachable!(),
};
/// Owner of one fixed-size allocation.
///
/// `#[repr(transparent)]` is load-bearing: `into_inner` transmutes
/// `FixedSizeAllocator` into `ManuallyDrop<Allocator>`, which requires the
/// two types to have identical layout.
#[repr(transparent)]
struct FixedSizeAllocator {
    /// `ManuallyDrop` so the inner `Allocator`'s own drop logic never runs;
    /// the backing memory is freed via `free_fixed_size_allocator` in this
    /// type's `Drop` impl instead.
    allocator: ManuallyDrop<Allocator>,
}
impl FixedSizeAllocator {
    /// Create a fixed-size allocator with the given `id`.
    ///
    /// Allocates `ALLOC_SIZE` bytes (2-GiB-aligned) from the system allocator,
    /// locates a block-aligned chunk inside it, and writes a
    /// [`FixedSizeAllocatorMetadata`] near the end of the block.
    ///
    /// # Errors
    /// Returns `Err(())` if the system allocation fails.
    ///
    /// # Panics
    /// Panics on big-endian targets.
    #[expect(clippy::items_after_statements)]
    fn try_new(id: u32) -> Result<Self, ()> {
        // NOTE(review): the reason big-endian is unsupported is not visible in
        // this file — presumably metadata/pointer encoding elsewhere relies on
        // little-endian byte order; confirm before porting.
        #[expect(clippy::manual_assert)]
        if cfg!(target_endian = "big") {
            panic!("`FixedSizeAllocator` is not supported on big-endian systems.");
        }

        let alloc_ptr = unsafe { System.alloc(ALLOC_LAYOUT) };
        // `System.alloc` returns null on failure.
        let alloc_ptr = NonNull::new(alloc_ptr).ok_or(())?;

        // `ALLOC_ALIGN` is 2 GiB, so `alloc_ptr % FOUR_GIB` is either 0 or
        // `TWO_GIB`; adding it rounds `alloc_ptr` up to the next 4-GiB
        // boundary, which still fits because the allocation is over-sized by
        // `TWO_GIB` (`ALLOC_SIZE = BLOCK_SIZE + TWO_GIB`).
        let offset = alloc_ptr.as_ptr() as usize % FOUR_GIB;
        let chunk_ptr = unsafe { alloc_ptr.add(offset) };
        debug_assert!((chunk_ptr.as_ptr() as usize).is_multiple_of(BLOCK_ALIGN));

        // Metadata lives at the end of the block, with its offset rounded so
        // it respects the allocator's minimum alignment.
        const FIXED_METADATA_SIZE_ROUNDED: usize =
            size_of::<FixedSizeAllocatorMetadata>().next_multiple_of(Allocator::RAW_MIN_ALIGN);
        const FIXED_METADATA_OFFSET: usize = BLOCK_SIZE - FIXED_METADATA_SIZE_ROUNDED;
        const _: () =
            assert!(FIXED_METADATA_OFFSET.is_multiple_of(align_of::<FixedSizeAllocatorMetadata>()));

        // Usable chunk size excludes `RAW_METADATA_SIZE` as well, so that
        // `end_ptr + RAW_METADATA_SIZE` (see `fixed_size_metadata_ptr`) lands
        // exactly on the metadata written below.
        const CHUNK_SIZE: usize = FIXED_METADATA_OFFSET - RAW_METADATA_SIZE;
        const _: () = assert!(CHUNK_SIZE.is_multiple_of(Allocator::RAW_MIN_ALIGN));

        // NOTE(review): `Allocator::from_raw_parts`'s safety contract is not
        // visible here; `chunk_ptr`/`CHUNK_SIZE` describe memory exclusively
        // owned by this allocation — confirm that satisfies the contract.
        let allocator = unsafe { Allocator::from_raw_parts(chunk_ptr, CHUNK_SIZE) };
        // Wrapped in `ManuallyDrop`: freeing goes through
        // `free_fixed_size_allocator`, never `Allocator`'s own drop.
        let allocator = ManuallyDrop::new(allocator);

        let metadata =
            FixedSizeAllocatorMetadata { alloc_ptr, id, is_double_owned: AtomicBool::new(false) };
        // SAFETY: `FIXED_METADATA_OFFSET < BLOCK_SIZE`, so the write is
        // in-bounds of the over-sized allocation, and the `const` assertion
        // above guarantees the pointer is aligned for the metadata type.
        unsafe {
            let metadata_ptr =
                chunk_ptr.add(FIXED_METADATA_OFFSET).cast::<FixedSizeAllocatorMetadata>();
            metadata_ptr.write(metadata);
        }

        Ok(Self { allocator })
    }

    /// Reset the inner allocator so it can be reused.
    fn reset(&mut self) {
        self.allocator.reset();
    }

    /// Consume `self` and return the inner [`Allocator`] WITHOUT running
    /// `FixedSizeAllocator`'s `Drop` (which would free the backing memory).
    ///
    /// The transmute is sound because `Self` is `#[repr(transparent)]` over
    /// `ManuallyDrop<Allocator>`.
    #[inline] fn into_inner(self) -> Allocator {
        let allocator =
            unsafe { mem::transmute::<FixedSizeAllocator, ManuallyDrop<Allocator>>(self) };
        ManuallyDrop::into_inner(allocator)
    }
}
impl Drop for FixedSizeAllocator {
    fn drop(&mut self) {
        // SAFETY: the metadata was written by `try_new` at the fixed offset
        // that `fixed_size_metadata_ptr` computes, and is still live here.
        // The actual deallocation is deferred to `free_fixed_size_allocator`,
        // which also handles the double-ownership flag.
        unsafe {
            let metadata_ptr = self.allocator.fixed_size_metadata_ptr();
            free_fixed_size_allocator(metadata_ptr);
        }
    }
}
/// Free the memory backing a fixed-size allocator.
///
/// Supports the allocation having up to two owners: while `is_double_owned`
/// is `true`, a call clears the flag and returns without deallocating,
/// leaving the remaining owner responsible for the real free. Only when the
/// flag is already `false` is the system allocation released.
///
/// # Safety
/// `metadata_ptr` must point to a valid [`FixedSizeAllocatorMetadata`]
/// written by `FixedSizeAllocator::try_new`, and the allocation must not be
/// used after the call that actually deallocates it.
pub unsafe fn free_fixed_size_allocator(metadata_ptr: NonNull<FixedSizeAllocatorMetadata>) {
    let alloc_ptr = {
        // SAFETY: caller guarantees `metadata_ptr` is valid for reads.
        let metadata = unsafe { metadata_ptr.as_ref() };
        // `SeqCst` swap: if two owners race to free, exactly one observes
        // `true` (and returns) and the other observes `false` (and frees).
        let is_double_owned = metadata.is_double_owned.swap(false, Ordering::SeqCst);
        if is_double_owned {
            return;
        }
        // Copy the pointer out of this block so the reference into the
        // allocation is dead before the memory is freed below.
        metadata.alloc_ptr
    };
    unsafe { System.dealloc(alloc_ptr.as_ptr(), ALLOC_LAYOUT) }
}
impl Allocator {
    /// Get a pointer to this allocator's [`FixedSizeAllocatorMetadata`].
    ///
    /// The metadata sits `RAW_METADATA_SIZE` bytes past `end_ptr()`,
    /// mirroring the layout set up in `FixedSizeAllocator::try_new`
    /// (`CHUNK_SIZE = FIXED_METADATA_OFFSET - RAW_METADATA_SIZE`).
    ///
    /// # Safety
    /// `self` must have been created by `FixedSizeAllocator::try_new`; for
    /// any other `Allocator` the returned pointer does not point to valid
    /// metadata and must not be dereferenced.
    pub unsafe fn fixed_size_metadata_ptr(&self) -> NonNull<FixedSizeAllocatorMetadata> {
        // SAFETY: caller guarantees the metadata exists at this offset.
        unsafe { self.end_ptr().add(RAW_METADATA_SIZE).cast::<FixedSizeAllocatorMetadata>() }
    }
}