use std::{
mem::{self, ManuallyDrop},
ptr::NonNull,
sync::{
Mutex,
atomic::{AtomicBool, Ordering},
},
};
#[cfg(target_os = "windows")]
use std::sync::Condvar;
use oxc_ast_macros::ast;
use oxc_data_structures::stack::Stack;
use crate::{
Allocator,
arena::{CHUNK_FOOTER_SIZE, ChunkFooter, dealloc_arena_chunk},
generated::fixed_size_constants::{
ACTIVE_SIZE, BLOCK_SIZE, BUFFER_SIZE, CURSOR_MIN_ALIGN, RAW_METADATA_ALIGN,
RAW_METADATA_SIZE,
},
};
/// A pool of reusable [`FixedSizeAllocator`]s shared between threads.
///
/// Allocators are checked out with [`FixedSizeAllocatorPool::get`] and returned
/// with [`FixedSizeAllocatorPool::add`].
pub struct FixedSizeAllocatorPool {
    /// Available allocators. Guarded by a mutex so multiple threads can
    /// check allocators in and out concurrently.
    allocators: Mutex<Stack<FixedSizeAllocator>>,
    /// On Windows, `get` blocks on this condvar when the pool is empty,
    /// and `add` notifies it after returning an allocator.
    /// (On other platforms, `get` panics instead of blocking.)
    #[cfg(target_os = "windows")]
    available: Condvar,
}
impl FixedSizeAllocatorPool {
    /// Create a pool containing `thread_count` fixed-size allocators.
    ///
    /// # Panics
    /// Panics if any of the allocators cannot be created (allocation failure).
    #[cfg(not(target_os = "windows"))]
    pub fn new(thread_count: usize) -> Self {
        let mut allocators = Stack::with_capacity(thread_count);
        for i in 0..thread_count {
            // Truncation is fine: thread counts comfortably fit in `u32`
            #[expect(clippy::cast_possible_truncation)]
            let allocator = FixedSizeAllocator::try_new(i as u32).unwrap();
            allocators.push(allocator);
        }
        Self { allocators: Mutex::new(allocators) }
    }
    /// Create a pool containing up to `thread_count` fixed-size allocators (Windows).
    ///
    /// Unlike the non-Windows version, allocator creation is allowed to fail part-way:
    /// creation stops at the first failure and the pool is built from whatever
    /// succeeded. One extra allocator (`thread_count + 1`) is attempted, and if more
    /// than one was created, one is popped and dropped — NOTE(review): presumably to
    /// leave headroom of free memory after confirming it could be allocated; confirm
    /// the intent against the crate's allocation strategy.
    ///
    /// # Panics
    /// Panics only if *no* allocator at all could be created.
    #[cfg(target_os = "windows")]
    pub fn new(thread_count: usize) -> Self {
        let capacity = thread_count + 1;
        let mut allocators = Stack::with_capacity(capacity);
        for i in 0..capacity {
            // Truncation is fine: thread counts comfortably fit in `u32`
            #[expect(clippy::cast_possible_truncation)]
            let allocator = FixedSizeAllocator::try_new(i as u32);
            // Stop creating allocators at the first allocation failure
            let Ok(allocator) = allocator else { break };
            allocators.push(allocator);
        }
        match allocators.len() {
            // Could not create even one allocator — nothing this pool can do
            0 => panic!("Insufficient memory to create fixed-size allocator pool"),
            // Exactly one created — keep it; dropping it would empty the pool
            1 => {}
            // More than one created — drop one (see note above)
            _ => {
                allocators.pop();
            }
        }
        Self { allocators: Mutex::new(allocators), available: Condvar::new() }
    }
    /// Take an [`Allocator`] out of the pool.
    ///
    /// # Panics
    /// Panics if the pool is currently empty (non-Windows behavior; the Windows
    /// version blocks instead).
    #[cfg(not(target_os = "windows"))]
    pub fn get(&self) -> Allocator {
        // Scope the lock guard so the mutex is released before unwrapping/panicking
        let maybe_allocator = {
            let mut allocators_guard = self.allocators.lock().unwrap();
            allocators_guard.pop()
        };
        if let Some(allocator) = maybe_allocator {
            allocator.into_inner()
        } else {
            panic!("Tried to get an allocator from an empty `FixedSizeAllocatorPool`")
        }
    }
    /// Take an [`Allocator`] out of the pool (Windows).
    ///
    /// If the pool is empty, blocks on the `available` condvar until another
    /// thread returns an allocator via [`FixedSizeAllocatorPool::add`].
    /// The loop guards against spurious condvar wakeups.
    #[cfg(target_os = "windows")]
    pub fn get(&self) -> Allocator {
        let mut allocators_guard = self.allocators.lock().unwrap();
        loop {
            if let Some(allocator) = allocators_guard.pop() {
                return allocator.into_inner();
            }
            // `wait` releases the lock while blocked and re-acquires it on wakeup
            allocators_guard = self.available.wait(allocators_guard).unwrap();
        }
    }
    /// Return an [`Allocator`] to the pool, resetting it first so it is
    /// ready for reuse.
    ///
    /// # Safety
    /// NOTE(review): `allocator` must be a fixed-size allocator — presumably one
    /// created via `Allocator::new_fixed_size` (e.g. originally obtained from this
    /// pool), since `FixedSizeAllocator::reset` and its `Drop` impl rely on the
    /// fixed-size memory layout. Confirm the exact contract with callers.
    pub(super) unsafe fn add(&self, allocator: Allocator) {
        // Wrap in `ManuallyDrop` so ownership transfers into the pool's
        // `FixedSizeAllocator` without running `Allocator`'s own drop logic
        let mut fixed_size_allocator =
            FixedSizeAllocator { allocator: ManuallyDrop::new(allocator) };
        fixed_size_allocator.reset();
        // Scope the lock guard so the mutex is released before notifying,
        // letting a woken waiter acquire the lock immediately
        {
            let mut allocators_guard = self.allocators.lock().unwrap();
            allocators_guard.push(fixed_size_allocator);
        }
        // Wake one thread blocked in `get` (Windows only)
        #[cfg(target_os = "windows")]
        self.available.notify_one();
    }
}
/// Metadata stored inside a fixed-size allocator's memory block,
/// at `FIXED_METADATA_OFFSET`, immediately before the chunk footer
/// (see the layout assertions below).
#[ast]
pub struct FixedSizeAllocatorMetadata {
    /// Identifier assigned when the allocator was created
    /// (its index within the pool).
    pub id: u32,
    /// Double-ownership flag consumed by `free_fixed_size_allocator`:
    /// the first free resets it and returns; only a free that observes
    /// `false` actually deallocates the chunk.
    pub is_double_owned: AtomicBool,
}
/// Size of the [`FixedSizeAllocatorMetadata`] record stored in each block.
const FIXED_METADATA_SIZE: usize = size_of::<FixedSizeAllocatorMetadata>();
/// Offset of [`FixedSizeAllocatorMetadata`] from the start of the chunk.
const FIXED_METADATA_OFFSET: usize = BUFFER_SIZE;
/// Offset of the raw metadata region from the start of the chunk.
/// It sits directly after the active (allocatable) region.
const RAW_METADATA_OFFSET: usize = ACTIVE_SIZE;
// Compile-time checks pinning the fixed-size block layout:
//
//   chunk start
//   ├─ active (allocatable) region            [0 .. ACTIVE_SIZE)
//   ├─ raw metadata                           [RAW_METADATA_OFFSET .. +RAW_METADATA_SIZE)
//   ├─ `FixedSizeAllocatorMetadata`           [FIXED_METADATA_OFFSET .. +FIXED_METADATA_SIZE)
//   └─ `ChunkFooter`                          [BLOCK_SIZE - CHUNK_FOOTER_SIZE .. BLOCK_SIZE)
//
// These must hold for the pointer arithmetic in `try_new`, `reset`,
// `fixed_size_metadata_ptr`, and `free_fixed_size_allocator` to be sound.
const _: () = {
    // Block and footer sizes respect the allocator's minimum alignment
    assert!(BLOCK_SIZE.is_multiple_of(Allocator::RAW_MIN_ALIGN));
    assert!(CHUNK_FOOTER_SIZE.is_multiple_of(Allocator::RAW_MIN_ALIGN));
    // Fixed metadata is properly aligned for its type
    assert!(FIXED_METADATA_OFFSET.is_multiple_of(align_of::<FixedSizeAllocatorMetadata>()));
    // Fixed metadata ends exactly where the chunk footer begins
    assert!(
        FIXED_METADATA_OFFSET + size_of::<FixedSizeAllocatorMetadata>()
            == BLOCK_SIZE - CHUNK_FOOTER_SIZE
    );
    // Raw metadata is properly aligned and ends exactly where fixed metadata begins
    assert!(RAW_METADATA_OFFSET.is_multiple_of(RAW_METADATA_ALIGN));
    assert!(RAW_METADATA_OFFSET + RAW_METADATA_SIZE == FIXED_METADATA_OFFSET);
    // The cursor can legally be placed at the raw metadata boundary
    assert!(RAW_METADATA_OFFSET.is_multiple_of(CURSOR_MIN_ALIGN));
};
/// Owned wrapper around a fixed-size [`Allocator`] held by the pool.
///
/// `#[repr(transparent)]` guarantees identical layout to
/// `ManuallyDrop<Allocator>`, which `into_inner`'s `transmute` relies on.
/// The inner allocator is wrapped in `ManuallyDrop` because this type's
/// `Drop` impl frees the memory itself via the block's metadata, rather
/// than running `Allocator`'s drop logic.
#[repr(transparent)]
struct FixedSizeAllocator {
    allocator: ManuallyDrop<Allocator>,
}
impl FixedSizeAllocator {
    /// Create a new fixed-size allocator with the given `id`, writing a
    /// [`FixedSizeAllocatorMetadata`] record into its block.
    ///
    /// # Errors
    /// Returns `Err(())` if the underlying fixed-size allocation fails.
    ///
    /// # Panics
    /// Panics on big-endian targets, and if `CURSOR_MIN_ALIGN` is out of sync
    /// with `Arena`'s minimum alignment.
    fn try_new(id: u32) -> Result<Self, ()> {
        // NOTE(review): presumably the stored metadata / pointer bit-pattern
        // tricks assume little-endian layout — confirm why big-endian is excluded
        #[expect(clippy::manual_assert)]
        if cfg!(target_endian = "big") {
            panic!("`FixedSizeAllocator` is not supported on big-endian systems.");
        }
        let allocator = Allocator::new_fixed_size().ok_or(())?;
        // `ManuallyDrop` so `Drop for FixedSizeAllocator` controls deallocation
        let allocator = ManuallyDrop::new(allocator);
        // `data_end_ptr` points at the end of the data region
        // (`BLOCK_SIZE - CHUNK_FOOTER_SIZE` bytes past the chunk start, per the
        // layout assertions above), so this recovers the chunk start pointer
        let chunk_ptr = unsafe { allocator.data_end_ptr().sub(BLOCK_SIZE - CHUNK_FOOTER_SIZE) };
        // Guard against the generated constant drifting from the arena's actual alignment
        assert!(
            allocator.arena().min_align() == CURSOR_MIN_ALIGN,
            "Update `CURSOR_MIN_ALIGN` to match `Arena`'s `MIN_ALIGN`",
        );
        // Park the cursor at the top of the active region (just below the
        // raw metadata), reserving the metadata regions from allocation
        unsafe { allocator.set_cursor_ptr(chunk_ptr.add(RAW_METADATA_OFFSET)) };
        let metadata = FixedSizeAllocatorMetadata { id, is_double_owned: AtomicBool::new(false) };
        // SAFETY-relevant: `FIXED_METADATA_OFFSET` is asserted above to be
        // in-bounds and correctly aligned for `FixedSizeAllocatorMetadata`
        unsafe {
            let metadata_ptr =
                chunk_ptr.add(FIXED_METADATA_OFFSET).cast::<FixedSizeAllocatorMetadata>();
            metadata_ptr.write(metadata);
        }
        Ok(Self { allocator })
    }
    /// Reset the allocator for reuse: move the bump cursor back to the top of
    /// the active region (the same position `try_new` sets — equal to
    /// `chunk_start + RAW_METADATA_OFFSET`, per the layout assertions above).
    fn reset(&mut self) {
        unsafe {
            let cursor_ptr =
                self.allocator.data_end_ptr().sub(FIXED_METADATA_SIZE + RAW_METADATA_SIZE);
            debug_assert!(cursor_ptr.addr().get().is_multiple_of(CURSOR_MIN_ALIGN));
            self.allocator.set_cursor_ptr(cursor_ptr);
        }
    }
    /// Extract the inner [`Allocator`], transferring ownership to the caller
    /// *without* running `FixedSizeAllocator`'s `Drop` impl (which would free
    /// the memory).
    #[inline]
    fn into_inner(self) -> Allocator {
        // SAFETY-relevant: sound because of `#[repr(transparent)]` — `self`
        // has the same layout as its single field. Transmuting (instead of
        // moving the field out) sidesteps `Drop`, like `mem::forget` would.
        let allocator =
            unsafe { mem::transmute::<FixedSizeAllocator, ManuallyDrop<Allocator>>(self) };
        ManuallyDrop::into_inner(allocator)
    }
}
impl Drop for FixedSizeAllocator {
    /// Free the allocator's memory block via its metadata, honoring the
    /// double-ownership protocol in `free_fixed_size_allocator`
    /// (the block is only deallocated by the last owner).
    fn drop(&mut self) {
        unsafe {
            let metadata_ptr = self.allocator.fixed_size_metadata_ptr();
            free_fixed_size_allocator(metadata_ptr);
        }
    }
}
/// Free a fixed-size allocator's memory block, respecting double ownership.
///
/// If the block is currently double-owned, this call only clears the
/// `is_double_owned` flag and returns — the remaining owner's later call
/// (observing `false`) performs the actual deallocation.
///
/// # Safety
/// `metadata_ptr` must point to a valid, live [`FixedSizeAllocatorMetadata`]
/// inside a fixed-size allocator block (as written by `FixedSizeAllocator::try_new`).
/// After the call that actually deallocates, the pointer (and the whole block)
/// must not be used again.
pub unsafe fn free_fixed_size_allocator(metadata_ptr: NonNull<FixedSizeAllocatorMetadata>) {
    // Scope the `&` borrow of the metadata so it ends before the block is freed
    {
        let metadata = unsafe { metadata_ptr.as_ref() };
        // Atomically take the flag: exactly one of two racing owners sees `true`
        // and returns early; the other proceeds to deallocate
        if metadata.is_double_owned.swap(false, Ordering::SeqCst) {
            return;
        }
    }
    unsafe {
        // The metadata record ends exactly where the `ChunkFooter` begins
        // (per the layout assertions), so offsetting by its size yields the footer
        let footer_ptr = metadata_ptr.byte_add(FIXED_METADATA_SIZE).cast::<ChunkFooter>();
        dealloc_arena_chunk(footer_ptr);
    }
}
impl Allocator {
    /// Get a pointer to the [`FixedSizeAllocatorMetadata`] stored in this
    /// allocator's block.
    ///
    /// The metadata sits immediately before `data_end_ptr` (its record ends
    /// exactly at `BLOCK_SIZE - CHUNK_FOOTER_SIZE`, per the layout assertions),
    /// so subtracting its size from the data end yields its address.
    ///
    /// # Safety
    /// NOTE(review): `self` must be a fixed-size allocator (created via
    /// `Allocator::new_fixed_size`, with metadata written by
    /// `FixedSizeAllocator::try_new`) — on a regular allocator this pointer
    /// math reads unrelated memory. Confirm this is the intended contract.
    pub unsafe fn fixed_size_metadata_ptr(&self) -> NonNull<FixedSizeAllocatorMetadata> {
        unsafe { self.data_end_ptr().sub(FIXED_METADATA_SIZE).cast::<FixedSizeAllocatorMetadata>() }
    }
}