#![cfg_attr(not(asan), allow(dead_code))]
use crate::{runtime::vm::PoolingInstanceAllocatorConfig, PoolConcurrencyLimitError};
use anyhow::{bail, Result};
use std::sync::atomic::{AtomicU64, Ordering};
/// A pool of fiber stacks that enforces a concurrency limit.
///
/// This variant does not pre-reserve memory for stacks; it only tracks how
/// many stacks are live via an atomic counter and delegates the actual
/// allocation to `wasmtime_fiber::FiberStack::new` on each request.
#[derive(Debug)]
pub struct StackPool {
    // Size in bytes requested for each fiber stack; 0 means fiber stacks
    // are unsupported and `allocate` always fails.
    stack_size: usize,
    // Number of stacks currently allocated and not yet deallocated.
    live_stacks: AtomicU64,
    // Maximum number of concurrently live stacks, taken from
    // `config.limits.total_stacks`.
    stack_limit: u64,
}
impl StackPool {
pub fn new(config: &PoolingInstanceAllocatorConfig) -> Result<Self> {
Ok(StackPool {
stack_size: config.stack_size,
live_stacks: AtomicU64::new(0),
stack_limit: config.limits.total_stacks.into(),
})
}
#[allow(unused)] pub fn is_empty(&self) -> bool {
self.live_stacks.load(Ordering::Acquire) == 0
}
pub fn allocate(&self) -> Result<wasmtime_fiber::FiberStack> {
if self.stack_size == 0 {
bail!("fiber stack allocation not supported")
}
let old_count = self.live_stacks.fetch_add(1, Ordering::AcqRel);
if old_count >= self.stack_limit {
self.live_stacks.fetch_sub(1, Ordering::AcqRel);
return Err(PoolConcurrencyLimitError::new(
usize::try_from(self.stack_limit).unwrap(),
"fibers",
)
.into());
}
match wasmtime_fiber::FiberStack::new(self.stack_size) {
Ok(stack) => Ok(stack),
Err(e) => {
self.live_stacks.fetch_sub(1, Ordering::AcqRel);
Err(anyhow::Error::from(e))
}
}
}
pub unsafe fn zero_stack(
&self,
_stack: &mut wasmtime_fiber::FiberStack,
_decommit: impl FnMut(*mut u8, usize),
) {
}
pub unsafe fn deallocate(&self, stack: wasmtime_fiber::FiberStack) {
self.live_stacks.fetch_sub(1, Ordering::AcqRel);
let _ = stack;
}
}