#![cfg_attr(
all(unix, not(miri), not(asan)),
expect(dead_code, reason = "not used, but typechecked")
)]
use crate::PoolConcurrencyLimitError;
use crate::prelude::*;
use crate::runtime::vm::PoolingInstanceAllocatorConfig;
use std::sync::atomic::{AtomicU64, Ordering};
/// Fallback fiber-stack "pool" that performs no actual pooling: `allocate`
/// creates a fresh stack each call and `deallocate` simply drops it. Only a
/// counter of live stacks is maintained so the configured concurrency limit
/// is still enforced.
#[derive(Debug)]
pub struct StackPool {
// Size in bytes requested for each fiber stack; `0` means fiber stack
// allocation is unsupported and `allocate` bails.
stack_size: usize,
// Forwarded to `wasmtime_fiber::FiberStack::new` as its zeroing flag
// (taken from `config.async_stack_zeroing`).
stack_zeroing: bool,
// Number of stacks currently handed out by `allocate` and not yet
// returned via `deallocate`.
live_stacks: AtomicU64,
// Maximum number of concurrently live stacks
// (from `config.limits.total_stacks`).
stack_limit: u64,
}
impl StackPool {
/// Test-only probe for whether a real pooling implementation is in use;
/// this fallback variant always reports `false`.
#[cfg(test)]
pub fn enabled() -> bool {
false
}

/// Builds a pool from the allocator configuration.
///
/// No memory is reserved up front; stacks are created on demand in
/// [`Self::allocate`], and this constructor only records the settings
/// needed to do so.
pub fn new(config: &PoolingInstanceAllocatorConfig) -> Result<Self> {
let pool = StackPool {
stack_size: config.stack_size,
stack_zeroing: config.async_stack_zeroing,
live_stacks: AtomicU64::new(0),
stack_limit: config.limits.total_stacks.into(),
};
Ok(pool)
}

/// Returns `true` when no stacks handed out by [`Self::allocate`] remain
/// outstanding.
pub fn is_empty(&self) -> bool {
let live = self.live_stacks.load(Ordering::Acquire);
live == 0
}

/// Creates a new fiber stack.
///
/// # Errors
///
/// Fails when fiber stacks are unsupported (`stack_size == 0`), when the
/// configured concurrency limit is already reached, or when the
/// underlying stack creation itself fails.
pub fn allocate(&self) -> Result<wasmtime_fiber::FiberStack> {
if self.stack_size == 0 {
bail!("fiber stack allocation not supported")
}

// Optimistically claim a slot; every failure path below must undo
// this claim so the live count stays accurate.
let prior = self.live_stacks.fetch_add(1, Ordering::AcqRel);
if prior >= self.stack_limit {
self.live_stacks.fetch_sub(1, Ordering::AcqRel);
let limit = usize::try_from(self.stack_limit).unwrap();
return Err(PoolConcurrencyLimitError::new(limit, "fibers").into());
}

wasmtime_fiber::FiberStack::new(self.stack_size, self.stack_zeroing).map_err(|e| {
// Stack creation failed: release the slot reserved above.
self.live_stacks.fetch_sub(1, Ordering::AcqRel);
#[allow(
clippy::useless_conversion,
reason = "some `cfg`s have `wasmtime::Error` as the error type, others don't"
)]
let e = crate::Error::from(e);
e
})
}

/// No-op in this implementation: stacks are never recycled, so there is
/// nothing to zero. Always reports `0` bytes handled.
pub unsafe fn zero_stack(
&self,
_stack: &mut wasmtime_fiber::FiberStack,
_decommit: impl FnMut(*mut u8, usize),
) -> usize {
0
}

/// Returns a stack previously obtained from [`Self::allocate`],
/// decrementing the live-stack count and freeing the stack by dropping
/// it.
pub unsafe fn deallocate(&self, stack: wasmtime_fiber::FiberStack, _bytes_resident: usize) {
self.live_stacks.fetch_sub(1, Ordering::AcqRel);
drop(stack);
}

/// This variant keeps no warm slots, so none are ever unused.
pub fn unused_warm_slots(&self) -> u32 {
0
}

/// No pooled memory is retained, so there is no resident-byte figure to
/// report.
pub fn unused_bytes_resident(&self) -> Option<usize> {
None
}
}