mod stack;
use core::{marker::PhantomPinned, ptr::NonNull};
pub use stack::*;
/// A continuation object: a fat pointer pairing a non-null `VMContRef`
/// with the revision counter observed at creation time.
///
/// `#[repr(C)]` because generated code accesses the fields at fixed
/// offsets (see the `check_vm_contobj_offsets` test below).
#[repr(C)]
#[derive(Debug, Clone, Copy)]
pub struct VMContObj {
    /// Pointer to the underlying continuation. `NonNull` gives
    /// `Option<VMContObj>` the null-pointer niche (checked by the
    /// `null_pointer_optimization` test).
    pub contref: NonNull<VMContRef>,
    /// Revision captured when this object was created; presumably compared
    /// against `VMContRef::revision` to detect stale objects — the
    /// comparison itself happens outside this file.
    pub revision: usize,
}
impl VMContObj {
pub fn new(contref: NonNull<VMContRef>, revision: usize) -> Self {
Self { contref, revision }
}
pub unsafe fn from_raw_parts(contref: *mut u8, revision: usize) -> Option<Self> {
NonNull::new(contref.cast::<VMContRef>()).map(|contref| Self::new(contref, revision))
}
}
// SAFETY: `VMContObj` is a plain pointer+integer pair; the impls assert that
// sharing/sending it across threads is sound. NOTE(review): soundness
// ultimately depends on how access to the pointed-to `VMContRef` is
// synchronized by the surrounding runtime — confirm.
unsafe impl Send for VMContObj {}
unsafe impl Sync for VMContObj {}
/// Stack-limit bookkeeping for a single stack. `#[repr(C)]` because
/// generated code reads/writes these fields at fixed offsets (see the
/// `check_vm_stack_limits_offsets` test).
#[repr(C)]
#[derive(Debug, Default, Clone)]
pub struct VMStackLimits {
    /// Address used as the stack-overflow limit; `cont_new` computes it as
    /// a lower bound below the current stack pointer.
    pub stack_limit: usize,
    /// Frame pointer of the most recent wasm entry on this stack —
    /// TODO confirm exact semantics; not written within this file.
    pub last_wasm_entry_fp: usize,
}
/// Per-stack state shared with generated code (`#[repr(C)]`; layout checked
/// by `check_vm_common_stack_information_offsets`).
#[repr(C)]
#[derive(Debug, Clone)]
pub struct VMCommonStackInformation {
    /// Stack limits for this stack.
    pub limits: VMStackLimits,
    /// Current execution state (Fresh/Running/Parent/Suspended/Returned).
    pub state: VMStackState,
    /// Raw-pointer list of installed handlers.
    pub handlers: VMHandlerList,
    /// Index into `handlers` where switch handlers begin — presumably
    /// earlier entries are non-switch handlers; confirm against the
    /// handler-search code elsewhere.
    pub first_switch_handler_index: u32,
}
impl VMCommonStackInformation {
pub fn running_default() -> Self {
Self {
limits: VMStackLimits::default(),
state: VMStackState::Running,
handlers: VMHandlerList::empty(),
first_switch_handler_index: 0,
}
}
}
impl VMStackLimits {
    /// Default limits, but with `stack_limit` overridden to the given value.
    pub fn with_stack_limit(stack_limit: usize) -> Self {
        let defaults = Self::default();
        Self {
            stack_limit,
            ..defaults
        }
    }
}
/// A growable-array header (length, capacity, data pointer) whose layout is
/// shared with generated code (`#[repr(C)]`; see `check_vm_array_offsets`).
#[repr(C)]
#[derive(Debug, Clone)]
pub struct VMHostArray<T> {
    pub length: u32,
    pub capacity: u32,
    pub data: *mut T,
}

impl<T> VMHostArray<T> {
    /// An array with no elements and no backing storage.
    pub fn empty() -> Self {
        VMHostArray {
            length: 0,
            capacity: 0,
            data: core::ptr::null_mut(),
        }
    }

    /// Resets this header to the empty state. Does not free `data`;
    /// ownership of any backing storage is managed by the caller.
    pub fn clear(&mut self) {
        self.length = 0;
        self.capacity = 0;
        self.data = core::ptr::null_mut();
    }
}
/// Payload buffer for continuation arguments/results; `u128` slots are
/// presumably wide enough to hold any wasm value — confirm against the
/// value-marshalling code.
pub type VMPayloads = VMHostArray<u128>;
/// Handler list stored as raw pointers.
pub type VMHandlerList = VMHostArray<*mut u8>;
/// The runtime representation of a continuation. `#[repr(C)]` because
/// generated code accesses fields at fixed offsets (see
/// `check_vm_contref_offsets`).
#[repr(C)]
pub struct VMContRef {
    /// State/limits/handlers for this continuation's stack.
    pub common_stack_information: VMCommonStackInformation,
    /// Chain of parent stacks this continuation was resumed from.
    pub parent_chain: VMStackChain,
    /// Deepest ancestor; `cont_new` points this at the continuation itself,
    /// so this type is address-sensitive (see `_marker`).
    pub last_ancestor: *mut VMContRef,
    /// Revision counter matched against `VMContObj::revision`.
    pub revision: usize,
    /// The fiber stack backing this continuation.
    pub stack: VMContinuationStack,
    /// Buffer for arguments passed into the continuation.
    pub args: VMPayloads,
    /// Buffer for values flowing out of / through the continuation —
    /// exact use not visible in this file.
    pub values: VMPayloads,
    // `PhantomPinned` makes the type !Unpin: self-referential pointers like
    // `last_ancestor` mean it must not be moved once in use.
    _marker: core::marker::PhantomPinned,
}
impl VMContRef {
    /// Borrows the fiber stack backing this continuation.
    pub fn fiber_stack(&self) -> &VMContinuationStack {
        &self.stack
    }

    /// Takes ownership of the stack, leaving an unallocated stack behind.
    pub fn detach_stack(&mut self) -> VMContinuationStack {
        let mut detached = VMContinuationStack::unallocated();
        core::mem::swap(&mut self.stack, &mut detached);
        detached
    }

    /// A fresh, inert continuation: state `Fresh`, no parent, no allocated
    /// stack, empty payload buffers, revision 0.
    pub fn empty() -> Self {
        Self {
            common_stack_information: VMCommonStackInformation {
                limits: VMStackLimits::with_stack_limit(Default::default()),
                state: VMStackState::Fresh,
                handlers: VMHandlerList::empty(),
                first_switch_handler_index: 0,
            },
            parent_chain: VMStackChain::Absent,
            last_ancestor: core::ptr::null_mut(),
            revision: 0,
            stack: VMContinuationStack::unallocated(),
            args: VMPayloads::empty(),
            values: VMPayloads::empty(),
            _marker: PhantomPinned,
        }
    }
}
impl Drop for VMContRef {
    fn drop(&mut self) {
        // Intentionally empty: presumably the stack and payload buffers are
        // deallocated by their owner (e.g. the store/allocator) rather than
        // here. NOTE(review): confirm `args`/`values` storage cannot leak
        // through this path.
    }
}
// SAFETY: asserted on the premise that access to the raw pointers inside
// (`last_ancestor`, payload buffers, handler list) is synchronized by the
// surrounding runtime. NOTE(review): not provable from this file — confirm.
unsafe impl Send for VMContRef {}
unsafe impl Sync for VMContRef {}
/// Allocates and initializes a fresh continuation running `func`, returning
/// a raw pointer to it.
///
/// NOTE(review): `#[inline(always)]` on a function of this size is unusual —
/// presumably deliberate for the libcall/translated-code path; confirm.
#[cfg(feature = "stack-switching")]
#[inline(always)]
pub fn cont_new(
    store: &mut dyn crate::vm::VMStore,
    instance: crate::store::InstanceId,
    func: *mut u8,
    param_count: u32,
    result_count: u32,
) -> crate::Result<*mut VMContRef> {
    let instance = store.instance_mut(instance);
    let caller_vmctx = instance.vmctx();
    let stack_size = store.engine().config().async_stack_size;
    let contref = store.allocate_continuation()?;
    // Invariant: the allocator returns a valid, non-null continuation;
    // `unwrap` turns a violated invariant into a panic rather than UB.
    let contref = unsafe { contref.as_mut().unwrap() };
    // Top-of-stack pointer of the freshly allocated fiber stack; `unwrap`
    // assumes an allocated (not `unallocated`) stack — TODO confirm.
    let tsp = contref.stack.top().unwrap();
    contref.parent_chain = VMStackChain::Absent;
    // A fresh continuation is its own deepest ancestor.
    contref.last_ancestor = contref;
    // Reinterpret the `u128` payload array header as `ValRaw` storage for
    // stack initialization. NOTE(review): relies on the two `VMHostArray`
    // instantiations having compatible layout (both `repr(C)`) — confirm.
    let contref_args_ptr = &mut contref.args as *mut _ as *mut VMHostArray<crate::ValRaw>;
    contref.stack.initialize(
        func.cast::<crate::vm::VMFuncRef>(),
        caller_vmctx.as_ptr(),
        contref_args_ptr,
        param_count,
        result_count,
    );
    let stack_pointer = contref.stack.control_context_stack_pointer();
    // The effective limit is the more restrictive (higher) of: the
    // configured max wasm stack below the current SP, or the base of the
    // allocated stack. NOTE(review): `stack_pointer - max_wasm_stack`
    // would wrap in release builds if `max_wasm_stack > stack_pointer` —
    // presumably impossible by construction; confirm.
    let wasm_stack_limit = core::cmp::max(
        stack_pointer - store.engine().config().max_wasm_stack,
        tsp as usize - stack_size,
    );
    let limits = VMStackLimits::with_stack_limit(wasm_stack_limit);
    let csi = &mut contref.common_stack_information;
    csi.state = VMStackState::Fresh;
    csi.limits = limits;
    log::trace!("Created contref @ {contref:p}");
    Ok(contref)
}
/// A link in the chain of stacks, pointing toward the parent. The explicit
/// `#[repr(usize, C)]` discriminants are shared with generated code via the
/// `wasmtime_environ::STACK_CHAIN_*` constants.
#[derive(Debug, Clone, PartialEq)]
#[repr(usize, C)]
pub enum VMStackChain {
    /// No parent stack (e.g. a freshly created continuation).
    Absent = wasmtime_environ::STACK_CHAIN_ABSENT_DISCRIMINANT,
    /// The initial (non-continuation) stack, described by its common
    /// stack information.
    InitialStack(*mut VMCommonStackInformation) =
        wasmtime_environ::STACK_CHAIN_INITIAL_STACK_DISCRIMINANT,
    /// A continuation stack.
    Continuation(*mut VMContRef) = wasmtime_environ::STACK_CHAIN_CONTINUATION_DISCRIMINANT,
}
impl VMStackChain {
    /// True iff this link is the `InitialStack` variant.
    pub fn is_initial_stack(&self) -> bool {
        match self {
            VMStackChain::InitialStack(_) => true,
            _ => false,
        }
    }

    /// Iterator over the continuations reachable from this chain link.
    ///
    /// # Safety
    /// Every `VMContRef` pointer in the chain must remain valid while the
    /// iterator is used.
    pub unsafe fn into_continuation_iter(self) -> ContinuationIterator {
        ContinuationIterator(self)
    }

    /// Iterator over the `VMStackLimits` of each stack in this chain.
    ///
    /// # Safety
    /// Every pointer in the chain must remain valid while the iterator is
    /// used.
    pub unsafe fn into_stack_limits_iter(self) -> StackLimitsIterator {
        StackLimitsIterator(self)
    }
}
/// Cursor for walking the continuations of a `VMStackChain`.
pub struct ContinuationIterator(VMStackChain);
/// Cursor for walking the `VMStackLimits` of each stack in a `VMStackChain`.
pub struct StackLimitsIterator(VMStackChain);
impl Iterator for ContinuationIterator {
    type Item = *mut VMContRef;

    /// Yields the current continuation and advances to its parent; the walk
    /// ends at `Absent` or at the initial stack.
    fn next(&mut self) -> Option<Self::Item> {
        if let VMStackChain::Continuation(ptr) = self.0 {
            // Chain pointers are valid per the `into_continuation_iter`
            // safety contract; `unwrap` panics on a (bug) null pointer.
            let continuation = unsafe { ptr.as_mut().unwrap() };
            self.0 = continuation.parent_chain.clone();
            Some(ptr)
        } else {
            None
        }
    }
}
impl Iterator for StackLimitsIterator {
    type Item = *mut VMStackLimits;

    /// Yields a pointer to the current stack's limits and advances toward
    /// the root; the initial stack is the final element.
    fn next(&mut self) -> Option<Self::Item> {
        let (limits, next_chain) = match self.0 {
            VMStackChain::Absent => return None,
            VMStackChain::InitialStack(csi) => {
                // The initial stack terminates the chain.
                let limits = unsafe { &mut (*csi).limits } as *mut VMStackLimits;
                (limits, VMStackChain::Absent)
            }
            VMStackChain::Continuation(ptr) => {
                // Valid per the `into_stack_limits_iter` safety contract.
                let continuation = unsafe { ptr.as_mut().unwrap() };
                let limits =
                    (&mut continuation.common_stack_information.limits) as *mut VMStackLimits;
                (limits, continuation.parent_chain.clone())
            }
        };
        self.0 = next_chain;
        Some(limits)
    }
}
/// Execution state of a stack. `#[repr(u32)]` with explicit discriminants
/// shared with generated code via `wasmtime_environ::STACK_STATE_*`.
#[derive(Debug, Clone, Copy, PartialEq)]
#[repr(u32)]
pub enum VMStackState {
    /// Created but never resumed.
    Fresh = wasmtime_environ::STACK_STATE_FRESH_DISCRIMINANT,
    /// Currently executing.
    Running = wasmtime_environ::STACK_STATE_RUNNING_DISCRIMINANT,
    /// Suspended because a child stack is running.
    Parent = wasmtime_environ::STACK_STATE_PARENT_DISCRIMINANT,
    /// Suspended by an explicit suspend.
    Suspended = wasmtime_environ::STACK_STATE_SUSPENDED_DISCRIMINANT,
    /// Ran to completion; must not be resumed again.
    Returned = wasmtime_environ::STACK_STATE_RETURNED_DISCRIMINANT,
}
// Layout tests: the `repr(C)` types above are accessed by generated code at
// offsets computed in `wasmtime_environ`; these tests pin the Rust layout to
// those offsets so the two cannot silently drift apart.
#[cfg(test)]
mod tests {
    use core::mem::{offset_of, size_of};
    use wasmtime_environ::{HostPtr, Module, PtrSize, StaticModuleIndex, VMOffsets};

    use super::*;

    // `NonNull` inside `VMContObj` must give `Option<VMContObj>` the
    // null-pointer niche, i.e. the `Option` costs no extra space.
    #[test]
    fn null_pointer_optimization() {
        assert_eq!(size_of::<Option<VMContObj>>(), size_of::<VMContObj>());
    }

    // Field offsets of `VMStackLimits` match `wasmtime_environ`'s layout.
    #[test]
    fn check_vm_stack_limits_offsets() {
        let module = Module::new(StaticModuleIndex::from_u32(0));
        let offsets = VMOffsets::new(HostPtr, &module);
        assert_eq!(
            offset_of!(VMStackLimits, stack_limit),
            usize::from(offsets.ptr.vmstack_limits_stack_limit())
        );
        assert_eq!(
            offset_of!(VMStackLimits, last_wasm_entry_fp),
            usize::from(offsets.ptr.vmstack_limits_last_wasm_entry_fp())
        );
    }

    // Size and field offsets of `VMCommonStackInformation` match the
    // offsets `wasmtime_environ` computes for generated code.
    #[test]
    fn check_vm_common_stack_information_offsets() {
        let module = Module::new(StaticModuleIndex::from_u32(0));
        let offsets = VMOffsets::new(HostPtr, &module);
        assert_eq!(
            size_of::<VMCommonStackInformation>(),
            usize::from(offsets.ptr.size_of_vmcommon_stack_information())
        );
        assert_eq!(
            offset_of!(VMCommonStackInformation, limits),
            usize::from(offsets.ptr.vmcommon_stack_information_limits())
        );
        assert_eq!(
            offset_of!(VMCommonStackInformation, state),
            usize::from(offsets.ptr.vmcommon_stack_information_state())
        );
        assert_eq!(
            offset_of!(VMCommonStackInformation, handlers),
            usize::from(offsets.ptr.vmcommon_stack_information_handlers())
        );
        assert_eq!(
            offset_of!(VMCommonStackInformation, first_switch_handler_index),
            usize::from(
                offsets
                    .ptr
                    .vmcommon_stack_information_first_switch_handler_index()
            )
        );
    }

    // `VMHostArray`'s header layout (length/capacity/data) matches the
    // environ-side offsets; checked with `T = ()` since `T` only affects
    // the pointee, not the header.
    #[test]
    fn check_vm_array_offsets() {
        let module = Module::new(StaticModuleIndex::from_u32(0));
        let offsets = VMOffsets::new(HostPtr, &module);
        assert_eq!(
            size_of::<VMHostArray<()>>(),
            usize::from(offsets.ptr.size_of_vmhostarray())
        );
        assert_eq!(
            offset_of!(VMHostArray<()>, length),
            usize::from(offsets.ptr.vmhostarray_length())
        );
        assert_eq!(
            offset_of!(VMHostArray<()>, capacity),
            usize::from(offsets.ptr.vmhostarray_capacity())
        );
        assert_eq!(
            offset_of!(VMHostArray<()>, data),
            usize::from(offsets.ptr.vmhostarray_data())
        );
    }

    // `VMContObj`'s size and field offsets match the environ-side layout.
    #[test]
    fn check_vm_contobj_offsets() {
        let module = Module::new(StaticModuleIndex::from_u32(0));
        let offsets = VMOffsets::new(HostPtr, &module);
        assert_eq!(
            offset_of!(VMContObj, contref),
            usize::from(offsets.ptr.vmcontobj_contref())
        );
        assert_eq!(
            offset_of!(VMContObj, revision),
            usize::from(offsets.ptr.vmcontobj_revision())
        );
        assert_eq!(
            size_of::<VMContObj>(),
            usize::from(offsets.ptr.size_of_vmcontobj())
        )
    }

    // `VMContRef` field offsets match the environ-side layout; additionally
    // the `revision` field must be 8-byte aligned for both 4- and 8-byte
    // pointer sizes (the two literal arguments below).
    #[test]
    fn check_vm_contref_offsets() {
        let module = Module::new(StaticModuleIndex::from_u32(0));
        let offsets = VMOffsets::new(HostPtr, &module);
        assert_eq!(
            offset_of!(VMContRef, common_stack_information),
            usize::from(offsets.ptr.vmcontref_common_stack_information())
        );
        assert_eq!(
            offset_of!(VMContRef, parent_chain),
            usize::from(offsets.ptr.vmcontref_parent_chain())
        );
        assert_eq!(
            offset_of!(VMContRef, last_ancestor),
            usize::from(offsets.ptr.vmcontref_last_ancestor())
        );
        assert_eq!(u8::vmcontref_revision(&4) % 8, 0);
        assert_eq!(u8::vmcontref_revision(&8) % 8, 0);
        assert_eq!(
            offset_of!(VMContRef, revision),
            usize::from(offsets.ptr.vmcontref_revision())
        );
        assert_eq!(
            offset_of!(VMContRef, stack),
            usize::from(offsets.ptr.vmcontref_stack())
        );
        assert_eq!(
            offset_of!(VMContRef, args),
            usize::from(offsets.ptr.vmcontref_args())
        );
        assert_eq!(
            offset_of!(VMContRef, values),
            usize::from(offsets.ptr.vmcontref_values())
        );
    }

    // `VMStackChain`'s size matches the environ-side expectation.
    #[test]
    fn check_vm_stack_chain_offsets() {
        let module = Module::new(StaticModuleIndex::from_u32(0));
        let offsets = VMOffsets::new(HostPtr, &module);
        assert_eq!(
            size_of::<VMStackChain>(),
            usize::from(offsets.ptr.size_of_vmstack_chain())
        );
    }
}