use crate::RootSet;
#[cfg(feature = "component-model-async")]
use crate::component::ComponentStoreData;
#[cfg(feature = "async")]
use crate::fiber;
use crate::module::RegisteredModuleId;
use crate::prelude::*;
#[cfg(feature = "gc")]
use crate::runtime::vm::GcRootsList;
#[cfg(feature = "stack-switching")]
use crate::runtime::vm::VMContRef;
use crate::runtime::vm::mpk::ProtectionKey;
use crate::runtime::vm::{
self, GcStore, Imports, InstanceAllocationRequest, InstanceAllocator, InstanceHandle,
Interpreter, InterpreterRef, ModuleRuntimeInfo, OnDemandInstanceAllocator, SendSyncPtr,
SignalHandler, StoreBox, StorePtr, Unwind, VMContext, VMFuncRef, VMGcRef, VMStoreContext,
};
use crate::trampoline::VMHostGlobalContext;
use crate::{Engine, Module, Trap, Val, ValRaw, module::ModuleRegistry};
use crate::{Global, Instance, Memory, Table, Uninhabited};
use alloc::sync::Arc;
use core::fmt;
use core::marker;
use core::mem::{self, ManuallyDrop};
use core::num::NonZeroU64;
use core::ops::{Deref, DerefMut};
use core::pin::Pin;
use core::ptr::NonNull;
use wasmtime_environ::{DefinedGlobalIndex, DefinedTableIndex, EntityRef, PrimaryMap, TripleExt};
mod context;
pub use self::context::*;
mod data;
pub use self::data::*;
mod func_refs;
use func_refs::FuncRefs;
#[cfg(feature = "async")]
mod token;
#[cfg(feature = "async")]
pub(crate) use token::StoreToken;
#[cfg(feature = "async")]
mod async_;
#[cfg(all(feature = "async", feature = "call-hook"))]
pub use self::async_::CallHookHandler;
#[cfg(feature = "gc")]
mod gc;
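/// A `Store` owns all WebAssembly objects (instances, memories, tables,
/// functions, ...) created during its lifetime, together with one value of
/// host data `T` accessible from host functions.
///
/// Dropping a `Store` deallocates everything it owns, so items created
/// within it cannot outlive it.
///
/// A minimal usage sketch (only APIs defined in this module are assumed):
///
/// ```ignore
/// use wasmtime::{Engine, Store};
///
/// let engine = Engine::default();
/// // The host data can be any `T`; a counter here.
/// let mut store = Store::new(&engine, 0u32);
/// *store.data_mut() += 1;
/// assert_eq!(*store.data(), 1);
/// // Consume the store and recover the host data.
/// assert_eq!(store.into_data(), 1);
/// ```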
pub struct Store<T: 'static> {
inner: ManuallyDrop<Box<StoreInner<T>>>,
}
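/// Argument passed to the [`Store::call_hook`] callback describing the
/// transition between host and WebAssembly that is taking place.
///
/// A sketch of how the helper predicates classify transitions:
///
/// ```ignore
/// use wasmtime::CallHook;
///
/// // Host code is entered both when wasm calls out to the host and when
/// // wasm returns back to the host.
/// assert!(CallHook::CallingHost.entering_host());
/// assert!(CallHook::ReturningFromWasm.entering_host());
/// assert!(CallHook::ReturningFromHost.exiting_host());
/// assert!(CallHook::CallingWasm.exiting_host());
/// ```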
#[derive(Copy, Clone, Debug)]
pub enum CallHook {
CallingWasm,
ReturningFromWasm,
CallingHost,
ReturningFromHost,
}
impl CallHook {
pub fn entering_host(&self) -> bool {
match self {
CallHook::ReturningFromWasm | CallHook::CallingHost => true,
_ => false,
}
}
pub fn exiting_host(&self) -> bool {
match self {
CallHook::ReturningFromHost | CallHook::CallingWasm => true,
_ => false,
}
}
}
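/// Type-parameterized portion of a [`Store`]: the type-erased
/// [`StoreOpaque`] plus everything that needs access to the host data `T`
/// (resource limiter, call hooks, epoch deadline callback, and `T` itself).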
pub struct StoreInner<T: 'static> {
inner: StoreOpaque,
limiter: Option<ResourceLimiterInner<T>>,
call_hook: Option<CallHookInner<T>>,
#[cfg(target_has_atomic = "64")]
epoch_deadline_behavior:
Option<Box<dyn FnMut(StoreContextMut<T>) -> Result<UpdateDeadline> + Send + Sync>>,
data: ManuallyDrop<T>,
}
enum ResourceLimiterInner<T> {
Sync(Box<dyn FnMut(&mut T) -> &mut (dyn crate::ResourceLimiter) + Send + Sync>),
#[cfg(feature = "async")]
Async(Box<dyn FnMut(&mut T) -> &mut (dyn crate::ResourceLimiterAsync) + Send + Sync>),
}
enum CallHookInner<T: 'static> {
#[cfg(feature = "call-hook")]
Sync(Box<dyn FnMut(StoreContextMut<'_, T>, CallHook) -> Result<()> + Send + Sync>),
#[cfg(all(feature = "async", feature = "call-hook"))]
Async(Box<dyn CallHookHandler<T> + Send + Sync>),
#[allow(dead_code)]
ForceTypeParameterToBeUsed {
uninhabited: Uninhabited,
_marker: marker::PhantomData<T>,
},
}
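/// Return value of an epoch deadline callback, instructing how execution
/// should proceed once the deadline has been reached.
///
/// `Continue(n)` extends the deadline by `n` ticks and resumes immediately;
/// the `Yield` variants additionally yield to the async executor first, and
/// `YieldCustom` awaits the provided future before resuming.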
#[non_exhaustive]
pub enum UpdateDeadline {
Continue(u64),
#[cfg(feature = "async")]
Yield(u64),
#[cfg(feature = "async")]
YieldCustom(
u64,
::core::pin::Pin<Box<dyn ::core::future::Future<Output = ()> + Send>>,
),
}
impl<T> Deref for StoreInner<T> {
type Target = StoreOpaque;
fn deref(&self) -> &Self::Target {
&self.inner
}
}
impl<T> DerefMut for StoreInner<T> {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.inner
}
}
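/// The type-erased core of a [`Store`], independent of the host data `T`.
///
/// Most of the runtime's state lives here: instances, the module registry,
/// GC state, fuel and epoch bookkeeping, and the executor used to run code.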
pub struct StoreOpaque {
_marker: marker::PhantomPinned,
engine: Engine,
vm_store_context: VMStoreContext,
#[cfg(feature = "stack-switching")]
continuations: Vec<Box<VMContRef>>,
instances: PrimaryMap<InstanceId, StoreInstance>,
#[cfg(feature = "component-model")]
num_component_instances: usize,
signal_handler: Option<SignalHandler>,
modules: ModuleRegistry,
func_refs: FuncRefs,
host_globals: PrimaryMap<DefinedGlobalIndex, StoreBox<VMHostGlobalContext>>,
gc_store: Option<GcStore>,
gc_roots: RootSet,
#[cfg(feature = "gc")]
gc_roots_list: GcRootsList,
#[cfg(feature = "gc")]
gc_host_alloc_types: crate::hash_set::HashSet<crate::type_registry::RegisteredType>,
instance_count: usize,
instance_limit: usize,
memory_count: usize,
memory_limit: usize,
table_count: usize,
table_limit: usize,
#[cfg(feature = "async")]
async_state: fiber::AsyncState,
fuel_reserve: u64,
fuel_yield_interval: Option<NonZeroU64>,
store_data: StoreData,
traitobj: StorePtr,
default_caller_vmctx: SendSyncPtr<VMContext>,
hostcall_val_storage: Vec<Val>,
wasm_val_raw_storage: Vec<ValRaw>,
pkey: Option<ProtectionKey>,
#[cfg(feature = "component-model")]
component_host_table: vm::component::ResourceTable,
#[cfg(feature = "component-model")]
component_calls: vm::component::CallContexts,
#[cfg(feature = "component-model")]
host_resource_data: crate::component::HostResourceData,
executor: Executor,
}
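/// How WebAssembly code in this store is executed: via the Pulley
/// interpreter, or natively when a host compiler backend is available.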
pub(crate) enum Executor {
Interpreter(Interpreter),
#[cfg(has_host_compiler_backend)]
Native,
}
impl Executor {
pub(crate) fn new(engine: &Engine) -> Self {
#[cfg(has_host_compiler_backend)]
if cfg!(feature = "pulley") && engine.target().is_pulley() {
Executor::Interpreter(Interpreter::new(engine))
} else {
Executor::Native
}
#[cfg(not(has_host_compiler_backend))]
{
debug_assert!(engine.target().is_pulley());
Executor::Interpreter(Interpreter::new(engine))
}
}
}
pub(crate) enum ExecutorRef<'a> {
Interpreter(InterpreterRef<'a>),
#[cfg(has_host_compiler_backend)]
Native,
}
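/// RAII guard asserting that no GC can happen while it is alive.
///
/// Creating it enters a no-GC scope on the store's GC heap (if one has been
/// allocated); dropping it exits that scope. This catches accidental
/// collections while raw GC references are held on the host stack.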
#[doc(hidden)]
pub struct AutoAssertNoGc<'a> {
store: &'a mut StoreOpaque,
entered: bool,
}
impl<'a> AutoAssertNoGc<'a> {
#[inline]
pub fn new(store: &'a mut StoreOpaque) -> Self {
let entered = if !cfg!(feature = "gc") {
false
} else if let Some(gc_store) = store.gc_store.as_mut() {
gc_store.gc_heap.enter_no_gc_scope();
true
} else {
false
};
AutoAssertNoGc { store, entered }
}
#[inline]
pub unsafe fn disabled(store: &'a mut StoreOpaque) -> Self {
if cfg!(debug_assertions) {
AutoAssertNoGc::new(store)
} else {
AutoAssertNoGc {
store,
entered: false,
}
}
}
}
impl core::ops::Deref for AutoAssertNoGc<'_> {
type Target = StoreOpaque;
#[inline]
fn deref(&self) -> &Self::Target {
&*self.store
}
}
impl core::ops::DerefMut for AutoAssertNoGc<'_> {
#[inline]
fn deref_mut(&mut self) -> &mut Self::Target {
&mut *self.store
}
}
impl Drop for AutoAssertNoGc<'_> {
#[inline]
fn drop(&mut self) {
if self.entered {
self.store.unwrap_gc_store_mut().gc_heap.exit_no_gc_scope();
}
}
}
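/// An instance owned by this store, along with how it was created.
///
/// `Dummy` instances back store-internal allocations (e.g. the default
/// caller `vmctx` created in `Store::new`) and have no registered module;
/// `Real` instances point back at their module in the `ModuleRegistry`.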
struct StoreInstance {
handle: InstanceHandle,
kind: StoreInstanceKind,
}
enum StoreInstanceKind {
Real {
module_id: RegisteredModuleId,
},
Dummy,
}
impl<T> Store<T> {
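/// Creates a new [`Store`] bound to `engine` with the given host data.
///
/// Note that this eagerly allocates a small "dummy" instance used as the
/// default caller `vmctx` for host-created functions.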
pub fn new(engine: &Engine, data: T) -> Self {
let store_data = StoreData::new();
log::trace!("creating new store {:?}", store_data.id());
let pkey = engine.allocator().next_available_pkey();
let inner = StoreOpaque {
_marker: marker::PhantomPinned,
engine: engine.clone(),
vm_store_context: Default::default(),
#[cfg(feature = "stack-switching")]
continuations: Vec::new(),
instances: PrimaryMap::new(),
#[cfg(feature = "component-model")]
num_component_instances: 0,
signal_handler: None,
gc_store: None,
gc_roots: RootSet::default(),
#[cfg(feature = "gc")]
gc_roots_list: GcRootsList::default(),
#[cfg(feature = "gc")]
gc_host_alloc_types: Default::default(),
modules: ModuleRegistry::default(),
func_refs: FuncRefs::default(),
host_globals: PrimaryMap::new(),
instance_count: 0,
instance_limit: crate::DEFAULT_INSTANCE_LIMIT,
memory_count: 0,
memory_limit: crate::DEFAULT_MEMORY_LIMIT,
table_count: 0,
table_limit: crate::DEFAULT_TABLE_LIMIT,
#[cfg(feature = "async")]
async_state: Default::default(),
fuel_reserve: 0,
fuel_yield_interval: None,
store_data,
traitobj: StorePtr::empty(),
default_caller_vmctx: SendSyncPtr::new(NonNull::dangling()),
hostcall_val_storage: Vec::new(),
wasm_val_raw_storage: Vec::new(),
pkey,
#[cfg(feature = "component-model")]
component_host_table: Default::default(),
#[cfg(feature = "component-model")]
component_calls: Default::default(),
#[cfg(feature = "component-model")]
host_resource_data: Default::default(),
executor: Executor::new(engine),
};
let mut inner = Box::new(StoreInner {
inner,
limiter: None,
call_hook: None,
#[cfg(target_has_atomic = "64")]
epoch_deadline_behavior: None,
data: ManuallyDrop::new(data),
});
inner.traitobj = StorePtr::new(NonNull::from(&mut *inner));
let module = Arc::new(wasmtime_environ::Module::default());
let shim = ModuleRuntimeInfo::bare(module);
let allocator = OnDemandInstanceAllocator::default();
allocator
.validate_module(shim.env_module(), shim.offsets())
.unwrap();
unsafe {
let id = inner
.allocate_instance(
AllocateInstanceKind::Dummy {
allocator: &allocator,
},
&shim,
Default::default(),
)
.expect("failed to allocate default callee");
let default_caller_vmctx = inner.instance(id).vmctx();
inner.default_caller_vmctx = default_caller_vmctx.into();
}
Self {
inner: ManuallyDrop::new(inner),
}
}
#[inline]
pub fn data(&self) -> &T {
self.inner.data()
}
#[inline]
pub fn data_mut(&mut self) -> &mut T {
self.inner.data_mut()
}
fn run_manual_drop_routines(&mut self) {
#[cfg(feature = "component-model-async")]
ComponentStoreData::drop_fibers(&mut self.inner);
self.inner.flush_fiber_stack();
}
pub fn into_data(mut self) -> T {
self.run_manual_drop_routines();
unsafe {
let mut inner = ManuallyDrop::take(&mut self.inner);
core::mem::forget(self);
ManuallyDrop::take(&mut inner.data)
}
}
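/// Configures a synchronous resource limiter for this store.
///
/// The closure projects a [`ResourceLimiter`](crate::ResourceLimiter) out
/// of the host data `T`; its `instances`/`tables`/`memories` budgets are
/// read once here and stored as the store's hard limits.
///
/// A sketch, assuming the `StoreLimits`/`StoreLimitsBuilder` helpers from
/// the public crate (they implement `ResourceLimiter`):
///
/// ```ignore
/// use wasmtime::{Engine, Store, StoreLimits, StoreLimitsBuilder};
///
/// struct HostState {
///     limits: StoreLimits,
/// }
///
/// let engine = Engine::default();
/// let mut store = Store::new(
///     &engine,
///     HostState {
///         limits: StoreLimitsBuilder::new().instances(10).build(),
///     },
/// );
/// // The closure projects the limiter out of the host data `T`.
/// store.limiter(|state| &mut state.limits);
/// ```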
pub fn limiter(
&mut self,
mut limiter: impl FnMut(&mut T) -> &mut (dyn crate::ResourceLimiter) + Send + Sync + 'static,
) {
let inner = &mut self.inner;
let (instance_limit, table_limit, memory_limit) = {
let l = limiter(&mut inner.data);
(l.instances(), l.tables(), l.memories())
};
let innermost = &mut inner.inner;
innermost.instance_limit = instance_limit;
innermost.table_limit = table_limit;
innermost.memory_limit = memory_limit;
inner.limiter = Some(ResourceLimiterInner::Sync(Box::new(limiter)));
}
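/// Configures a hook invoked on every transition between host and
/// WebAssembly (requires the `call-hook` feature).
///
/// An error returned from the hook is propagated to the wasm in progress.
/// A sketch counting host entries in the host data:
///
/// ```ignore
/// use wasmtime::{Engine, Result, Store};
///
/// let engine = Engine::default();
/// let mut store = Store::new(&engine, 0u64);
/// store.call_hook(|mut cx, hook| {
///     if hook.entering_host() {
///         *cx.data_mut() += 1;
///     }
///     Ok(())
/// });
/// ```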
#[cfg(feature = "call-hook")]
pub fn call_hook(
&mut self,
hook: impl FnMut(StoreContextMut<'_, T>, CallHook) -> Result<()> + Send + Sync + 'static,
) {
self.inner.call_hook = Some(CallHookInner::Sync(Box::new(hook)));
}
pub fn engine(&self) -> &Engine {
self.inner.engine()
}
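/// Explicitly triggers a garbage collection (requires the `gc` feature).
///
/// Panics if this store is configured for async support; async stores must
/// use the async GC entry point instead.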
#[cfg(feature = "gc")]
pub fn gc(&mut self, why: Option<&crate::GcHeapOutOfMemory<()>>) {
assert!(!self.inner.async_support());
self.inner.gc(why);
}
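/// Returns the amount of fuel remaining in the store, erroring if fuel
/// consumption is not enabled for this engine.
///
/// A sketch of configuring and using fuel, assuming the public
/// `Config::consume_fuel` knob:
///
/// ```ignore
/// use wasmtime::{Config, Engine, Result, Store};
///
/// fn main() -> Result<()> {
///     let mut config = Config::new();
///     config.consume_fuel(true);
///     let engine = Engine::new(&config)?;
///     let mut store = Store::new(&engine, ());
///     store.set_fuel(10_000)?;
///     assert_eq!(store.get_fuel()?, 10_000);
///     Ok(())
/// }
/// ```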
pub fn get_fuel(&self) -> Result<u64> {
self.inner.get_fuel()
}
pub fn set_fuel(&mut self, fuel: u64) -> Result<()> {
self.inner.set_fuel(fuel)
}
pub fn fuel_async_yield_interval(&mut self, interval: Option<u64>) -> Result<()> {
self.inner.fuel_async_yield_interval(interval)
}
#[cfg(target_has_atomic = "64")]
pub fn set_epoch_deadline(&mut self, ticks_beyond_current: u64) {
self.inner.set_epoch_deadline(ticks_beyond_current);
}
#[cfg(target_has_atomic = "64")]
pub fn epoch_deadline_trap(&mut self) {
self.inner.epoch_deadline_trap();
}
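/// Installs a callback invoked when the epoch deadline is reached, instead
/// of trapping (requires 64-bit atomics).
///
/// A sketch that keeps extending the deadline by one tick, assuming the
/// public `Config::epoch_interruption` knob and something periodically
/// calling `Engine::increment_epoch`:
///
/// ```ignore
/// use wasmtime::{Config, Engine, Result, Store, UpdateDeadline};
///
/// fn main() -> Result<()> {
///     let mut config = Config::new();
///     config.epoch_interruption(true);
///     let engine = Engine::new(&config)?;
///     let mut store = Store::new(&engine, ());
///     store.set_epoch_deadline(1);
///     store.epoch_deadline_callback(|_cx| Ok(UpdateDeadline::Continue(1)));
///     Ok(())
/// }
/// ```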
#[cfg(target_has_atomic = "64")]
pub fn epoch_deadline_callback(
&mut self,
callback: impl FnMut(StoreContextMut<T>) -> Result<UpdateDeadline> + Send + Sync + 'static,
) {
self.inner.epoch_deadline_callback(Box::new(callback));
}
}
impl<'a, T> StoreContext<'a, T> {
pub(crate) fn async_support(&self) -> bool {
self.0.async_support()
}
pub fn engine(&self) -> &Engine {
self.0.engine()
}
pub fn data(&self) -> &'a T {
self.0.data()
}
pub fn get_fuel(&self) -> Result<u64> {
self.0.get_fuel()
}
}
impl<'a, T> StoreContextMut<'a, T> {
pub fn data(&self) -> &T {
self.0.data()
}
pub fn data_mut(&mut self) -> &mut T {
self.0.data_mut()
}
pub fn engine(&self) -> &Engine {
self.0.engine()
}
#[cfg(feature = "gc")]
pub fn gc(&mut self, why: Option<&crate::GcHeapOutOfMemory<()>>) {
self.0.gc(why);
}
pub fn get_fuel(&self) -> Result<u64> {
self.0.get_fuel()
}
pub fn set_fuel(&mut self, fuel: u64) -> Result<()> {
self.0.set_fuel(fuel)
}
pub fn fuel_async_yield_interval(&mut self, interval: Option<u64>) -> Result<()> {
self.0.fuel_async_yield_interval(interval)
}
#[cfg(target_has_atomic = "64")]
pub fn set_epoch_deadline(&mut self, ticks_beyond_current: u64) {
self.0.set_epoch_deadline(ticks_beyond_current);
}
#[cfg(target_has_atomic = "64")]
pub fn epoch_deadline_trap(&mut self) {
self.0.epoch_deadline_trap();
}
}
impl<T> StoreInner<T> {
#[inline]
fn data(&self) -> &T {
&self.data
}
#[inline]
fn data_mut(&mut self) -> &mut T {
&mut self.data
}
#[inline]
pub fn call_hook(&mut self, s: CallHook) -> Result<()> {
if self.inner.pkey.is_none() && self.call_hook.is_none() {
Ok(())
} else {
self.call_hook_slow_path(s)
}
}
fn call_hook_slow_path(&mut self, s: CallHook) -> Result<()> {
if let Some(pkey) = &self.inner.pkey {
let allocator = self.engine().allocator();
match s {
CallHook::CallingWasm | CallHook::ReturningFromHost => {
allocator.restrict_to_pkey(*pkey)
}
CallHook::ReturningFromWasm | CallHook::CallingHost => allocator.allow_all_pkeys(),
}
}
#[cfg_attr(not(feature = "call-hook"), allow(unreachable_patterns))]
if let Some(mut call_hook) = self.call_hook.take() {
let result = self.invoke_call_hook(&mut call_hook, s);
self.call_hook = Some(call_hook);
return result;
}
Ok(())
}
fn invoke_call_hook(&mut self, call_hook: &mut CallHookInner<T>, s: CallHook) -> Result<()> {
match call_hook {
#[cfg(feature = "call-hook")]
CallHookInner::Sync(hook) => hook((&mut *self).as_context_mut(), s),
#[cfg(all(feature = "async", feature = "call-hook"))]
CallHookInner::Async(handler) => {
if !self.can_block() {
bail!("couldn't grab async_cx for call hook")
}
return (&mut *self)
.as_context_mut()
.with_blocking(|store, cx| cx.block_on(handler.handle_call_event(store, s)))?;
}
CallHookInner::ForceTypeParameterToBeUsed { uninhabited, .. } => {
let _ = s;
match *uninhabited {}
}
}
}
#[cfg(not(feature = "async"))]
fn flush_fiber_stack(&mut self) {
// Nothing to do: fiber stacks only exist when async support is enabled.
}
}
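// Fuel representation: at runtime wasm code increments `injected_fuel`
// (stored in the `VMStoreContext`) towards zero, so a negative value means
// fuel remains in the current interval. Fuel beyond the current yield
// interval is parked in `fuel_reserve` and re-injected by `refuel`.
//
// Worked example (matching the `smoke` test below): `set_fuel(..., 25)` with
// a yield interval of 10 injects 10 (`injected_fuel = -10`) and reserves 15,
// so `get_fuel` reports `15 - (-10) = 25`.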
fn get_fuel(injected_fuel: i64, fuel_reserve: u64) -> u64 {
fuel_reserve.saturating_add_signed(-injected_fuel)
}
fn refuel(
injected_fuel: &mut i64,
fuel_reserve: &mut u64,
yield_interval: Option<NonZeroU64>,
) -> bool {
let fuel = get_fuel(*injected_fuel, *fuel_reserve);
if fuel > 0 {
set_fuel(injected_fuel, fuel_reserve, yield_interval, fuel);
true
} else {
false
}
}
fn set_fuel(
injected_fuel: &mut i64,
fuel_reserve: &mut u64,
yield_interval: Option<NonZeroU64>,
new_fuel_amount: u64,
) {
let interval = yield_interval.unwrap_or(NonZeroU64::MAX).get();
// Inject at most one yield interval's worth of fuel, capped at `i64::MAX`
// since the injected amount is stored negated in an `i64`.
let injected = core::cmp::min(interval, new_fuel_amount);
let injected = core::cmp::min(injected, i64::MAX as u64);
// Park the remainder in the reserve; `refuel` re-injects it once the
// current interval is consumed.
*fuel_reserve = new_fuel_amount - injected;
*injected_fuel = -(injected as i64);
}
#[doc(hidden)]
impl StoreOpaque {
pub fn id(&self) -> StoreId {
self.store_data.id()
}
pub fn bump_resource_counts(&mut self, module: &Module) -> Result<()> {
fn bump(slot: &mut usize, max: usize, amt: usize, desc: &str) -> Result<()> {
let new = slot.saturating_add(amt);
if new > max {
bail!(
"resource limit exceeded: {} count too high at {}",
desc,
new
);
}
*slot = new;
Ok(())
}
let module = module.env_module();
let memories = module.num_defined_memories();
let tables = module.num_defined_tables();
bump(&mut self.instance_count, self.instance_limit, 1, "instance")?;
bump(
&mut self.memory_count,
self.memory_limit,
memories,
"memory",
)?;
bump(&mut self.table_count, self.table_limit, tables, "table")?;
Ok(())
}
#[inline]
pub fn async_support(&self) -> bool {
cfg!(feature = "async") && self.engine().config().async_support
}
#[inline]
pub fn engine(&self) -> &Engine {
&self.engine
}
#[inline]
pub fn store_data(&self) -> &StoreData {
&self.store_data
}
#[inline]
pub fn store_data_mut(&mut self) -> &mut StoreData {
&mut self.store_data
}
#[inline]
pub(crate) fn modules(&self) -> &ModuleRegistry {
&self.modules
}
#[inline]
pub(crate) fn modules_mut(&mut self) -> &mut ModuleRegistry {
&mut self.modules
}
pub(crate) fn func_refs_and_modules(&mut self) -> (&mut FuncRefs, &ModuleRegistry) {
(&mut self.func_refs, &self.modules)
}
pub(crate) fn host_globals(
&self,
) -> &PrimaryMap<DefinedGlobalIndex, StoreBox<VMHostGlobalContext>> {
&self.host_globals
}
pub(crate) fn host_globals_mut(
&mut self,
) -> &mut PrimaryMap<DefinedGlobalIndex, StoreBox<VMHostGlobalContext>> {
&mut self.host_globals
}
pub fn module_for_instance(&self, instance: StoreInstanceId) -> Option<&'_ Module> {
instance.store_id().assert_belongs_to(self.id());
match self.instances[instance.instance()].kind {
StoreInstanceKind::Dummy => None,
StoreInstanceKind::Real { module_id } => {
let module = self
.modules()
.lookup_module_by_id(module_id)
.expect("should always have a registered module for real instances");
Some(module)
}
}
}
#[inline]
pub fn instance(&self, id: InstanceId) -> &vm::Instance {
self.instances[id].handle.get()
}
#[inline]
pub fn instance_mut(&mut self, id: InstanceId) -> Pin<&mut vm::Instance> {
self.instances[id].handle.get_mut()
}
pub fn all_instances<'a>(&'a mut self) -> impl ExactSizeIterator<Item = Instance> + 'a {
let instances = self
.instances
.iter()
.filter_map(|(id, inst)| {
if let StoreInstanceKind::Dummy = inst.kind {
None
} else {
Some(id)
}
})
.collect::<Vec<_>>();
instances
.into_iter()
.map(|i| Instance::from_wasmtime(i, self))
}
pub fn all_memories<'a>(&'a mut self) -> impl Iterator<Item = Memory> + 'a {
let mems = self
.instances
.iter_mut()
.flat_map(|(_, instance)| instance.handle.get().defined_memories())
.collect::<Vec<_>>();
mems.into_iter()
.map(|memory| unsafe { Memory::from_wasmtime_memory(memory, self) })
}
pub fn for_each_table(&mut self, mut f: impl FnMut(&mut Self, Table)) {
for id in self.instances.keys() {
let instance = StoreInstanceId::new(self.id(), id);
for table in 0..self.instance(id).env_module().num_defined_tables() {
let table = DefinedTableIndex::new(table);
f(self, Table::from_raw(instance, table));
}
}
}
pub fn for_each_global(&mut self, mut f: impl FnMut(&mut Self, Global)) {
for global in self.host_globals.keys() {
let global = Global::new_host(self, global);
f(self, global);
}
for id in self.instances.keys() {
for index in 0..self.instance(id).env_module().num_defined_globals() {
let index = DefinedGlobalIndex::new(index);
let global = Global::new_instance(self, id, index);
f(self, global);
}
}
}
#[cfg_attr(not(target_os = "linux"), allow(dead_code))]
pub fn set_signal_handler(&mut self, handler: Option<SignalHandler>) {
self.signal_handler = handler;
}
#[inline]
pub fn vm_store_context(&self) -> &VMStoreContext {
&self.vm_store_context
}
#[inline]
pub fn vm_store_context_mut(&mut self) -> &mut VMStoreContext {
&mut self.vm_store_context
}
#[inline(never)]
pub(crate) fn allocate_gc_heap(&mut self) -> Result<()> {
log::trace!("allocating GC heap for store {:?}", self.id());
assert!(self.gc_store.is_none());
assert_eq!(
self.vm_store_context.gc_heap.base.as_non_null(),
NonNull::dangling(),
);
assert_eq!(self.vm_store_context.gc_heap.current_length(), 0);
let vmstore = self.traitobj();
let gc_store = allocate_gc_store(self.engine(), vmstore, self.get_pkey())?;
self.vm_store_context.gc_heap = gc_store.vmmemory_definition();
self.gc_store = Some(gc_store);
return Ok(());
#[cfg(feature = "gc")]
fn allocate_gc_store(
engine: &Engine,
vmstore: NonNull<dyn vm::VMStore>,
pkey: Option<ProtectionKey>,
) -> Result<GcStore> {
use wasmtime_environ::packed_option::ReservedValue;
ensure!(
engine.features().gc_types(),
"cannot allocate a GC store when GC is disabled at configuration time"
);
let mut request = InstanceAllocationRequest {
id: InstanceId::reserved_value(),
runtime_info: &ModuleRuntimeInfo::bare(Arc::new(
wasmtime_environ::Module::default(),
)),
imports: vm::Imports::default(),
store: StorePtr::new(vmstore),
wmemcheck: false,
pkey,
tunables: engine.tunables(),
};
let mem_ty = engine.tunables().gc_heap_memory_type();
let tunables = engine.tunables();
let (mem_alloc_index, mem) = unsafe {
engine
.allocator()
.allocate_memory(&mut request, &mem_ty, tunables, None)?
};
let gc_runtime = engine
.gc_runtime()
.context("no GC runtime: GC disabled at compile time or configuration time")?;
let (index, heap) =
engine
.allocator()
.allocate_gc_heap(engine, &**gc_runtime, mem_alloc_index, mem)?;
Ok(GcStore::new(index, heap))
}
#[cfg(not(feature = "gc"))]
fn allocate_gc_store(
_engine: &Engine,
_vmstore: NonNull<dyn vm::VMStore>,
_pkey: Option<ProtectionKey>,
) -> Result<GcStore> {
bail!("cannot allocate a GC store: the `gc` feature was disabled at compile time")
}
}
#[inline]
pub(crate) fn gc_store(&self) -> Result<&GcStore> {
match &self.gc_store {
Some(gc_store) => Ok(gc_store),
None => bail!("GC heap not initialized yet"),
}
}
#[inline]
pub(crate) fn gc_store_mut(&mut self) -> Result<&mut GcStore> {
if self.gc_store.is_none() {
self.allocate_gc_heap()?;
}
Ok(self.unwrap_gc_store_mut())
}
#[inline]
pub(crate) fn optional_gc_store_mut(&mut self) -> Option<&mut GcStore> {
if cfg!(not(feature = "gc")) || !self.engine.features().gc_types() {
debug_assert!(self.gc_store.is_none());
None
} else {
self.gc_store.as_mut()
}
}
#[inline]
#[track_caller]
#[cfg(feature = "gc")]
pub(crate) fn unwrap_gc_store(&self) -> &GcStore {
self.gc_store
.as_ref()
.expect("attempted to access the store's GC heap before it has been allocated")
}
#[inline]
#[track_caller]
pub(crate) fn unwrap_gc_store_mut(&mut self) -> &mut GcStore {
self.gc_store
.as_mut()
.expect("attempted to access the store's GC heap before it has been allocated")
}
#[inline]
pub(crate) fn gc_roots(&self) -> &RootSet {
&self.gc_roots
}
#[inline]
#[cfg(feature = "gc")]
pub(crate) fn gc_roots_mut(&mut self) -> &mut RootSet {
&mut self.gc_roots
}
#[inline]
pub(crate) fn exit_gc_lifo_scope(&mut self, scope: usize) {
self.gc_roots.exit_lifo_scope(self.gc_store.as_mut(), scope);
}
#[cfg(feature = "gc")]
fn do_gc(&mut self) {
assert!(
!self.async_support(),
"must use `store.gc_async()` instead of `store.gc()` for async stores"
);
if self.gc_store.is_none() {
return;
}
log::trace!("============ Begin GC ===========");
let mut roots = core::mem::take(&mut self.gc_roots_list);
self.trace_roots(&mut roots);
self.unwrap_gc_store_mut().gc(unsafe { roots.iter() });
roots.clear();
self.gc_roots_list = roots;
log::trace!("============ End GC ===========");
}
#[cfg(feature = "gc")]
fn trace_roots(&mut self, gc_roots_list: &mut GcRootsList) {
log::trace!("Begin trace GC roots");
assert!(gc_roots_list.is_empty());
self.trace_wasm_stack_roots(gc_roots_list);
#[cfg(feature = "stack-switching")]
self.trace_wasm_continuation_roots(gc_roots_list);
self.trace_vmctx_roots(gc_roots_list);
self.trace_user_roots(gc_roots_list);
log::trace!("End trace GC roots")
}
#[cfg(feature = "gc")]
fn trace_wasm_stack_frame(
&self,
gc_roots_list: &mut GcRootsList,
frame: crate::runtime::vm::Frame,
) {
use crate::runtime::vm::SendSyncPtr;
use core::ptr::NonNull;
let pc = frame.pc();
debug_assert!(pc != 0, "we should always get a valid PC for Wasm frames");
let fp = frame.fp() as *mut usize;
debug_assert!(
!fp.is_null(),
"we should always get a valid frame pointer for Wasm frames"
);
let module_info = self
.modules()
.lookup_module_by_pc(pc)
.expect("should have module info for Wasm frame");
let stack_map = match module_info.lookup_stack_map(pc) {
Some(sm) => sm,
None => {
log::trace!("No stack map for this Wasm frame");
return;
}
};
log::trace!(
"We have a stack map that maps {} bytes in this Wasm frame",
stack_map.frame_size()
);
let sp = unsafe { stack_map.sp(fp) };
for stack_slot in unsafe { stack_map.live_gc_refs(sp) } {
let raw: u32 = unsafe { core::ptr::read(stack_slot) };
log::trace!("Stack slot @ {stack_slot:p} = {raw:#x}");
let gc_ref = VMGcRef::from_raw_u32(raw);
if gc_ref.is_some() {
unsafe {
gc_roots_list
.add_wasm_stack_root(SendSyncPtr::new(NonNull::new(stack_slot).unwrap()));
}
}
}
}
#[cfg(feature = "gc")]
fn trace_wasm_stack_roots(&mut self, gc_roots_list: &mut GcRootsList) {
use crate::runtime::vm::Backtrace;
log::trace!("Begin trace GC roots :: Wasm stack");
Backtrace::trace(self, |frame| {
self.trace_wasm_stack_frame(gc_roots_list, frame);
core::ops::ControlFlow::Continue(())
});
log::trace!("End trace GC roots :: Wasm stack");
}
#[cfg(all(feature = "gc", feature = "stack-switching"))]
fn trace_wasm_continuation_roots(&mut self, gc_roots_list: &mut GcRootsList) {
use crate::{runtime::vm::Backtrace, vm::VMStackState};
log::trace!("Begin trace GC roots :: continuations");
for continuation in &self.continuations {
let state = continuation.common_stack_information.state;
match state {
VMStackState::Suspended => {
Backtrace::trace_suspended_continuation(self, continuation.deref(), |frame| {
self.trace_wasm_stack_frame(gc_roots_list, frame);
core::ops::ControlFlow::Continue(())
});
}
VMStackState::Running | VMStackState::Parent => {
// These continuations are part of the currently active stack
// chain; their frames were already traced by
// `trace_wasm_stack_roots` above.
}
VMStackState::Fresh | VMStackState::Returned => {
// No live Wasm frames, so nothing to trace.
}
}
}
log::trace!("End trace GC roots :: continuations");
}
#[cfg(feature = "gc")]
fn trace_vmctx_roots(&mut self, gc_roots_list: &mut GcRootsList) {
log::trace!("Begin trace GC roots :: vmctx");
self.for_each_global(|store, global| global.trace_root(store, gc_roots_list));
self.for_each_table(|store, table| table.trace_roots(store, gc_roots_list));
log::trace!("End trace GC roots :: vmctx");
}
#[cfg(feature = "gc")]
fn trace_user_roots(&mut self, gc_roots_list: &mut GcRootsList) {
log::trace!("Begin trace GC roots :: user");
self.gc_roots.trace_roots(gc_roots_list);
log::trace!("End trace GC roots :: user");
}
#[cfg(feature = "gc")]
pub(crate) fn insert_gc_host_alloc_type(&mut self, ty: crate::type_registry::RegisteredType) {
self.gc_host_alloc_types.insert(ty);
}
pub fn get_fuel(&self) -> Result<u64> {
anyhow::ensure!(
self.engine().tunables().consume_fuel,
"fuel is not configured in this store"
);
let injected_fuel = unsafe { *self.vm_store_context.fuel_consumed.get() };
Ok(get_fuel(injected_fuel, self.fuel_reserve))
}
fn refuel(&mut self) -> bool {
let injected_fuel = unsafe { &mut *self.vm_store_context.fuel_consumed.get() };
refuel(
injected_fuel,
&mut self.fuel_reserve,
self.fuel_yield_interval,
)
}
pub fn set_fuel(&mut self, fuel: u64) -> Result<()> {
anyhow::ensure!(
self.engine().tunables().consume_fuel,
"fuel is not configured in this store"
);
let injected_fuel = unsafe { &mut *self.vm_store_context.fuel_consumed.get() };
set_fuel(
injected_fuel,
&mut self.fuel_reserve,
self.fuel_yield_interval,
fuel,
);
Ok(())
}
pub fn fuel_async_yield_interval(&mut self, interval: Option<u64>) -> Result<()> {
anyhow::ensure!(
self.engine().tunables().consume_fuel,
"fuel is not configured in this store"
);
anyhow::ensure!(
self.engine().config().async_support,
"async support is not configured in this store"
);
anyhow::ensure!(
interval != Some(0),
"fuel_async_yield_interval must not be 0"
);
self.fuel_yield_interval = interval.and_then(NonZeroU64::new);
self.set_fuel(self.get_fuel()?)
}
#[inline]
pub fn signal_handler(&self) -> Option<*const SignalHandler> {
let handler = self.signal_handler.as_ref()?;
Some(handler)
}
#[inline]
pub fn vm_store_context_ptr(&self) -> NonNull<VMStoreContext> {
NonNull::from(&self.vm_store_context)
}
#[inline]
pub fn default_caller(&self) -> NonNull<VMContext> {
self.default_caller_vmctx.as_non_null()
}
#[inline]
pub fn traitobj(&self) -> NonNull<dyn vm::VMStore> {
self.traitobj.as_raw().unwrap()
}
#[inline]
pub fn traitobj_mut(&mut self) -> &mut dyn vm::VMStore {
unsafe { self.traitobj().as_mut() }
}
#[inline]
pub fn take_hostcall_val_storage(&mut self) -> Vec<Val> {
mem::take(&mut self.hostcall_val_storage)
}
#[inline]
pub fn save_hostcall_val_storage(&mut self, storage: Vec<Val>) {
if storage.capacity() > self.hostcall_val_storage.capacity() {
self.hostcall_val_storage = storage;
}
}
#[inline]
pub fn take_wasm_val_raw_storage(&mut self) -> Vec<ValRaw> {
mem::take(&mut self.wasm_val_raw_storage)
}
#[inline]
pub fn save_wasm_val_raw_storage(&mut self, storage: Vec<ValRaw>) {
if storage.capacity() > self.wasm_val_raw_storage.capacity() {
self.wasm_val_raw_storage = storage;
}
}
pub(crate) fn wasm_fault(&self, pc: usize, addr: usize) -> Option<vm::WasmFault> {
// Faults at "null-ish" addresses are expected, e.g. when a null
// `VMFuncRef` is called and fields are loaded through a small offset
// from null. Treat anything within the first `size_of::<VMFuncRef>()`
// bytes as such an access rather than a linear-memory fault.
if addr <= mem::size_of::<VMFuncRef>() {
const _: () = {
// Keep this range small enough that it can't shadow a real mapping.
assert!(mem::size_of::<VMFuncRef>() <= 512);
};
return None;
}
let mut fault = None;
for (_, instance) in self.instances.iter() {
if let Some(f) = instance.handle.get().wasm_fault(addr) {
assert!(fault.is_none());
fault = Some(f);
}
}
if fault.is_some() {
return fault;
}
cfg_if::cfg_if! {
if #[cfg(feature = "std")] {
eprintln!(
"\
Wasmtime caught a segfault for a wasm program because the faulting instruction
is allowed to segfault due to how linear memories are implemented. The address
that was accessed, however, is not known to any linear memory in use within this
Store. This may be indicative of a critical bug in Wasmtime's code generation
because all addresses which are known to be reachable from wasm won't reach this
message.
pc: 0x{pc:x}
address: 0x{addr:x}
This is a possible security issue because WebAssembly has accessed something it
shouldn't have been able to. Other accesses may have succeeded and this one just
happened to be caught. The process will now be aborted to prevent this damage
from going any further and to alert what's going on. If this is a security
issue please reach out to the Wasmtime team via its security policy
at https://bytecodealliance.org/security.
"
);
std::process::abort();
} else if #[cfg(panic = "abort")] {
let _ = pc;
panic!("invalid fault");
} else {
let _ = pc;
struct PanicAgainOnDrop;
impl Drop for PanicAgainOnDrop {
fn drop(&mut self) {
panic!("panicking again to trigger a process abort");
}
}
let _bomb = PanicAgainOnDrop;
panic!("invalid fault");
}
}
}
#[inline]
pub(crate) fn get_pkey(&self) -> Option<ProtectionKey> {
self.pkey
}
#[inline]
#[cfg(feature = "component-model")]
pub(crate) fn component_resource_state(
&mut self,
) -> (
&mut vm::component::CallContexts,
&mut vm::component::ResourceTable,
&mut crate::component::HostResourceData,
) {
(
&mut self.component_calls,
&mut self.component_host_table,
&mut self.host_resource_data,
)
}
#[cfg(feature = "component-model")]
pub(crate) fn push_component_instance(&mut self, instance: crate::component::Instance) {
let _ = instance;
self.num_component_instances += 1;
}
#[cfg(feature = "component-model")]
pub(crate) fn component_resource_state_with_instance(
&mut self,
instance: crate::component::Instance,
) -> (
&mut vm::component::CallContexts,
&mut vm::component::ResourceTable,
&mut crate::component::HostResourceData,
Pin<&mut vm::component::ComponentInstance>,
) {
(
&mut self.component_calls,
&mut self.component_host_table,
&mut self.host_resource_data,
instance.id().from_data_get_mut(&mut self.store_data),
)
}
#[cfg(feature = "async")]
pub(crate) fn fiber_async_state_mut(&mut self) -> &mut fiber::AsyncState {
&mut self.async_state
}
#[cfg(feature = "async")]
pub(crate) fn has_pkey(&self) -> bool {
self.pkey.is_some()
}
pub(crate) fn executor(&mut self) -> ExecutorRef<'_> {
match &mut self.executor {
Executor::Interpreter(i) => ExecutorRef::Interpreter(i.as_interpreter_ref()),
#[cfg(has_host_compiler_backend)]
Executor::Native => ExecutorRef::Native,
}
}
#[cfg(feature = "async")]
pub(crate) fn swap_executor(&mut self, executor: &mut Executor) {
mem::swap(&mut self.executor, executor);
}
pub(crate) fn unwinder(&self) -> &'static dyn Unwind {
match &self.executor {
Executor::Interpreter(i) => i.unwinder(),
#[cfg(has_host_compiler_backend)]
Executor::Native => &vm::UnwindHost,
}
}
#[cfg(feature = "stack-switching")]
pub fn allocate_continuation(&mut self) -> Result<*mut VMContRef> {
let mut continuation = Box::new(VMContRef::empty());
let stack_size = self.engine.config().async_stack_size;
let stack = crate::vm::VMContinuationStack::new(stack_size)?;
continuation.stack = stack;
let ptr = continuation.deref_mut() as *mut VMContRef;
self.continuations.push(continuation);
Ok(ptr)
}
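/// Allocates a new instance within this store.
///
/// # Safety
///
/// The caller must ensure that `imports` are valid for, and fully satisfy,
/// the module described by `runtime_info`.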
pub(crate) unsafe fn allocate_instance(
&mut self,
kind: AllocateInstanceKind<'_>,
runtime_info: &ModuleRuntimeInfo,
imports: Imports<'_>,
) -> Result<InstanceId> {
let id = self.instances.next_key();
let allocator = match kind {
AllocateInstanceKind::Module(_) => self.engine().allocator(),
AllocateInstanceKind::Dummy { allocator } => allocator,
};
let handle = allocator.allocate_module(InstanceAllocationRequest {
id,
runtime_info,
imports,
store: StorePtr::new(self.traitobj()),
wmemcheck: self.engine().config().wmemcheck,
pkey: self.get_pkey(),
tunables: self.engine().tunables(),
})?;
let actual = match kind {
AllocateInstanceKind::Module(module_id) => {
log::trace!(
"Adding instance to store: store={:?}, module={module_id:?}, instance={id:?}",
self.id()
);
self.instances.push(StoreInstance {
handle,
kind: StoreInstanceKind::Real { module_id },
})
}
AllocateInstanceKind::Dummy { .. } => {
log::trace!(
"Adding dummy instance to store: store={:?}, instance={id:?}",
self.id()
);
self.instances.push(StoreInstance {
handle,
kind: StoreInstanceKind::Dummy,
})
}
};
assert_eq!(id, actual);
Ok(id)
}
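/// Translates a raw `vmctx` pointer into a [`StoreInstanceId`].
///
/// # Safety
///
/// `vmctx` must be a valid pointer to the `VMContext` of an instance that
/// lives within this store.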
pub(crate) unsafe fn vmctx_id(&self, vmctx: NonNull<VMContext>) -> StoreInstanceId {
let instance_id = vm::Instance::from_vmctx(vmctx, |i| i.id());
StoreInstanceId::new(self.id(), instance_id)
}
}
pub(crate) enum AllocateInstanceKind<'a> {
Module(RegisteredModuleId),
Dummy {
allocator: &'a dyn InstanceAllocator,
},
}
unsafe impl<T> vm::VMStore for StoreInner<T> {
#[cfg(feature = "component-model-async")]
fn component_async_store(
&mut self,
) -> &mut dyn crate::runtime::component::VMComponentAsyncStore {
self
}
fn store_opaque(&self) -> &StoreOpaque {
&self.inner
}
fn store_opaque_mut(&mut self) -> &mut StoreOpaque {
&mut self.inner
}
fn memory_growing(
&mut self,
current: usize,
desired: usize,
maximum: Option<usize>,
) -> Result<bool, anyhow::Error> {
match self.limiter {
Some(ResourceLimiterInner::Sync(ref mut limiter)) => {
limiter(&mut self.data).memory_growing(current, desired, maximum)
}
#[cfg(feature = "async")]
Some(ResourceLimiterInner::Async(_)) => self.block_on(|store| {
let limiter = match &mut store.0.limiter {
Some(ResourceLimiterInner::Async(limiter)) => limiter,
_ => unreachable!(),
};
limiter(&mut store.0.data).memory_growing(current, desired, maximum)
})?,
None => Ok(true),
}
}
fn memory_grow_failed(&mut self, error: anyhow::Error) -> Result<()> {
match self.limiter {
Some(ResourceLimiterInner::Sync(ref mut limiter)) => {
limiter(&mut self.data).memory_grow_failed(error)
}
#[cfg(feature = "async")]
Some(ResourceLimiterInner::Async(ref mut limiter)) => {
limiter(&mut self.data).memory_grow_failed(error)
}
None => {
log::debug!("ignoring memory growth failure error: {error:?}");
Ok(())
}
}
}
fn table_growing(
&mut self,
current: usize,
desired: usize,
maximum: Option<usize>,
) -> Result<bool, anyhow::Error> {
match self.limiter {
Some(ResourceLimiterInner::Sync(ref mut limiter)) => {
limiter(&mut self.data).table_growing(current, desired, maximum)
}
#[cfg(feature = "async")]
Some(ResourceLimiterInner::Async(_)) => self.block_on(|store| {
let limiter = match &mut store.0.limiter {
Some(ResourceLimiterInner::Async(limiter)) => limiter,
_ => unreachable!(),
};
limiter(&mut store.0.data).table_growing(current, desired, maximum)
})?,
None => Ok(true),
}
}
fn table_grow_failed(&mut self, error: anyhow::Error) -> Result<()> {
match self.limiter {
Some(ResourceLimiterInner::Sync(ref mut limiter)) => {
limiter(&mut self.data).table_grow_failed(error)
}
#[cfg(feature = "async")]
Some(ResourceLimiterInner::Async(ref mut limiter)) => {
limiter(&mut self.data).table_grow_failed(error)
}
None => {
log::debug!("ignoring table growth failure: {error:?}");
Ok(())
}
}
}
fn out_of_gas(&mut self) -> Result<()> {
if !self.refuel() {
return Err(Trap::OutOfFuel.into());
}
#[cfg(feature = "async")]
if self.fuel_yield_interval.is_some() {
self.async_yield_impl()?;
}
Ok(())
}
#[cfg(target_has_atomic = "64")]
fn new_epoch(&mut self) -> Result<u64, anyhow::Error> {
let mut behavior = self.epoch_deadline_behavior.take();
let delta_result = match &mut behavior {
None => Err(Trap::Interrupt.into()),
Some(callback) => callback((&mut *self).as_context_mut()).and_then(|update| {
let delta = match update {
UpdateDeadline::Continue(delta) => delta,
#[cfg(feature = "async")]
UpdateDeadline::Yield(delta) => {
assert!(
self.async_support(),
"cannot use `UpdateDeadline::Yield` without enabling async support in the config"
);
self.async_yield_impl()?;
delta
}
#[cfg(feature = "async")]
UpdateDeadline::YieldCustom(delta, future) => {
assert!(
self.async_support(),
"cannot use `UpdateDeadline::YieldCustom` without enabling async support in the config"
);
self.block_on(|_| future)?;
delta
}
};
self.set_epoch_deadline(delta);
Ok(self.get_epoch_deadline())
})
};
self.epoch_deadline_behavior = behavior;
delta_result
}
#[cfg(feature = "gc")]
unsafe fn maybe_async_grow_or_collect_gc_heap(
&mut self,
root: Option<VMGcRef>,
bytes_needed: Option<u64>,
) -> Result<Option<VMGcRef>> {
self.inner.maybe_async_gc(root, bytes_needed)
}
#[cfg(not(feature = "gc"))]
unsafe fn maybe_async_grow_or_collect_gc_heap(
&mut self,
root: Option<VMGcRef>,
_bytes_needed: Option<u64>,
) -> Result<Option<VMGcRef>> {
Ok(root)
}
#[cfg(feature = "component-model")]
fn component_calls(&mut self) -> &mut vm::component::CallContexts {
&mut self.component_calls
}
}
impl<T> StoreInner<T> {
#[cfg(target_has_atomic = "64")]
pub(crate) fn set_epoch_deadline(&mut self, delta: u64) {
let current_epoch = self.engine().current_epoch();
let epoch_deadline = self.vm_store_context.epoch_deadline.get_mut();
*epoch_deadline = current_epoch + delta;
}
#[cfg(target_has_atomic = "64")]
fn epoch_deadline_trap(&mut self) {
self.epoch_deadline_behavior = None;
}
#[cfg(target_has_atomic = "64")]
fn epoch_deadline_callback(
&mut self,
callback: Box<dyn FnMut(StoreContextMut<T>) -> Result<UpdateDeadline> + Send + Sync>,
) {
self.epoch_deadline_behavior = Some(callback);
}
fn get_epoch_deadline(&mut self) -> u64 {
*self.vm_store_context.epoch_deadline.get_mut()
}
}
impl<T: Default> Default for Store<T> {
fn default() -> Store<T> {
Store::new(&Engine::default(), T::default())
}
}
impl<T: fmt::Debug> fmt::Debug for Store<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let inner = &**self.inner as *const StoreInner<T>;
f.debug_struct("Store")
.field("inner", &inner)
.field("data", &self.inner.data)
.finish()
}
}
impl<T> Drop for Store<T> {
fn drop(&mut self) {
self.run_manual_drop_routines();
unsafe {
ManuallyDrop::drop(&mut self.inner.data);
ManuallyDrop::drop(&mut self.inner);
}
}
}
impl Drop for StoreOpaque {
fn drop(&mut self) {
unsafe {
let allocator = self.engine.allocator();
let ondemand = OnDemandInstanceAllocator::default();
let store_id = self.id();
#[cfg(feature = "gc")]
if let Some(gc_store) = self.gc_store.take() {
let gc_alloc_index = gc_store.allocation_index;
log::trace!("store {store_id:?} is deallocating GC heap {gc_alloc_index:?}");
debug_assert!(self.engine.features().gc_types());
let (mem_alloc_index, mem) =
allocator.deallocate_gc_heap(gc_alloc_index, gc_store.gc_heap);
allocator.deallocate_memory(None, mem_alloc_index, mem);
}
for (id, instance) in self.instances.iter_mut() {
log::trace!("store {store_id:?} is deallocating {id:?}");
if let StoreInstanceKind::Dummy = instance.kind {
ondemand.deallocate_module(&mut instance.handle);
} else {
allocator.deallocate_module(&mut instance.handle);
}
}
#[cfg(feature = "component-model")]
{
for _ in 0..self.num_component_instances {
allocator.decrement_component_instance_count();
}
}
}
}
}
#[cfg(test)]
mod tests {
use super::{get_fuel, refuel, set_fuel};
use std::num::NonZeroU64;
struct FuelTank {
pub consumed_fuel: i64,
pub reserve_fuel: u64,
pub yield_interval: Option<NonZeroU64>,
}
impl FuelTank {
fn new() -> Self {
FuelTank {
consumed_fuel: 0,
reserve_fuel: 0,
yield_interval: None,
}
}
fn get_fuel(&self) -> u64 {
get_fuel(self.consumed_fuel, self.reserve_fuel)
}
fn refuel(&mut self) -> bool {
refuel(
&mut self.consumed_fuel,
&mut self.reserve_fuel,
self.yield_interval,
)
}
fn set_fuel(&mut self, fuel: u64) {
set_fuel(
&mut self.consumed_fuel,
&mut self.reserve_fuel,
self.yield_interval,
fuel,
);
}
}
#[test]
fn smoke() {
let mut tank = FuelTank::new();
tank.set_fuel(10);
assert_eq!(tank.consumed_fuel, -10);
assert_eq!(tank.reserve_fuel, 0);
tank.yield_interval = NonZeroU64::new(10);
tank.set_fuel(25);
assert_eq!(tank.consumed_fuel, -10);
assert_eq!(tank.reserve_fuel, 15);
}
#[test]
fn does_not_lose_precision() {
let mut tank = FuelTank::new();
tank.set_fuel(u64::MAX);
assert_eq!(tank.get_fuel(), u64::MAX);
tank.set_fuel(i64::MAX as u64);
assert_eq!(tank.get_fuel(), i64::MAX as u64);
tank.set_fuel(i64::MAX as u64 + 1);
assert_eq!(tank.get_fuel(), i64::MAX as u64 + 1);
}
#[test]
fn yielding_does_not_lose_precision() {
let mut tank = FuelTank::new();
tank.yield_interval = NonZeroU64::new(10);
tank.set_fuel(u64::MAX);
assert_eq!(tank.get_fuel(), u64::MAX);
assert_eq!(tank.consumed_fuel, -10);
assert_eq!(tank.reserve_fuel, u64::MAX - 10);
tank.yield_interval = NonZeroU64::new(u64::MAX);
tank.set_fuel(u64::MAX);
assert_eq!(tank.get_fuel(), u64::MAX);
assert_eq!(tank.consumed_fuel, -i64::MAX);
assert_eq!(tank.reserve_fuel, u64::MAX - (i64::MAX as u64));
tank.yield_interval = NonZeroU64::new((i64::MAX as u64) + 1);
tank.set_fuel(u64::MAX);
assert_eq!(tank.get_fuel(), u64::MAX);
assert_eq!(tank.consumed_fuel, -i64::MAX);
assert_eq!(tank.reserve_fuel, u64::MAX - (i64::MAX as u64));
}
#[test]
fn refueling() {
let mut tank = FuelTank::new();
tank.yield_interval = NonZeroU64::new(10);
tank.reserve_fuel = 42;
tank.consumed_fuel = 4;
assert!(tank.refuel());
assert_eq!(tank.reserve_fuel, 28);
assert_eq!(tank.consumed_fuel, -10);
tank.yield_interval = NonZeroU64::new(1);
tank.reserve_fuel = 8;
tank.consumed_fuel = 4;
assert_eq!(tank.get_fuel(), 4);
assert!(tank.refuel());
assert_eq!(tank.reserve_fuel, 3);
assert_eq!(tank.consumed_fuel, -1);
assert_eq!(tank.get_fuel(), 4);
tank.yield_interval = NonZeroU64::new(10);
tank.reserve_fuel = 3;
tank.consumed_fuel = 4;
assert_eq!(tank.get_fuel(), 0);
assert!(!tank.refuel());
assert_eq!(tank.reserve_fuel, 3);
assert_eq!(tank.consumed_fuel, 4);
assert_eq!(tank.get_fuel(), 0);
}
}