use crate::instance::InstanceData;
use crate::linker::Definition;
use crate::module::{BareModuleInfo, RegisteredModuleId};
use crate::prelude::*;
use crate::runtime::vm::mpk::{self, ProtectionKey, ProtectionMask};
use crate::runtime::vm::{
Backtrace, ExportGlobal, GcHeapAllocationIndex, GcRootsList, GcStore,
InstanceAllocationRequest, InstanceAllocator, InstanceHandle, OnDemandInstanceAllocator,
SignalHandler, StoreBox, StorePtr, VMContext, VMFuncRef, VMGcRef, VMRuntimeLimits, WasmFault,
};
use crate::trampoline::VMHostGlobalContext;
use crate::RootSet;
use crate::{module::ModuleRegistry, Engine, Module, Trap, Val, ValRaw};
use crate::{Global, Instance, Memory, RootScope, Table};
use alloc::sync::Arc;
use anyhow::{anyhow, bail, Result};
use core::cell::UnsafeCell;
use core::fmt;
use core::future::Future;
use core::marker;
use core::mem::{self, ManuallyDrop};
use core::num::NonZeroU64;
use core::ops::{Deref, DerefMut};
use core::pin::Pin;
use core::ptr;
use core::sync::atomic::AtomicU64;
use core::task::{Context, Poll};
mod context;
pub use self::context::*;
mod data;
pub use self::data::*;
mod func_refs;
use func_refs::FuncRefs;
/// Owner of all wasm runtime state for a set of instances: the public,
/// user-facing store type parameterized over arbitrary host data `T`.
pub struct Store<T> {
    // NOTE(review): `ManuallyDrop` presumably lets a `Drop` impl elsewhere in
    // this file control destruction order (and lets `into_data` take the box
    // without running its destructor) — confirm against the `Drop` impl,
    // which is outside this view.
    inner: ManuallyDrop<Box<StoreInner<T>>>,
}
/// The four transition points surrounding a host/wasm boundary crossing,
/// passed to registered call hooks.
#[derive(Copy, Clone, Debug)]
pub enum CallHook {
    CallingWasm,
    ReturningFromWasm,
    CallingHost,
    ReturningFromHost,
}

impl CallHook {
    /// True for the transitions where control is about to run host code.
    pub fn entering_host(&self) -> bool {
        matches!(self, CallHook::ReturningFromWasm | CallHook::CallingHost)
    }

    /// True for the transitions where control is leaving host code.
    pub fn exiting_host(&self) -> bool {
        matches!(self, CallHook::ReturningFromHost | CallHook::CallingWasm)
    }
}
/// Typed interior of a [`Store`]: the type-erased [`StoreOpaque`] plus every
/// piece of state that depends on the user's `T`.
pub struct StoreInner<T> {
    inner: StoreOpaque,
    // Optional resource limiter; sync or async flavor.
    limiter: Option<ResourceLimiterInner<T>>,
    // Optional hook fired on host/wasm transitions.
    call_hook: Option<CallHookInner<T>>,
    // Callback invoked when the epoch deadline is reached.
    epoch_deadline_behavior:
        Option<Box<dyn FnMut(StoreContextMut<T>) -> Result<UpdateDeadline> + Send + Sync>>,
    // `ManuallyDrop` so `Store::into_data` can move `T` out by value.
    data: ManuallyDrop<T>,
}
/// Projection from the user's `T` to their resource limiter, in either the
/// synchronous or (feature-gated) asynchronous flavor.
enum ResourceLimiterInner<T> {
    Sync(Box<dyn FnMut(&mut T) -> &mut (dyn crate::ResourceLimiter) + Send + Sync>),
    #[cfg(feature = "async")]
    Async(Box<dyn FnMut(&mut T) -> &mut (dyn crate::ResourceLimiterAsync) + Send + Sync>),
}
/// An async handler for call-hook events; the async counterpart to the
/// synchronous `FnMut(&mut T, CallHook)` hook.
#[cfg(feature = "async")]
#[async_trait::async_trait]
pub trait CallHookHandler<T>: Send {
    /// Invoked for each [`CallHook`] transition; returning an error traps the
    /// in-progress call.
    async fn handle_call_event(&self, t: &mut T, ch: CallHook) -> Result<()>;
}
/// Storage for a registered call hook, sync or async.
enum CallHookInner<T> {
    Sync(Box<dyn FnMut(&mut T, CallHook) -> Result<()> + Send + Sync>),
    #[cfg(feature = "async")]
    Async(Box<dyn CallHookHandler<T> + Send + Sync>),
}
/// What an epoch-deadline callback instructs the store to do next: continue
/// executing with a new deadline, or (async only) yield first.
pub enum UpdateDeadline {
    /// Extend the deadline by this many ticks and keep running.
    Continue(u64),
    /// Yield to the async executor, then extend the deadline by this many ticks.
    #[cfg(feature = "async")]
    Yield(u64),
}
// `StoreInner<T>` dereferences to its type-erased core so the many
// `StoreOpaque` methods are usable directly on it.
impl<T> Deref for StoreInner<T> {
    type Target = StoreOpaque;
    fn deref(&self) -> &Self::Target {
        &self.inner
    }
}

impl<T> DerefMut for StoreInner<T> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.inner
    }
}
/// The type-erased core of a store: all state that does not depend on the
/// user-provided `T`.
pub struct StoreOpaque {
    // Raw pointers to this store are handed out (see the trait-object
    // transmute in `Store::new`), so the store must not move: `PhantomPinned`
    // makes it `!Unpin`.
    _marker: marker::PhantomPinned,
    engine: Engine,
    runtime_limits: VMRuntimeLimits,
    instances: Vec<StoreInstance>,
    // Count only; component instances are not stored in `instances`.
    #[cfg(feature = "component-model")]
    num_component_instances: usize,
    signal_handler: Option<Box<SignalHandler<'static>>>,
    modules: ModuleRegistry,
    func_refs: FuncRefs,
    host_globals: Vec<StoreBox<VMHostGlobalContext>>,
    // Lazily allocated on first use via `gc_store_mut`.
    gc_store: Option<GcStore>,
    gc_roots: RootSet,
    // Scratch root list reused across collections (taken/restored in `gc`).
    gc_roots_list: GcRootsList,
    // Resource accounting enforced by `bump_resource_counts`.
    instance_count: usize,
    instance_limit: usize,
    memory_count: usize,
    memory_limit: usize,
    table_count: usize,
    table_limit: usize,
    #[cfg(feature = "async")]
    async_state: AsyncState,
    // Fuel bookkeeping; see the free `get_fuel`/`refuel`/`set_fuel` helpers.
    fuel_reserve: u64,
    fuel_yield_interval: Option<NonZeroU64>,
    store_data: ManuallyDrop<StoreData>,
    // Dummy instance providing the caller vmctx for host->wasm entry.
    default_caller: InstanceHandle,
    // Reusable buffers to avoid per-call allocations across the host boundary.
    hostcall_val_storage: Vec<Val>,
    wasm_val_raw_storage: Vec<ValRaw>,
    // NOTE(review): `ManuallyDrop` presumably keeps these alive until a
    // custom `Drop` (outside this view) runs — confirm the intended ordering.
    rooted_host_funcs: ManuallyDrop<Vec<Arc<[Definition]>>>,
    // MPK protection key, if the allocator handed one out at creation.
    pkey: Option<ProtectionKey>,
    #[cfg(feature = "component-model")]
    component_host_table: crate::runtime::vm::component::ResourceTable,
    #[cfg(feature = "component-model")]
    component_calls: crate::runtime::vm::component::CallContexts,
    #[cfg(feature = "component-model")]
    host_resource_data: crate::component::HostResourceData,
}
/// Pointers shuttling the current fiber suspension point and the current
/// `Context` between `FiberFuture::poll` and code running on the fiber stack.
#[cfg(feature = "async")]
struct AsyncState {
    // Written in the fiber closure of `on_fiber`; read by `AsyncCx::block_on`.
    current_suspend: UnsafeCell<*mut wasmtime_fiber::Suspend<Result<()>, (), Result<()>>>,
    // Written in `FiberFuture::poll`; read by `AsyncCx::block_on`.
    current_poll_cx: UnsafeCell<*mut Context<'static>>,
}
// SAFETY(review): these raw pointers are only dereferenced while the store's
// own fiber is being polled — presumably never concurrently from two threads.
// Confirm against the fiber machinery before relying on this.
#[cfg(feature = "async")]
unsafe impl Send for AsyncState {}
#[cfg(feature = "async")]
unsafe impl Sync for AsyncState {}
/// RAII guard asserting that no GC can happen while it is alive: entering the
/// guard puts the GC heap into a no-GC scope, exited on drop.
#[doc(hidden)]
pub struct AutoAssertNoGc<'a> {
    store: &'a mut StoreOpaque,
    // Whether a no-GC scope was actually entered (false when the store has no
    // GC heap yet).
    entered: bool,
}
impl<'a> AutoAssertNoGc<'a> {
    /// Enters a no-GC scope on the store's GC heap (if one has been
    /// allocated) and returns a guard that exits the scope on drop.
    #[inline]
    pub fn new(store: &'a mut StoreOpaque) -> Self {
        let entered = match store.gc_store.as_mut() {
            Some(gc_store) => {
                gc_store.gc_heap.enter_no_gc_scope();
                true
            }
            None => false,
        };
        AutoAssertNoGc { store, entered }
    }
}
// The guard transparently dereferences to the underlying store so it can be
// used wherever a `StoreOpaque` is expected.
impl core::ops::Deref for AutoAssertNoGc<'_> {
    type Target = StoreOpaque;
    #[inline]
    fn deref(&self) -> &Self::Target {
        &*self.store
    }
}

impl core::ops::DerefMut for AutoAssertNoGc<'_> {
    #[inline]
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut *self.store
    }
}

impl Drop for AutoAssertNoGc<'_> {
    #[inline]
    fn drop(&mut self) {
        // Only exit the scope if `new` actually entered one; `unwrap_*` is
        // fine here since `entered == true` implies the GC store exists.
        if self.entered {
            self.store.unwrap_gc_store_mut().gc_heap.exit_no_gc_scope();
        }
    }
}
/// An instance owned by this store, plus how it came to exist.
struct StoreInstance {
    handle: InstanceHandle,
    kind: StoreInstanceKind,
}

enum StoreInstanceKind {
    /// A genuine instantiation of a module registered in the store's
    /// `ModuleRegistry`.
    Real {
        module_id: RegisteredModuleId,
    },
    /// An internal/dummy instance (e.g. created to wrap a host object) with
    /// no backing registered module.
    Dummy,
}
impl<T> Store<T> {
    /// Creates a new store bound to `engine`, owning `data` as its host state.
    pub fn new(engine: &Engine, data: T) -> Self {
        let pkey = engine.allocator().next_available_pkey();
        let mut inner = Box::new(StoreInner {
            inner: StoreOpaque {
                _marker: marker::PhantomPinned,
                engine: engine.clone(),
                runtime_limits: Default::default(),
                instances: Vec::new(),
                #[cfg(feature = "component-model")]
                num_component_instances: 0,
                signal_handler: None,
                gc_store: None,
                gc_roots: RootSet::default(),
                gc_roots_list: GcRootsList::default(),
                modules: ModuleRegistry::default(),
                func_refs: FuncRefs::default(),
                host_globals: Vec::new(),
                instance_count: 0,
                instance_limit: crate::DEFAULT_INSTANCE_LIMIT,
                memory_count: 0,
                memory_limit: crate::DEFAULT_MEMORY_LIMIT,
                table_count: 0,
                table_limit: crate::DEFAULT_TABLE_LIMIT,
                #[cfg(feature = "async")]
                async_state: AsyncState {
                    current_suspend: UnsafeCell::new(ptr::null_mut()),
                    current_poll_cx: UnsafeCell::new(ptr::null_mut()),
                },
                fuel_reserve: 0,
                fuel_yield_interval: None,
                store_data: ManuallyDrop::new(StoreData::new()),
                default_caller: InstanceHandle::null(),
                hostcall_val_storage: Vec::new(),
                wasm_val_raw_storage: Vec::new(),
                rooted_host_funcs: ManuallyDrop::new(Vec::new()),
                pkey,
                #[cfg(feature = "component-model")]
                component_host_table: Default::default(),
                #[cfg(feature = "component-model")]
                component_calls: Default::default(),
                #[cfg(feature = "component-model")]
                host_resource_data: Default::default(),
            },
            limiter: None,
            call_hook: None,
            epoch_deadline_behavior: None,
            data: ManuallyDrop::new(data),
        });
        // Allocate the "default caller" instance: a dummy instance backed by
        // an empty module whose vmctx serves as the caller context when the
        // host invokes wasm directly.
        inner.default_caller = {
            let module = Arc::new(wasmtime_environ::Module::default());
            let shim = BareModuleInfo::empty(module).into_traitobj();
            let allocator = OnDemandInstanceAllocator::default();
            allocator
                .validate_module(shim.module(), shim.offsets())
                .unwrap();
            let mut instance = unsafe {
                allocator
                    .allocate_module(InstanceAllocationRequest {
                        host_state: Box::new(()),
                        imports: Default::default(),
                        store: StorePtr::empty(),
                        runtime_info: &shim,
                        wmemcheck: engine.config().wmemcheck,
                        pkey: None,
                    })
                    .expect("failed to allocate default callee")
            };
            unsafe {
                // Erase the lifetime of the store pointer stored in the
                // instance. This is why `StoreOpaque` is `!Unpin`: the boxed
                // `StoreInner` must never move after this point.
                let traitobj = mem::transmute::<
                    *mut (dyn crate::runtime::vm::Store + '_),
                    *mut (dyn crate::runtime::vm::Store + 'static),
                >(&mut *inner);
                instance.set_store(traitobj);
            }
            instance
        };
        Self {
            inner: ManuallyDrop::new(inner),
        }
    }

    /// Shared access to the user-provided host data.
    #[inline]
    pub fn data(&self) -> &T {
        self.inner.data()
    }

    /// Exclusive access to the user-provided host data.
    #[inline]
    pub fn data_mut(&mut self) -> &mut T {
        self.inner.data_mut()
    }

    /// Consumes the store and returns its host data.
    pub fn into_data(mut self) -> T {
        // SAFETY(review): `self` is forgotten immediately after taking
        // `inner`, so neither the box nor `data` is dropped twice; the rest of
        // `inner`'s fields are presumably cleaned up by `StoreInner`'s own
        // drop when `inner` goes out of scope here — confirm against the
        // `Drop` impls outside this view.
        unsafe {
            let mut inner = ManuallyDrop::take(&mut self.inner);
            core::mem::forget(self);
            ManuallyDrop::take(&mut inner.data)
        }
    }

    /// Registers a synchronous resource limiter.
    ///
    /// The closure's limits for instances/tables/memories are sampled once up
    /// front and cached on the store.
    pub fn limiter(
        &mut self,
        mut limiter: impl FnMut(&mut T) -> &mut (dyn crate::ResourceLimiter) + Send + Sync + 'static,
    ) {
        let inner = &mut self.inner;
        let (instance_limit, table_limit, memory_limit) = {
            let l = limiter(&mut inner.data);
            (l.instances(), l.tables(), l.memories())
        };
        let innermost = &mut inner.inner;
        innermost.instance_limit = instance_limit;
        innermost.table_limit = table_limit;
        innermost.memory_limit = memory_limit;
        inner.limiter = Some(ResourceLimiterInner::Sync(Box::new(limiter)));
    }

    /// Registers an asynchronous resource limiter; requires async support to
    /// be enabled in the engine's config.
    #[cfg(feature = "async")]
    pub fn limiter_async(
        &mut self,
        mut limiter: impl FnMut(&mut T) -> &mut (dyn crate::ResourceLimiterAsync)
            + Send
            + Sync
            + 'static,
    ) {
        debug_assert!(self.inner.async_support());
        // Same up-front sampling of limits as the sync variant above.
        let inner = &mut self.inner;
        let (instance_limit, table_limit, memory_limit) = {
            let l = limiter(&mut inner.data);
            (l.instances(), l.tables(), l.memories())
        };
        let innermost = &mut inner.inner;
        innermost.instance_limit = instance_limit;
        innermost.table_limit = table_limit;
        innermost.memory_limit = memory_limit;
        inner.limiter = Some(ResourceLimiterInner::Async(Box::new(limiter)));
    }

    /// Registers an async call hook fired on host/wasm transitions.
    #[cfg(feature = "async")]
    pub fn call_hook_async(&mut self, hook: impl CallHookHandler<T> + Send + Sync + 'static) {
        self.inner.call_hook = Some(CallHookInner::Async(Box::new(hook)));
    }

    /// Registers a synchronous call hook fired on host/wasm transitions.
    pub fn call_hook(
        &mut self,
        hook: impl FnMut(&mut T, CallHook) -> Result<()> + Send + Sync + 'static,
    ) {
        self.inner.call_hook = Some(CallHookInner::Sync(Box::new(hook)));
    }

    /// Returns the engine this store belongs to.
    pub fn engine(&self) -> &Engine {
        self.inner.engine()
    }

    /// Performs a synchronous garbage collection.
    #[cfg(feature = "gc")]
    pub fn gc(&mut self) {
        self.inner.gc()
    }

    /// Performs a garbage collection, yielding periodically (async stores).
    #[cfg(all(feature = "async", feature = "gc"))]
    pub async fn gc_async(&mut self)
    where
        T: Send,
    {
        self.inner.gc_async().await;
    }

    /// Remaining fuel; errors if fuel metering is not enabled.
    pub fn get_fuel(&self) -> Result<u64> {
        self.inner.get_fuel()
    }

    /// Sets the store's remaining fuel; errors if fuel metering is not enabled.
    pub fn set_fuel(&mut self, fuel: u64) -> Result<()> {
        self.inner.set_fuel(fuel)
    }

    /// Configures how much fuel is injected per async yield; errors if fuel
    /// or async support is not enabled, or if `interval` is `Some(0)`.
    pub fn fuel_async_yield_interval(&mut self, interval: Option<u64>) -> Result<()> {
        self.inner.fuel_async_yield_interval(interval)
    }

    /// Sets the epoch deadline to this many ticks past the current epoch.
    pub fn set_epoch_deadline(&mut self, ticks_beyond_current: u64) {
        self.inner.set_epoch_deadline(ticks_beyond_current);
    }

    /// Configures crossing the epoch deadline to trap.
    pub fn epoch_deadline_trap(&mut self) {
        self.inner.epoch_deadline_trap();
    }

    /// Configures a callback to run when the epoch deadline is crossed.
    pub fn epoch_deadline_callback(
        &mut self,
        callback: impl FnMut(StoreContextMut<T>) -> Result<UpdateDeadline> + Send + Sync + 'static,
    ) {
        self.inner.epoch_deadline_callback(Box::new(callback));
    }

    /// Configures crossing the epoch deadline to yield to the async executor
    /// and then bump the deadline by `delta`.
    #[cfg(feature = "async")]
    pub fn epoch_deadline_async_yield_and_update(&mut self, delta: u64) {
        self.inner.epoch_deadline_async_yield_and_update(delta);
    }
}
impl<'a, T> StoreContext<'a, T> {
    pub(crate) fn async_support(&self) -> bool {
        self.0.async_support()
    }
    /// Returns the engine of the underlying store.
    pub fn engine(&self) -> &Engine {
        self.0.engine()
    }
    /// Shared access to the host data, for the full `'a` of the context.
    pub fn data(&self) -> &'a T {
        self.0.data()
    }
    /// Remaining fuel; errors if fuel metering is not enabled.
    pub fn get_fuel(&self) -> Result<u64> {
        self.0.get_fuel()
    }
}
// Mutable-context mirror of the `Store<T>` methods; each simply delegates to
// the underlying `StoreInner`.
impl<'a, T> StoreContextMut<'a, T> {
    /// Shared access to the host data.
    pub fn data(&self) -> &T {
        self.0.data()
    }
    /// Exclusive access to the host data.
    pub fn data_mut(&mut self) -> &mut T {
        self.0.data_mut()
    }
    /// Returns the engine of the underlying store.
    pub fn engine(&self) -> &Engine {
        self.0.engine()
    }
    /// Performs a synchronous garbage collection.
    #[cfg(feature = "gc")]
    pub fn gc(&mut self) {
        self.0.gc()
    }
    /// Performs a garbage collection, yielding periodically (async stores).
    #[cfg(all(feature = "async", feature = "gc"))]
    pub async fn gc_async(&mut self)
    where
        T: Send,
    {
        self.0.gc_async().await;
    }
    /// Remaining fuel; errors if fuel metering is not enabled.
    pub fn get_fuel(&self) -> Result<u64> {
        self.0.get_fuel()
    }
    /// Sets remaining fuel; errors if fuel metering is not enabled.
    pub fn set_fuel(&mut self, fuel: u64) -> Result<()> {
        self.0.set_fuel(fuel)
    }
    /// Configures fuel injected per async yield.
    pub fn fuel_async_yield_interval(&mut self, interval: Option<u64>) -> Result<()> {
        self.0.fuel_async_yield_interval(interval)
    }
    /// Sets the epoch deadline relative to the current epoch.
    pub fn set_epoch_deadline(&mut self, ticks_beyond_current: u64) {
        self.0.set_epoch_deadline(ticks_beyond_current);
    }
    /// Configures crossing the epoch deadline to trap.
    pub fn epoch_deadline_trap(&mut self) {
        self.0.epoch_deadline_trap();
    }
    /// Configures crossing the epoch deadline to yield, then bump by `delta`.
    #[cfg(feature = "async")]
    pub fn epoch_deadline_async_yield_and_update(&mut self, delta: u64) {
        self.0.epoch_deadline_async_yield_and_update(delta);
    }
}
impl<T> StoreInner<T> {
    #[inline]
    fn data(&self) -> &T {
        &self.data
    }
    #[inline]
    fn data_mut(&mut self) -> &mut T {
        &mut self.data
    }
    /// Fires a call-hook transition.
    ///
    /// Fast path: nothing to do when there is neither an MPK protection key
    /// nor a registered hook.
    #[inline]
    pub fn call_hook(&mut self, s: CallHook) -> Result<()> {
        if self.inner.pkey.is_none() && self.call_hook.is_none() {
            Ok(())
        } else {
            self.call_hook_slow_path(s)
        }
    }
    fn call_hook_slow_path(&mut self, s: CallHook) -> Result<()> {
        // With MPK active, restrict memory access to this store's key while
        // wasm runs, and re-open all keys when control returns to the host.
        if let Some(pkey) = &self.inner.pkey {
            let allocator = self.engine().allocator();
            match s {
                CallHook::CallingWasm | CallHook::ReturningFromHost => {
                    allocator.restrict_to_pkey(*pkey)
                }
                CallHook::ReturningFromWasm | CallHook::CallingHost => allocator.allow_all_pkeys(),
            }
        }
        match &mut self.call_hook {
            Some(CallHookInner::Sync(hook)) => hook(&mut self.data, s),
            #[cfg(feature = "async")]
            Some(CallHookInner::Async(handler)) => unsafe {
                // Block the current fiber on the async handler; the double `?`
                // unwraps both the block_on result and the handler's result.
                Ok(self
                    .inner
                    .async_cx()
                    .ok_or_else(|| anyhow!("couldn't grab async_cx for call hook"))?
                    .block_on(handler.handle_call_event(&mut self.data, s).as_mut())??)
            },
            None => Ok(()),
        }
    }
}
/// Computes how much fuel a store has left.
///
/// `injected_fuel` is the wasm-visible counter: it starts negative (the
/// injected allotment) and counts up toward zero as wasm consumes fuel; a
/// positive value means the allotment was exhausted. `fuel_reserve` is the
/// amount held back and not yet injected. The result saturates at the `u64`
/// bounds rather than overflowing.
fn get_fuel(injected_fuel: i64, fuel_reserve: u64) -> u64 {
    // Branch on sign instead of computing `-injected_fuel` (as before): the
    // negation overflows (panicking in debug builds) if the counter ever
    // reaches `i64::MIN`.
    if injected_fuel <= 0 {
        fuel_reserve.saturating_add(injected_fuel.unsigned_abs())
    } else {
        fuel_reserve.saturating_sub(injected_fuel as u64)
    }
}

/// Re-injects fuel from the reserve after the injected allotment ran dry.
///
/// Returns `true` if any fuel remained to (re)inject, `false` when the store
/// is completely out of fuel.
fn refuel(
    injected_fuel: &mut i64,
    fuel_reserve: &mut u64,
    yield_interval: Option<NonZeroU64>,
) -> bool {
    let fuel = get_fuel(*injected_fuel, *fuel_reserve);
    if fuel > 0 {
        set_fuel(injected_fuel, fuel_reserve, yield_interval, fuel);
        true
    } else {
        false
    }
}

/// Splits `new_fuel_amount` between the injected counter and the reserve.
///
/// At most one yield-interval's worth (and at most `i64::MAX`) is injected so
/// that async stores periodically trap back out to yield; the remainder is
/// kept in `fuel_reserve` for later `refuel` calls.
fn set_fuel(
    injected_fuel: &mut i64,
    fuel_reserve: &mut u64,
    yield_interval: Option<NonZeroU64>,
    new_fuel_amount: u64,
) {
    // `None` interval means "inject as much as representable".
    let interval = yield_interval.unwrap_or(NonZeroU64::MAX).get();
    let injected = core::cmp::min(interval, new_fuel_amount);
    // Cap at `i64::MAX` so the negation below cannot overflow.
    let injected = core::cmp::min(injected, i64::MAX as u64);
    *fuel_reserve = new_fuel_amount - injected;
    *injected_fuel = -(injected as i64);
}
#[doc(hidden)]
impl StoreOpaque {
    /// Unique identifier of this store (from its `StoreData`).
    pub fn id(&self) -> StoreId {
        self.store_data.id()
    }
    /// Accounts for one instantiation of `module` against the store's
    /// instance/memory/table limits, erroring if any limit would be exceeded.
    pub fn bump_resource_counts(&mut self, module: &Module) -> Result<()> {
        // Saturating add then compare: avoids overflow while still tripping
        // the limit check.
        fn bump(slot: &mut usize, max: usize, amt: usize, desc: &str) -> Result<()> {
            let new = slot.saturating_add(amt);
            if new > max {
                bail!(
                    "resource limit exceeded: {} count too high at {}",
                    desc,
                    new
                );
            }
            *slot = new;
            Ok(())
        }
        let module = module.env_module();
        // Only defined (non-imported) memories/tables count against this store.
        let memories = module.memory_plans.len() - module.num_imported_memories;
        let tables = module.table_plans.len() - module.num_imported_tables;
        bump(&mut self.instance_count, self.instance_limit, 1, "instance")?;
        bump(
            &mut self.memory_count,
            self.memory_limit,
            memories,
            "memory",
        )?;
        bump(&mut self.table_count, self.table_limit, tables, "table")?;
        Ok(())
    }
    /// Whether async usage is possible: compiled in *and* enabled in config.
    #[inline]
    pub fn async_support(&self) -> bool {
        cfg!(feature = "async") && self.engine().config().async_support
    }
    /// The engine this store belongs to.
    #[inline]
    pub fn engine(&self) -> &Engine {
        &self.engine
    }
    /// Shared access to the store's item storage.
    #[inline]
    pub fn store_data(&self) -> &StoreData {
        &self.store_data
    }
    /// Exclusive access to the store's item storage.
    #[inline]
    pub fn store_data_mut(&mut self) -> &mut StoreData {
        &mut self.store_data
    }
    /// Modules registered with this store.
    #[inline]
    pub(crate) fn modules(&self) -> &ModuleRegistry {
        &self.modules
    }
    #[inline]
    pub(crate) fn modules_mut(&mut self) -> &mut ModuleRegistry {
        &mut self.modules
    }
    pub(crate) fn func_refs(&mut self) -> &mut FuncRefs {
        &mut self.func_refs
    }
    /// Patches up any pending `VMFuncRef`s against the module registry.
    pub(crate) fn fill_func_refs(&mut self) {
        self.func_refs.fill(&mut self.modules);
    }
    /// Roots a pre-instantiation batch of `VMFuncRef`s in this store.
    pub(crate) fn push_instance_pre_func_refs(&mut self, func_refs: Arc<[VMFuncRef]>) {
        self.func_refs.push_instance_pre_func_refs(func_refs);
    }
    /// Host-defined globals owned by this store.
    pub(crate) fn host_globals(&mut self) -> &mut Vec<StoreBox<VMHostGlobalContext>> {
        &mut self.host_globals
    }
    /// Returns the module an instance was created from, or `None` for dummy
    /// instances.
    pub fn module_for_instance(&self, instance: InstanceId) -> Option<&'_ Module> {
        match self.instances[instance.0].kind {
            StoreInstanceKind::Dummy => None,
            StoreInstanceKind::Real { module_id } => {
                let module = self
                    .modules()
                    .lookup_module_by_id(module_id)
                    .expect("should always have a registered module for real instances");
                Some(module)
            }
        }
    }
    /// Takes ownership of an instance handle created from a registered module.
    ///
    /// # Safety
    ///
    /// NOTE(review): callers presumably must guarantee the handle belongs to
    /// this store and `module_id` is registered here — the `unsafe` contract
    /// is not documented at this declaration; confirm at call sites.
    pub unsafe fn add_instance(
        &mut self,
        handle: InstanceHandle,
        module_id: RegisteredModuleId,
    ) -> InstanceId {
        self.instances.push(StoreInstance {
            handle: handle.clone(),
            kind: StoreInstanceKind::Real { module_id },
        });
        // Id is simply the index of the freshly pushed entry.
        InstanceId(self.instances.len() - 1)
    }
    /// Takes ownership of a dummy instance (no backing registered module).
    pub unsafe fn add_dummy_instance(&mut self, handle: InstanceHandle) -> InstanceId {
        self.instances.push(StoreInstance {
            handle: handle.clone(),
            kind: StoreInstanceKind::Dummy,
        });
        InstanceId(self.instances.len() - 1)
    }
    /// Shared access to an instance by id; panics on an invalid id.
    pub fn instance(&self, id: InstanceId) -> &InstanceHandle {
        &self.instances[id.0].handle
    }
    /// Exclusive access to an instance by id; panics on an invalid id.
    pub fn instance_mut(&mut self, id: InstanceId) -> &mut InstanceHandle {
        &mut self.instances[id.0].handle
    }
    /// Iterates over all non-dummy instances in the store.
    ///
    /// Materializes the id list first so the returned iterator can borrow
    /// `self` mutably while constructing each `Instance`.
    pub fn all_instances<'a>(&'a mut self) -> impl ExactSizeIterator<Item = Instance> + 'a {
        let instances = self
            .instances
            .iter()
            .enumerate()
            .filter_map(|(idx, inst)| {
                let id = InstanceId::from_index(idx);
                if let StoreInstanceKind::Dummy = inst.kind {
                    None
                } else {
                    Some(InstanceData::from_id(id))
                }
            })
            .collect::<Vec<_>>();
        instances
            .into_iter()
            .map(|i| Instance::from_wasmtime(i, self))
    }
    /// Iterates over all memories defined by instances in this store
    /// (including those in dummy instances).
    pub fn all_memories<'a>(&'a mut self) -> impl Iterator<Item = Memory> + 'a {
        // Same collect-then-map shape as `all_instances`, for the same
        // borrow reasons.
        let mems = self
            .instances
            .iter_mut()
            .flat_map(|instance| instance.handle.defined_memories())
            .collect::<Vec<_>>();
        mems.into_iter()
            .map(|memory| unsafe { Memory::from_wasmtime_memory(memory, self) })
    }
    /// Invokes `f` for every defined table in the store.
    ///
    /// The instance list is temporarily moved out of the store so that `f`
    /// can receive `&mut StoreOpaque` while we iterate; a drop guard puts the
    /// list back even if `f` panics.
    pub fn for_each_table(&mut self, mut f: impl FnMut(&mut Self, Table)) {
        struct TempTakeInstances<'a> {
            instances: Vec<StoreInstance>,
            store: &'a mut StoreOpaque,
        }
        impl<'a> TempTakeInstances<'a> {
            fn new(store: &'a mut StoreOpaque) -> Self {
                let instances = mem::take(&mut store.instances);
                Self { instances, store }
            }
        }
        impl Drop for TempTakeInstances<'_> {
            fn drop(&mut self) {
                // Nothing should have repopulated the store's list while it
                // was taken.
                assert!(self.store.instances.is_empty());
                self.store.instances = mem::take(&mut self.instances);
            }
        }
        let mut temp = TempTakeInstances::new(self);
        for instance in temp.instances.iter_mut() {
            for table in instance.handle.defined_tables() {
                let table = unsafe { Table::from_wasmtime_table(table, temp.store) };
                f(temp.store, table);
            }
        }
    }
    /// Invokes `f` for every global in the store: host-defined globals first,
    /// then each instance's defined globals.
    ///
    /// Uses the same temporary-take pattern as `for_each_table`, here for
    /// both the host-global list and the instance list.
    pub fn for_each_global(&mut self, mut f: impl FnMut(&mut Self, Global)) {
        struct TempTakeHostGlobalsAndInstances<'a> {
            host_globals: Vec<StoreBox<VMHostGlobalContext>>,
            instances: Vec<StoreInstance>,
            store: &'a mut StoreOpaque,
        }
        impl<'a> TempTakeHostGlobalsAndInstances<'a> {
            fn new(store: &'a mut StoreOpaque) -> Self {
                let host_globals = mem::take(&mut store.host_globals);
                let instances = mem::take(&mut store.instances);
                Self {
                    host_globals,
                    instances,
                    store,
                }
            }
        }
        impl Drop for TempTakeHostGlobalsAndInstances<'_> {
            fn drop(&mut self) {
                // Restore both lists even if `f` panicked mid-iteration.
                assert!(self.store.host_globals.is_empty());
                self.store.host_globals = mem::take(&mut self.host_globals);
                assert!(self.store.instances.is_empty());
                self.store.instances = mem::take(&mut self.instances);
            }
        }
        let mut temp = TempTakeHostGlobalsAndInstances::new(self);
        unsafe {
            // Host-defined globals: synthesize an export record for each.
            // A null `vmctx` marks these as host (not instance) globals.
            for global in temp.host_globals.iter() {
                let export = ExportGlobal {
                    definition: &mut (*global.get()).global as *mut _,
                    vmctx: core::ptr::null_mut(),
                    global: (*global.get()).ty.to_wasm_type(),
                };
                let global = Global::from_wasmtime_global(export, temp.store);
                f(temp.store, global);
            }
            // Instance-defined globals.
            for instance in temp.instances.iter_mut() {
                for (_, export) in instance.handle.defined_globals() {
                    let global = Global::from_wasmtime_global(export, temp.store);
                    f(temp.store, global);
                }
            }
        }
    }
    /// Installs (or clears) the custom signal handler consulted on wasm traps.
    #[cfg_attr(not(target_os = "linux"), allow(dead_code))]
    pub fn set_signal_handler(&mut self, handler: Option<Box<SignalHandler<'static>>>) {
        self.signal_handler = handler;
    }
    /// Shared access to this store's `VMRuntimeLimits`.
    #[inline]
    pub fn runtime_limits(&self) -> &VMRuntimeLimits {
        &self.runtime_limits
    }
    /// Allocates this store's GC heap; must only be called once (asserts the
    /// heap is not already present).
    #[inline(never)]
    pub(crate) fn allocate_gc_heap(&mut self) -> Result<()> {
        assert!(self.gc_store.is_none());
        let gc_store = allocate_gc_store(self.engine())?;
        self.gc_store = Some(gc_store);
        return Ok(());
        // With the `gc` feature: allocate a real heap when reference types
        // are enabled, otherwise install the disabled placeholder heap.
        #[cfg(feature = "gc")]
        fn allocate_gc_store(engine: &Engine) -> Result<GcStore> {
            let (index, heap) = if engine
                .config()
                .features
                .contains(wasmparser::WasmFeatures::REFERENCE_TYPES)
            {
                engine
                    .allocator()
                    .allocate_gc_heap(&**engine.gc_runtime())?
            } else {
                (
                    GcHeapAllocationIndex::default(),
                    crate::runtime::vm::disabled_gc_heap(),
                )
            };
            Ok(GcStore::new(index, heap))
        }
        // Without the `gc` feature the placeholder heap is always used.
        #[cfg(not(feature = "gc"))]
        fn allocate_gc_store(_engine: &Engine) -> Result<GcStore> {
            Ok(GcStore::new(
                GcHeapAllocationIndex::default(),
                crate::runtime::vm::disabled_gc_heap(),
            ))
        }
    }
    /// Shared access to the GC store; errors if it was never allocated.
    #[inline]
    #[cfg(feature = "gc")]
    pub(crate) fn gc_store(&self) -> Result<&GcStore> {
        match &self.gc_store {
            Some(gc_store) => Ok(gc_store),
            None => bail!("GC heap not initialized yet"),
        }
    }
    /// Exclusive access to the GC store, allocating it lazily on first use.
    #[inline]
    pub(crate) fn gc_store_mut(&mut self) -> Result<&mut GcStore> {
        if self.gc_store.is_none() {
            self.allocate_gc_heap()?;
        }
        Ok(self.unwrap_gc_store_mut())
    }
    /// Shared access to the GC store; panics if it was never allocated.
    #[inline]
    #[cfg(feature = "gc")]
    pub(crate) fn unwrap_gc_store(&self) -> &GcStore {
        self.gc_store
            .as_ref()
            .expect("attempted to access the store's GC heap before it has been allocated")
    }
    /// Exclusive access to the GC store; panics if it was never allocated.
    #[inline]
    pub(crate) fn unwrap_gc_store_mut(&mut self) -> &mut GcStore {
        self.gc_store
            .as_mut()
            .expect("attempted to access the store's GC heap before it has been allocated")
    }
    /// The store's set of user-rooted GC references.
    #[inline]
    pub(crate) fn gc_roots(&self) -> &RootSet {
        &self.gc_roots
    }
    #[inline]
    pub(crate) fn gc_roots_mut(&mut self) -> &mut RootSet {
        &mut self.gc_roots
    }
    /// Pops LIFO roots down to `scope`; a no-op when no GC heap exists.
    #[inline]
    pub(crate) fn exit_gc_lifo_scope(&mut self, scope: usize) {
        if let Some(gc_store) = self.gc_store.as_mut() {
            self.gc_roots.exit_lifo_scope(gc_store, scope);
        }
    }
    /// Performs a synchronous garbage collection; a no-op when no GC heap has
    /// been allocated (nothing to collect).
    #[cfg(feature = "gc")]
    pub fn gc(&mut self) {
        if self.gc_store.is_none() {
            return;
        }
        // The roots list is taken out of `self` so tracing can borrow `self`
        // mutably; it is cleared and put back for reuse afterwards.
        let mut roots = core::mem::take(&mut self.gc_roots_list);
        self.trace_roots(&mut roots);
        self.unwrap_gc_store_mut().gc(unsafe { roots.iter() });
        roots.clear();
        self.gc_roots_list = roots;
    }
    /// GC is compiled out: collecting is a no-op.
    #[inline]
    #[cfg(not(feature = "gc"))]
    pub fn gc(&mut self) {
    }
    /// Collects all GC roots (wasm stack, vmctx-reachable, user-rooted) into
    /// `gc_roots_list`.
    #[cfg(feature = "gc")]
    fn trace_roots(&mut self, gc_roots_list: &mut GcRootsList) {
        log::trace!("Begin trace GC roots");
        assert!(gc_roots_list.is_empty());
        self.trace_wasm_stack_roots(gc_roots_list);
        self.trace_vmctx_roots(gc_roots_list);
        self.trace_user_roots(gc_roots_list);
        log::trace!("End trace GC roots")
    }
#[cfg(all(feature = "async", feature = "gc"))]
pub async fn gc_async(&mut self) {
assert!(
self.async_support(),
"cannot use `gc_async` without enabling async support in the config",
);
if self.gc_store.is_none() {
return;
}
let mut roots = std::mem::take(&mut self.gc_roots_list);
self.trace_roots_async(&mut roots).await;
self.unwrap_gc_store_mut()
.gc_async(unsafe { roots.iter() })
.await;
roots.clear();
self.gc_roots_list = roots;
}
#[inline]
#[cfg(all(feature = "async", not(feature = "gc")))]
pub async fn gc_async(&mut self) {
}
#[cfg(all(feature = "async", feature = "gc"))]
async fn trace_roots_async(&mut self, gc_roots_list: &mut GcRootsList) {
use crate::runtime::vm::Yield;
log::trace!("Begin trace GC roots");
assert!(gc_roots_list.is_empty());
self.trace_wasm_stack_roots(gc_roots_list);
Yield::new().await;
self.trace_vmctx_roots(gc_roots_list);
Yield::new().await;
self.trace_user_roots(gc_roots_list);
log::trace!("End trace GC roots")
}
#[cfg(feature = "gc")]
fn trace_wasm_stack_roots(&mut self, gc_roots_list: &mut GcRootsList) {
use core::ptr::NonNull;
use crate::runtime::vm::{ModuleInfoLookup, SendSyncPtr};
log::trace!("Begin trace GC roots :: Wasm stack");
Backtrace::trace(self.vmruntime_limits().cast_const(), |frame| {
let pc = frame.pc();
debug_assert!(pc != 0, "we should always get a valid PC for Wasm frames");
let fp = frame.fp();
debug_assert!(
fp != 0,
"we should always get a valid frame pointer for Wasm frames"
);
let module_info = self
.modules()
.lookup(pc)
.expect("should have module info for Wasm frame");
let stack_map = match module_info.lookup_stack_map(pc) {
Some(sm) => sm,
None => {
log::trace!("No stack map for this Wasm frame");
return core::ops::ControlFlow::Continue(());
}
};
log::trace!(
"We have a stack map that maps {} words in this Wasm frame",
stack_map.mapped_words()
);
let sp = fp - stack_map.mapped_words() as usize * mem::size_of::<usize>();
for i in 0..(stack_map.mapped_words() as usize) {
let stack_slot = sp + i * mem::size_of::<usize>();
let stack_slot = stack_slot as *mut u64;
if !stack_map.get_bit(i) {
log::trace!("Stack slot @ {stack_slot:p} does not contain gc_refs");
continue;
}
let gc_ref = unsafe { core::ptr::read(stack_slot) };
log::trace!("Stack slot @ {stack_slot:p} = {gc_ref:#x}");
let gc_ref = VMGcRef::from_r64(gc_ref)
.expect("we should never use the high 32 bits of an r64");
if gc_ref.is_some() {
unsafe {
gc_roots_list.add_wasm_stack_root(SendSyncPtr::new(
NonNull::new(stack_slot).unwrap(),
));
}
}
}
core::ops::ControlFlow::Continue(())
});
log::trace!("End trace GC roots :: Wasm stack");
}
#[cfg(feature = "gc")]
fn trace_vmctx_roots(&mut self, gc_roots_list: &mut GcRootsList) {
log::trace!("Begin trace GC roots :: vmctx");
self.for_each_global(|store, global| global.trace_root(store, gc_roots_list));
self.for_each_table(|store, table| table.trace_roots(store, gc_roots_list));
log::trace!("End trace GC roots :: vmctx");
}
#[cfg(feature = "gc")]
fn trace_user_roots(&mut self, gc_roots_list: &mut GcRootsList) {
log::trace!("Begin trace GC roots :: user");
self.gc_roots.trace_roots(gc_roots_list);
log::trace!("End trace GC roots :: user");
}
#[cfg(feature = "async")]
#[inline]
pub fn async_cx(&self) -> Option<AsyncCx> {
assert!(self.async_support());
let poll_cx_box_ptr = self.async_state.current_poll_cx.get();
if poll_cx_box_ptr.is_null() {
return None;
}
let poll_cx_inner_ptr = unsafe { *poll_cx_box_ptr };
if poll_cx_inner_ptr.is_null() {
return None;
}
Some(AsyncCx {
current_suspend: self.async_state.current_suspend.get(),
current_poll_cx: poll_cx_box_ptr,
track_pkey_context_switch: self.pkey.is_some(),
})
}
    /// Remaining fuel in the store; errors unless fuel metering is enabled.
    pub fn get_fuel(&self) -> Result<u64> {
        anyhow::ensure!(
            self.engine().tunables().consume_fuel,
            "fuel is not configured in this store"
        );
        // `fuel_consumed` lives in an `UnsafeCell` because jitted code writes
        // it; reading here races only with our own wasm, which is not running.
        let injected_fuel = unsafe { *self.runtime_limits.fuel_consumed.get() };
        Ok(get_fuel(injected_fuel, self.fuel_reserve))
    }
    /// Re-injects fuel from the reserve; returns whether any was available.
    fn refuel(&mut self) -> bool {
        let injected_fuel = unsafe { &mut *self.runtime_limits.fuel_consumed.get() };
        refuel(
            injected_fuel,
            &mut self.fuel_reserve,
            self.fuel_yield_interval,
        )
    }
    /// Sets the store's total fuel; errors unless fuel metering is enabled.
    pub fn set_fuel(&mut self, fuel: u64) -> Result<()> {
        anyhow::ensure!(
            self.engine().tunables().consume_fuel,
            "fuel is not configured in this store"
        );
        let injected_fuel = unsafe { &mut *self.runtime_limits.fuel_consumed.get() };
        set_fuel(
            injected_fuel,
            &mut self.fuel_reserve,
            self.fuel_yield_interval,
            fuel,
        );
        Ok(())
    }
    /// Configures how much fuel is injected between async yields; requires
    /// fuel metering and async support, and a non-zero interval.
    pub fn fuel_async_yield_interval(&mut self, interval: Option<u64>) -> Result<()> {
        anyhow::ensure!(
            self.engine().tunables().consume_fuel,
            "fuel is not configured in this store"
        );
        anyhow::ensure!(
            self.engine().config().async_support,
            "async support is not configured in this store"
        );
        anyhow::ensure!(
            interval != Some(0),
            "fuel_async_yield_interval must not be 0"
        );
        self.fuel_yield_interval = interval.and_then(|i| NonZeroU64::new(i));
        // Redistribute current fuel under the new interval.
        self.set_fuel(self.get_fuel()?)
    }
    /// Yields once to the async executor from the store's fiber.
    #[cfg(feature = "async")]
    fn async_yield_impl(&mut self) -> Result<()> {
        use crate::runtime::vm::Yield;
        let mut future = Yield::new();
        unsafe {
            // `future` is a stack local that never moves, so pinning it
            // in place here is sound.
            self.async_cx()
                .expect("attempted to pull async context during shutdown")
                .block_on(Pin::new_unchecked(&mut future))
        }
    }
    /// Raw pointer to the installed signal handler, if any.
    #[inline]
    pub fn signal_handler(&self) -> Option<*const SignalHandler<'static>> {
        let handler = self.signal_handler.as_ref()?;
        Some(&**handler as *const _)
    }
    /// Raw pointer to the runtime limits, as written into vmctx structures.
    #[inline]
    pub fn vmruntime_limits(&self) -> *mut VMRuntimeLimits {
        &self.runtime_limits as *const VMRuntimeLimits as *mut VMRuntimeLimits
    }
    /// vmctx of the default-caller instance used for host->wasm entry.
    #[inline]
    pub fn default_caller(&self) -> *mut VMContext {
        self.default_caller.vmctx()
    }
    /// The `dyn Store` trait object stored in the default caller (installed
    /// in `Store::new`).
    pub fn traitobj(&self) -> *mut dyn crate::runtime::vm::Store {
        self.default_caller.store()
    }
    /// Takes the cached `Vec<Val>` scratch buffer (empty on first use).
    #[inline]
    pub fn take_hostcall_val_storage(&mut self) -> Vec<Val> {
        mem::take(&mut self.hostcall_val_storage)
    }
    /// Returns a scratch buffer for reuse, keeping whichever allocation is
    /// larger so capacity only ever grows.
    #[inline]
    pub fn save_hostcall_val_storage(&mut self, storage: Vec<Val>) {
        if storage.capacity() > self.hostcall_val_storage.capacity() {
            self.hostcall_val_storage = storage;
        }
    }
    /// Takes the cached `Vec<ValRaw>` scratch buffer (empty on first use).
    #[inline]
    pub fn take_wasm_val_raw_storage(&mut self) -> Vec<ValRaw> {
        mem::take(&mut self.wasm_val_raw_storage)
    }
    /// Counterpart to `take_wasm_val_raw_storage`; keeps the larger buffer.
    #[inline]
    pub fn save_wasm_val_raw_storage(&mut self, storage: Vec<ValRaw>) {
        if storage.capacity() > self.wasm_val_raw_storage.capacity() {
            self.wasm_val_raw_storage = storage;
        }
    }
    /// Roots a batch of host-function definitions for the store's lifetime.
    pub(crate) fn push_rooted_funcs(&mut self, funcs: Arc<[Definition]>) {
        self.rooted_host_funcs.push(funcs);
    }
    /// Classifies a caught segfault at `pc`/`addr`.
    ///
    /// Returns `Some` when the address belongs to a linear memory of one of
    /// this store's instances (an expected, recoverable wasm fault), `None`
    /// for low-address faults (null-ish pointers, e.g. through a null
    /// `VMFuncRef`), and aborts the process for any other address since that
    /// would indicate a miscompilation.
    pub(crate) fn wasm_fault(&self, pc: usize, addr: usize) -> Option<WasmFault> {
        // Faults within the first `size_of::<VMFuncRef>()` bytes are treated
        // as null-pointer-style accesses rather than linear-memory faults.
        if addr <= mem::size_of::<VMFuncRef>() {
            // Guard against `VMFuncRef` growing past the guaranteed-unmapped
            // low region.
            const _: () = {
                assert!(mem::size_of::<VMFuncRef>() <= 512);
            };
            return None;
        }
        let mut fault = None;
        for instance in self.instances.iter() {
            if let Some(f) = instance.handle.wasm_fault(addr) {
                // An address should belong to at most one instance's memory.
                assert!(fault.is_none());
                fault = Some(f);
            }
        }
        if fault.is_some() {
            return fault;
        }
        cfg_if::cfg_if! {
            if #[cfg(any(feature = "std", unix, windows))] {
                eprintln!(
                    "\
Wasmtime caught a segfault for a wasm program because the faulting instruction
is allowed to segfault due to how linear memories are implemented. The address
that was accessed, however, is not known to any linear memory in use within this
Store. This may be indicative of a critical bug in Wasmtime's code generation
because all addresses which are known to be reachable from wasm won't reach this
message.

    pc:      0x{pc:x}
    address: 0x{addr:x}

This is a possible security issue because WebAssembly has accessed something it
shouldn't have been able to. Other accesses may have succeeded and this one just
happened to be caught. The process will now be aborted to prevent this damage
from going any further and to alert what's going on. If this is a security
issue please reach out to the Wasmtime team via its security policy
at https://bytecodealliance.org/security.
"
                );
                std::process::abort();
            } else if #[cfg(panic = "abort")] {
                // No std available: panicking aborts, which is the desired
                // fail-fast behavior here.
                let _ = pc;
                panic!("invalid fault");
            } else {
                compile_error!("either `std` or `panic=abort` must be enabled");
                None
            }
        }
    }
    /// The MPK protection key assigned to this store, if any.
    #[inline]
    pub(crate) fn get_pkey(&self) -> Option<ProtectionKey> {
        self.pkey.clone()
    }
    /// Simultaneous mutable access to the three pieces of component-model
    /// resource state (possible because they are distinct fields).
    #[inline]
    #[cfg(feature = "component-model")]
    pub(crate) fn component_resource_state(
        &mut self,
    ) -> (
        &mut crate::runtime::vm::component::CallContexts,
        &mut crate::runtime::vm::component::ResourceTable,
        &mut crate::component::HostResourceData,
    ) {
        (
            &mut self.component_calls,
            &mut self.component_host_table,
            &mut self.host_resource_data,
        )
    }
    /// Records that a component instance was created; only the count is
    /// retained, not the instance itself.
    #[cfg(feature = "component-model")]
    pub(crate) fn push_component_instance(&mut self, instance: crate::component::Instance) {
        let _ = instance;
        self.num_component_instances += 1;
    }
}
impl<T> StoreContextMut<'_, T> {
    /// Runs `func` on a dedicated fiber stack so the computation can be
    /// suspended at wasm suspension points and driven as a `Future`.
    #[cfg(feature = "async")]
    pub(crate) async fn on_fiber<R>(
        &mut self,
        func: impl FnOnce(&mut StoreContextMut<'_, T>) -> R + Send,
    ) -> Result<R>
    where
        T: Send,
    {
        let config = self.engine().config();
        debug_assert!(self.0.async_support());
        debug_assert!(config.async_stack_size > 0);
        // Receives `func`'s result from inside the fiber closure.
        let mut slot = None;
        let future = {
            let current_poll_cx = self.0.async_state.current_poll_cx.get();
            let current_suspend = self.0.async_state.current_suspend.get();
            let stack = self.engine().allocator().allocate_fiber_stack()?;
            let engine = self.engine().clone();
            let slot = &mut slot;
            let fiber = wasmtime_fiber::Fiber::new(stack, move |keep_going, suspend| {
                // `keep_going` is `Err` when the `FiberFuture` was dropped
                // and the fiber is being torn down early.
                keep_going?;
                unsafe {
                    // Publish our suspension point for `AsyncCx::block_on`,
                    // restoring the previous value when this scope exits.
                    let _reset = Reset(current_suspend, *current_suspend);
                    *current_suspend = suspend;
                    *slot = Some(func(self));
                    Ok(())
                }
            })?;
            FiberFuture {
                fiber: Some(fiber),
                current_poll_cx,
                engine,
                state: Some(crate::runtime::vm::AsyncWasmCallState::new()),
            }
        };
        future.await?;
        // `slot` is guaranteed filled once the fiber completed successfully.
        return Ok(slot.unwrap());

        /// Future adapter that polls by resuming the fiber and reports
        /// `Pending` whenever the fiber suspends.
        struct FiberFuture<'a> {
            fiber: Option<wasmtime_fiber::Fiber<'a, Result<()>, (), Result<()>>>,
            current_poll_cx: *mut *mut Context<'static>,
            engine: Engine,
            // Per-call TLS state, pushed/restored around each resume.
            state: Option<crate::runtime::vm::AsyncWasmCallState>,
        }
        // SAFETY(review): the closure is `Send` and the raw poll-cx pointer is
        // only used while polling — presumably sound per the surrounding
        // fiber design; confirm against upstream's safety discussion.
        unsafe impl Send for FiberFuture<'_> {}
        impl FiberFuture<'_> {
            fn fiber(&self) -> &wasmtime_fiber::Fiber<'_, Result<()>, (), Result<()>> {
                self.fiber.as_ref().unwrap()
            }
            /// Resumes the fiber with TLS call-state swapped in, restoring it
            /// (via a drop guard) however the resume exits.
            fn resume(&mut self, val: Result<()>) -> Result<Result<()>, ()> {
                unsafe {
                    let prev = self.state.take().unwrap().push();
                    let restore = Restore {
                        fiber: self,
                        state: Some(prev),
                    };
                    return restore.fiber.fiber().resume(val);
                }
                struct Restore<'a, 'b> {
                    fiber: &'a mut FiberFuture<'b>,
                    state: Option<crate::runtime::vm::PreviousAsyncWasmCallState>,
                }
                impl Drop for Restore<'_, '_> {
                    fn drop(&mut self) {
                        unsafe {
                            self.fiber.state = Some(self.state.take().unwrap().restore());
                        }
                    }
                }
            }
        }
        impl Future for FiberFuture<'_> {
            type Output = Result<()>;
            fn poll(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Self::Output> {
                unsafe {
                    // Expose `cx` through the store for `AsyncCx::block_on`;
                    // the lifetime-erasing transmute is confined to the
                    // duration of this poll by the `Reset` guard.
                    let _reset = Reset(self.current_poll_cx, *self.current_poll_cx);
                    *self.current_poll_cx =
                        core::mem::transmute::<&mut Context<'_>, *mut Context<'static>>(cx);
                    match self.resume(Ok(())) {
                        Ok(result) => Poll::Ready(result),
                        // `Err(())` means the fiber suspended: report Pending.
                        Err(()) => {
                            if let Some(range) = self.fiber().stack().range() {
                                crate::runtime::vm::AsyncWasmCallState::assert_current_state_not_in_range(range);
                            }
                            Poll::Pending
                        }
                    }
                }
            }
        }
        impl Drop for FiberFuture<'_> {
            fn drop(&mut self) {
                // If the future is dropped mid-execution, drive the fiber to
                // completion with an error so its stack unwinds cleanly.
                if !self.fiber().done() {
                    let result = self.resume(Err(anyhow!("future dropped")));
                    debug_assert!(result.is_ok());
                }
                self.state.take().unwrap().assert_null();
                unsafe {
                    self.engine
                        .allocator()
                        .deallocate_fiber_stack(self.fiber.take().unwrap().into_stack());
                }
            }
        }
    }
}
/// Handle onto a store's async state, used by host code to block the current
/// fiber on a future (see `AsyncCx::block_on`).
#[cfg(feature = "async")]
pub struct AsyncCx {
    // Slot holding the active fiber's suspend handle; `block_on` asserts it
    // is non-null when used.
    current_suspend: *mut *mut wasmtime_fiber::Suspend<Result<()>, (), Result<()>>,
    // Slot holding the `Context` of the future currently polling the fiber;
    // republished by `FiberFuture::poll` on each poll.
    current_poll_cx: *mut *mut Context<'static>,
    // Whether MPK protection masks are saved/restored across suspensions.
    track_pkey_context_switch: bool,
}
#[cfg(feature = "async")]
impl AsyncCx {
    /// Blocks the current fiber on `future`: polls it once per poll of the
    /// enclosing `FiberFuture`, suspending the fiber whenever the future is
    /// pending, until the future is ready (or the fiber is aborted, in which
    /// case `suspend` returns an error propagated by `?`).
    ///
    /// # Safety
    ///
    /// NOTE(review): the raw pointers in `self` must point at the live async
    /// state of the store whose fiber is currently executing — confirm at
    /// call sites.
    pub unsafe fn block_on<U>(
        &self,
        mut future: Pin<&mut (dyn Future<Output = U> + Send)>,
    ) -> Result<U> {
        // Take the suspend handle out of its slot, nulling the slot for the
        // duration of the call; the `Reset` guard restores it on exit.
        let suspend = *self.current_suspend;
        let _reset = Reset(self.current_suspend, suspend);
        *self.current_suspend = ptr::null_mut();
        assert!(!suspend.is_null());
        loop {
            let future_result = {
                // Likewise take the poll `Context` pointer for this single
                // poll; it is republished by `FiberFuture::poll` on the next
                // resumption.
                let poll_cx = *self.current_poll_cx;
                let _reset = Reset(self.current_poll_cx, poll_cx);
                *self.current_poll_cx = ptr::null_mut();
                assert!(!poll_cx.is_null());
                future.as_mut().poll(&mut *poll_cx)
            };
            match future_result {
                Poll::Ready(t) => break Ok(t),
                Poll::Pending => {}
            }
            // NOTE(review): the MPK mask is widened to allow-all across the
            // suspension and this fiber's mask restored once resumed —
            // presumably so code running while suspended isn't constrained by
            // this fiber's protection key; confirm against the mpk module.
            let previous_mask = if self.track_pkey_context_switch {
                let previous_mask = mpk::current_mask();
                mpk::allow(ProtectionMask::all());
                previous_mask
            } else {
                ProtectionMask::all()
            };
            (*suspend).suspend(())?;
            if self.track_pkey_context_switch {
                mpk::allow(previous_mask);
            }
        }
    }
}
// SAFETY (review note): this trait is `unsafe` to implement; the runtime
// relies on invariants (e.g. returned raw pointers staying valid for the
// store's lifetime) that are documented on the trait, not visible here.
unsafe impl<T> crate::runtime::vm::Store for StoreInner<T> {
    /// Raw pointer to this store's `VMRuntimeLimits`, delegated to
    /// `StoreOpaque`.
    fn vmruntime_limits(&self) -> *mut VMRuntimeLimits {
        <StoreOpaque>::vmruntime_limits(self)
    }

    /// Pointer to the engine-wide epoch counter.
    fn epoch_ptr(&self) -> *const AtomicU64 {
        self.engine.epoch_counter() as *const _
    }

    /// The store's GC heap, if one has been allocated.
    fn maybe_gc_store(&mut self) -> Option<&mut GcStore> {
        self.gc_store.as_mut()
    }

    /// Asks the configured resource limiter (if any) whether a linear memory
    /// may grow from `current` to `desired` (with optional `maximum`);
    /// `Ok(true)` permits the growth.
    fn memory_growing(
        &mut self,
        current: usize,
        desired: usize,
        maximum: Option<usize>,
    ) -> Result<bool, anyhow::Error> {
        match self.limiter {
            Some(ResourceLimiterInner::Sync(ref mut limiter)) => {
                limiter(&mut self.data).memory_growing(current, desired, maximum)
            }
            #[cfg(feature = "async")]
            Some(ResourceLimiterInner::Async(ref mut limiter)) => unsafe {
                // An async limiter requires an async store: block the current
                // fiber on the limiter's future.
                self.inner
                    .async_cx()
                    .expect("ResourceLimiterAsync requires async Store")
                    .block_on(
                        limiter(&mut self.data)
                            .memory_growing(current, desired, maximum)
                            .as_mut(),
                    )?
            },
            // No limiter configured: growth is always allowed.
            None => Ok(true),
        }
    }

    /// Reports a failed memory growth to the limiter; with no limiter the
    /// error is logged and swallowed.
    fn memory_grow_failed(&mut self, error: anyhow::Error) -> Result<()> {
        match self.limiter {
            Some(ResourceLimiterInner::Sync(ref mut limiter)) => {
                limiter(&mut self.data).memory_grow_failed(error)
            }
            #[cfg(feature = "async")]
            Some(ResourceLimiterInner::Async(ref mut limiter)) => {
                limiter(&mut self.data).memory_grow_failed(error)
            }
            None => {
                log::debug!("ignoring memory growth failure error: {error:?}");
                Ok(())
            }
        }
    }

    /// Asks the configured resource limiter (if any) whether a table may grow
    /// from `current` to `desired` elements.
    fn table_growing(
        &mut self,
        current: u32,
        desired: u32,
        maximum: Option<u32>,
    ) -> Result<bool, anyhow::Error> {
        // The async context is fetched up front, before `self.limiter` is
        // borrowed mutably by the match below — presumably because
        // `self.async_cx()` could not be called from within that arm without
        // overlapping borrows (contrast `memory_growing`, which goes through
        // the disjoint `self.inner` field).
        #[cfg(feature = "async")]
        let async_cx = if self.async_support()
            && matches!(self.limiter, Some(ResourceLimiterInner::Async(_)))
        {
            Some(self.async_cx().unwrap())
        } else {
            None
        };
        match self.limiter {
            Some(ResourceLimiterInner::Sync(ref mut limiter)) => {
                limiter(&mut self.data).table_growing(current, desired, maximum)
            }
            #[cfg(feature = "async")]
            Some(ResourceLimiterInner::Async(ref mut limiter)) => unsafe {
                async_cx
                    .expect("ResourceLimiterAsync requires async Store")
                    .block_on(
                        limiter(&mut self.data)
                            .table_growing(current, desired, maximum)
                            .as_mut(),
                    )?
            },
            // No limiter configured: growth is always allowed.
            None => Ok(true),
        }
    }

    /// Reports a failed table growth to the limiter; with no limiter the
    /// error is logged and swallowed.
    fn table_grow_failed(&mut self, error: anyhow::Error) -> Result<()> {
        match self.limiter {
            Some(ResourceLimiterInner::Sync(ref mut limiter)) => {
                limiter(&mut self.data).table_grow_failed(error)
            }
            #[cfg(feature = "async")]
            Some(ResourceLimiterInner::Async(ref mut limiter)) => {
                limiter(&mut self.data).table_grow_failed(error)
            }
            None => {
                log::debug!("ignoring table growth failure: {error:?}");
                Ok(())
            }
        }
    }

    /// Handles fuel exhaustion: refuels from the reserve if possible
    /// (optionally yielding to the async executor when a fuel yield interval
    /// is configured), otherwise returns an out-of-fuel trap.
    fn out_of_gas(&mut self) -> Result<()> {
        if !self.refuel() {
            return Err(Trap::OutOfFuel).err2anyhow();
        }
        #[cfg(feature = "async")]
        if self.fuel_yield_interval.is_some() {
            self.async_yield_impl()?;
        }
        Ok(())
    }

    /// Handles an epoch deadline being reached: runs the configured callback
    /// (or traps with `Trap::Interrupt` if none), updates the deadline, and
    /// returns the new absolute deadline.
    fn new_epoch(&mut self) -> Result<u64, anyhow::Error> {
        // Temporarily take the callback out of `self` so it can be handed a
        // `StoreContextMut` borrowing all of `self` without aliasing; it is
        // put back below.
        let mut behavior = self.epoch_deadline_behavior.take();
        let delta_result = match &mut behavior {
            None => Err(Trap::Interrupt).err2anyhow(),
            Some(callback) => callback((&mut *self).as_context_mut()).and_then(|update| {
                let delta = match update {
                    UpdateDeadline::Continue(delta) => delta,
                    #[cfg(feature = "async")]
                    UpdateDeadline::Yield(delta) => {
                        assert!(
                            self.async_support(),
                            "cannot use `UpdateDeadline::Yield` without enabling async support in the config"
                        );
                        // Yield control to the async executor before
                        // extending the deadline.
                        self.async_yield_impl()?;
                        delta
                    }
                };
                self.set_epoch_deadline(delta);
                Ok(self.get_epoch_deadline())
            })
        };
        self.epoch_deadline_behavior = behavior;
        delta_result
    }

    /// Performs a garbage collection, keeping `root` (if provided) alive
    /// across the collection and returning a fresh clone of its
    /// possibly-updated reference.
    #[cfg(feature = "gc")]
    fn gc(&mut self, root: Option<VMGcRef>) -> Result<Option<VMGcRef>> {
        // Push `root` into a LIFO root scope so the collector sees it as
        // live for the duration of the collection.
        let mut scope = RootScope::new(self);
        let store = scope.as_context_mut().0;
        let store_id = store.id();
        let root = root.map(|r| store.gc_roots_mut().push_lifo_root(store_id, r));
        if store.async_support() {
            #[cfg(feature = "async")]
            unsafe {
                // Async stores run GC through the async context so the GC
                // future can be polled on the current fiber.
                let async_cx = store.async_cx();
                let mut future = store.gc_async();
                async_cx
                    .expect("attempted to pull async context during shutdown")
                    .block_on(Pin::new_unchecked(&mut future))?;
            }
        } else {
            (**store).gc();
        }
        // Read the GC reference back out of the root and clone it for the
        // caller before the scope pops it.
        let root = match root {
            None => None,
            Some(r) => {
                let r = r
                    .unchecked_get_gc_ref(store)
                    .expect("still in scope")
                    .unchecked_copy();
                Some(store.gc_store_mut()?.clone_gc_ref(&r))
            }
        };
        Ok(root)
    }

    /// Without GC support a collection is a no-op; the root passes through
    /// unchanged.
    #[cfg(not(feature = "gc"))]
    fn gc(&mut self, root: Option<VMGcRef>) -> Result<Option<VMGcRef>> {
        Ok(root)
    }

    /// Mutable access to the store's component call-context state.
    #[cfg(feature = "component-model")]
    fn component_calls(&mut self) -> &mut crate::runtime::vm::component::CallContexts {
        &mut self.component_calls
    }
}
impl<T> StoreInner<T> {
    /// Sets the epoch deadline to `delta` ticks past the engine's current
    /// epoch.
    pub(crate) fn set_epoch_deadline(&mut self, delta: u64) {
        // SAFETY (review note): `&mut self` gives exclusive access to the
        // runtime limits this store owns, so writing through the raw pointer
        // is not racy here.
        let epoch_deadline = unsafe { (*self.vmruntime_limits()).epoch_deadline.get_mut() };
        *epoch_deadline = self.engine().current_epoch() + delta;
    }

    /// Configures epoch expiration to trap: with no callback installed,
    /// `new_epoch` returns `Trap::Interrupt`.
    fn epoch_deadline_trap(&mut self) {
        self.epoch_deadline_behavior = None;
    }

    /// Installs `callback` to run whenever the epoch deadline is reached.
    fn epoch_deadline_callback(
        &mut self,
        callback: Box<dyn FnMut(StoreContextMut<T>) -> Result<UpdateDeadline> + Send + Sync>,
    ) {
        self.epoch_deadline_behavior = Some(callback);
    }

    /// Configures epoch expiration to yield to the async executor and then
    /// extend the deadline by `delta` ticks.
    ///
    /// # Panics
    ///
    /// Panics if the store was not configured with async support.
    fn epoch_deadline_async_yield_and_update(&mut self, delta: u64) {
        assert!(
            self.async_support(),
            "cannot use `epoch_deadline_async_yield_and_update` without enabling async support in the config"
        );
        #[cfg(feature = "async")]
        {
            self.epoch_deadline_behavior =
                Some(Box::new(move |_store| Ok(UpdateDeadline::Yield(delta))));
        }
        // Without the `async` feature the closure above is compiled out;
        // mark `delta` used only in that configuration to silence the
        // unused-variable warning (previously this ran unconditionally,
        // crammed onto the closing-brace line).
        #[cfg(not(feature = "async"))]
        let _ = delta;
    }

    /// Reads the currently configured absolute epoch deadline.
    fn get_epoch_deadline(&self) -> u64 {
        // SAFETY (review note): reads the store-owned runtime limits through
        // the raw pointer; no concurrent mutation is possible through `&self`
        // alone — confirm against `vmruntime_limits`'s contract.
        let epoch_deadline = unsafe { (*self.vmruntime_limits()).epoch_deadline.get_mut() };
        *epoch_deadline
    }
}
impl<T: Default> Default for Store<T> {
    /// Builds a store with default-constructed data on a default engine.
    fn default() -> Store<T> {
        let engine = Engine::default();
        Store::new(&engine, T::default())
    }
}
impl<T: fmt::Debug> fmt::Debug for Store<T> {
    /// Debug-formats the store as its inner allocation's address plus the
    /// user data's own `Debug` output.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let inner: *const StoreInner<T> = &**self.inner;
        let mut dbg = f.debug_struct("Store");
        dbg.field("inner", &inner);
        dbg.field("data", &self.inner.data);
        dbg.finish()
    }
}
impl<T> Drop for Store<T> {
    fn drop(&mut self) {
        // Both fields are `ManuallyDrop` so the drop order can be made
        // explicit: the user's `data` is dropped first, then the boxed
        // `StoreInner` (and with it the rest of the store's internals).
        // SAFETY: each field is dropped exactly once, here, and `self` is
        // never used afterwards.
        unsafe {
            ManuallyDrop::drop(&mut self.inner.data);
            ManuallyDrop::drop(&mut self.inner);
        }
    }
}
impl Drop for StoreOpaque {
    fn drop(&mut self) {
        // SAFETY (review note): each handle below is deallocated exactly
        // once with the allocator chosen by the same `kind` logic —
        // presumably matching how it was allocated; confirm against the
        // instance-allocation paths.
        unsafe {
            let allocator = self.engine.allocator();
            let ondemand = OnDemandInstanceAllocator::default();
            for instance in self.instances.iter_mut() {
                // Dummy instances are released via the on-demand allocator
                // rather than the engine's configured allocator.
                if let StoreInstanceKind::Dummy = instance.kind {
                    ondemand.deallocate_module(&mut instance.handle);
                } else {
                    allocator.deallocate_module(&mut instance.handle);
                }
            }
            // The default caller is likewise an on-demand allocation.
            ondemand.deallocate_module(&mut self.default_caller);
            #[cfg(feature = "gc")]
            if let Some(gc_store) = self.gc_store.take() {
                allocator.deallocate_gc_heap(gc_store.allocation_index, gc_store.gc_heap);
            }
            #[cfg(feature = "component-model")]
            {
                // Undo one count bump per component instance created on this
                // store (see where `num_component_instances` is incremented).
                for _ in 0..self.num_component_instances {
                    allocator.decrement_component_instance_count();
                }
            }
            // Manual drops mirror the `ManuallyDrop` fields' declarations.
            ManuallyDrop::drop(&mut self.store_data);
            ManuallyDrop::drop(&mut self.rooted_host_funcs);
        }
    }
}
impl crate::runtime::vm::ModuleInfoLookup for ModuleRegistry {
fn lookup(&self, pc: usize) -> Option<&dyn crate::runtime::vm::ModuleInfo> {
self.lookup_module_info(pc)
}
}
/// Scope guard that writes a saved value back through a raw pointer when it
/// is dropped, restoring the pointee to the state captured at construction.
struct Reset<T: Copy>(*mut T, T);

impl<T: Copy> Drop for Reset<T> {
    fn drop(&mut self) {
        // SAFETY: constructed with a pointer that callers keep valid for
        // writes for the guard's whole lifetime; `T: Copy` means no
        // destructor is skipped by the raw write.
        unsafe {
            self.0.write(self.1);
        }
    }
}
#[cfg(test)]
mod tests {
    use super::{get_fuel, refuel, set_fuel};
    use std::num::NonZeroU64;

    /// Test-only model of the store's fuel bookkeeping: a signed consumed
    /// counter, an unsigned reserve, and an optional per-yield interval.
    #[derive(Default)]
    struct FuelTank {
        pub consumed_fuel: i64,
        pub reserve_fuel: u64,
        pub yield_interval: Option<NonZeroU64>,
    }

    impl FuelTank {
        /// An empty tank: nothing consumed, nothing reserved, no interval.
        fn new() -> Self {
            Self::default()
        }

        /// Total remaining fuel as seen by the public API.
        fn get_fuel(&self) -> u64 {
            get_fuel(self.consumed_fuel, self.reserve_fuel)
        }

        /// Moves fuel from the reserve into the active counter; `false`
        /// means the tank is exhausted.
        fn refuel(&mut self) -> bool {
            refuel(
                &mut self.consumed_fuel,
                &mut self.reserve_fuel,
                self.yield_interval,
            )
        }

        /// Replaces the tank's total fuel, splitting it per the interval.
        fn set_fuel(&mut self, fuel: u64) {
            set_fuel(
                &mut self.consumed_fuel,
                &mut self.reserve_fuel,
                self.yield_interval,
                fuel,
            );
        }
    }

    #[test]
    fn smoke() {
        let mut tank = FuelTank::new();

        // No interval: everything goes into the active counter.
        tank.set_fuel(10);
        assert_eq!(tank.consumed_fuel, -10);
        assert_eq!(tank.reserve_fuel, 0);

        // With an interval of 10, only 10 units stay active; the remainder
        // is held in reserve.
        tank.yield_interval = NonZeroU64::new(10);
        tank.set_fuel(25);
        assert_eq!(tank.consumed_fuel, -10);
        assert_eq!(tank.reserve_fuel, 15);
    }

    #[test]
    fn does_not_lose_precision() {
        let mut tank = FuelTank::new();

        tank.set_fuel(u64::MAX);
        assert_eq!(tank.get_fuel(), u64::MAX);

        tank.set_fuel(i64::MAX as u64);
        assert_eq!(tank.get_fuel(), i64::MAX as u64);

        // One past i64::MAX must round-trip through the split
        // representation as well.
        tank.set_fuel(i64::MAX as u64 + 1);
        assert_eq!(tank.get_fuel(), i64::MAX as u64 + 1);
    }

    #[test]
    fn yielding_does_not_lose_precision() {
        let mut tank = FuelTank::new();

        tank.yield_interval = NonZeroU64::new(10);
        tank.set_fuel(u64::MAX);
        assert_eq!(tank.get_fuel(), u64::MAX);
        assert_eq!(tank.consumed_fuel, -10);
        assert_eq!(tank.reserve_fuel, u64::MAX - 10);

        // An interval too large for i64 is clamped to i64::MAX active fuel.
        tank.yield_interval = NonZeroU64::new(u64::MAX);
        tank.set_fuel(u64::MAX);
        assert_eq!(tank.get_fuel(), u64::MAX);
        assert_eq!(tank.consumed_fuel, -i64::MAX);
        assert_eq!(tank.reserve_fuel, u64::MAX - (i64::MAX as u64));

        tank.yield_interval = NonZeroU64::new((i64::MAX as u64) + 1);
        tank.set_fuel(u64::MAX);
        assert_eq!(tank.get_fuel(), u64::MAX);
        assert_eq!(tank.consumed_fuel, -i64::MAX);
        assert_eq!(tank.reserve_fuel, u64::MAX - (i64::MAX as u64));
    }

    #[test]
    fn refueling() {
        let mut tank = FuelTank::new();

        // When the tank is low, refueling pulls an interval's worth out of
        // the reserve, minus whatever is still unconsumed.
        tank.yield_interval = NonZeroU64::new(10);
        tank.reserve_fuel = 42;
        tank.consumed_fuel = 4;
        assert!(tank.refuel());
        assert_eq!(tank.reserve_fuel, 28);
        assert_eq!(tank.consumed_fuel, -10);

        // Refueling never grows the total: leftover active fuel is returned
        // to the reserve first.
        tank.yield_interval = NonZeroU64::new(1);
        tank.reserve_fuel = 8;
        tank.consumed_fuel = 4;
        assert_eq!(tank.get_fuel(), 4);
        assert!(tank.refuel());
        assert_eq!(tank.reserve_fuel, 3);
        assert_eq!(tank.consumed_fuel, -1);
        assert_eq!(tank.get_fuel(), 4);

        // A tank whose total is already zero cannot be refueled, and its
        // counters are left untouched.
        tank.yield_interval = NonZeroU64::new(10);
        tank.reserve_fuel = 3;
        tank.consumed_fuel = 4;
        assert_eq!(tank.get_fuel(), 0);
        assert!(!tank.refuel());
        assert_eq!(tank.reserve_fuel, 3);
        assert_eq!(tank.consumed_fuel, 4);
        assert_eq!(tank.get_fuel(), 0);
    }
}