use crate::Result;
use crate::component::{Component, Instance, InstancePre, ResourceType, RuntimeImport};
use crate::module::ModuleRegistry;
#[cfg(feature = "component-model-async")]
use crate::runtime::component::concurrent::ConcurrentInstanceState;
use crate::runtime::component::{ComponentInstanceId, RuntimeInstance};
use crate::runtime::vm::instance::{InstanceLayout, OwnedInstance, OwnedVMContext};
use crate::runtime::vm::vmcontext::VMFunctionBody;
use crate::runtime::vm::{
HostResult, SendSyncPtr, VMArrayCallFunction, VMFuncRef, VMGlobalDefinition,
VMMemoryDefinition, VMOpaqueContext, VMStore, VMStoreRawPtr, VMTableImport, VMWasmCallFunction,
ValRaw, VmPtr, VmSafe, catch_unwind_and_record_trap,
};
use crate::store::InstanceId;
use crate::{Func, vm};
use alloc::alloc::Layout;
use alloc::sync::Arc;
use core::mem;
use core::mem::offset_of;
use core::pin::Pin;
use core::ptr::NonNull;
use wasmtime_environ::component::*;
use wasmtime_environ::error::OutOfMemory;
use wasmtime_environ::{HostPtr, PrimaryMap, VMSharedTypeIndex};
/// Sentinel value written into not-yet-initialized pointer slots of the
/// `VMComponentContext` (debug builds only; see `initialize_vmctx`) so that
/// `debug_assert!`s throughout this module can catch reads of slots that were
/// never filled in.
#[allow(
    clippy::cast_possible_truncation,
    reason = "it's intended this is truncated on 32-bit platforms"
)]
const INVALID_PTR: usize = 0xdead_dead_beef_beef_u64 as usize;
mod handle_table;
mod libcalls;
mod resources;
pub use self::handle_table::{HandleTable, RemovedResource};
#[cfg(feature = "component-model-async")]
pub use self::handle_table::{ThreadHandleTable, TransmitLocalState, Waitable};
pub use self::resources::{CallContext, ResourceTables, TypedResource, TypedResourceIndex};
/// Host-side state tracked for each runtime component instance (kept outside
/// the raw `VMComponentContext` memory): resource handle tables plus, when the
/// `component-model-async` feature is enabled, concurrency-related state.
#[derive(Default)]
pub struct InstanceState {
    // State backing the async/concurrent portions of the component model.
    #[cfg(feature = "component-model-async")]
    concurrent_state: ConcurrentInstanceState,
    // Table of resource handles owned by this component instance.
    handle_table: HandleTable,
    // Table of thread handles (async feature only).
    #[cfg(feature = "component-model-async")]
    thread_handle_table: ThreadHandleTable,
}
impl InstanceState {
    /// Returns mutable access to this instance's concurrent (async) state.
    #[cfg(feature = "component-model-async")]
    pub fn concurrent_state(&mut self) -> &mut ConcurrentInstanceState {
        &mut self.concurrent_state
    }

    /// Returns mutable access to this instance's resource handle table.
    pub fn handle_table(&mut self) -> &mut HandleTable {
        &mut self.handle_table
    }

    /// Returns mutable access to this instance's thread handle table.
    #[cfg(feature = "component-model-async")]
    pub fn thread_handle_table(&mut self) -> &mut ThreadHandleTable {
        &mut self.thread_handle_table
    }
}
/// Runtime representation of an instantiated component.
///
/// `#[repr(C)]` is load-bearing: the variable-size raw `VMComponentContext`
/// memory is allocated contiguously *after* this struct in the same allocation
/// (see `alloc_layout`), and `from_vmctx` recovers `&ComponentInstance` from a
/// vmctx pointer by subtracting `size_of::<Self>()`. `vmctx` must therefore
/// remain the last field.
#[repr(C)]
pub struct ComponentInstance {
    /// Store-level identifier of this component instance.
    id: ComponentInstanceId,
    /// Precomputed offsets of fields within the raw vmctx area.
    offsets: VMComponentOffsets<HostPtr>,
    /// The component this is an instantiation of.
    component: Component,
    /// Host-side per-runtime-component-instance state (resource tables etc.).
    instance_states: PrimaryMap<RuntimeComponentInstanceIndex, InstanceState>,
    /// Core module instances created during instantiation.
    instances: PrimaryMap<RuntimeInstanceIndex, InstanceId>,
    /// Runtime types of the resources of this component.
    resource_types: Arc<PrimaryMap<ResourceIndex, ResourceType>>,
    /// Imports this component was instantiated with.
    imports: Arc<PrimaryMap<RuntimeImportIndex, RuntimeImport>>,
    /// Raw pointer back to the store that owns this instance.
    store: VMStoreRawPtr,
    /// Marker/handle for the trailing raw vmctx memory; keep last (see above).
    vmctx: OwnedVMContext<VMComponentContext>,
}
/// ABI signature of the host function invoked when wasm calls a lowered
/// component import.
///
/// * `vmctx` — opaque context of the calling instance.
/// * `data` — host-registered opaque data pointer (see `VMLowering::data`).
/// * `ty` / `options` — indices identifying the function type and canonical
///   ABI options for this call (exact index spaces defined by the host side).
/// * `args_and_results` / `nargs_and_results` — in/out buffer of raw values.
///
/// Returns a `bool`; presumably success/failure of the call — the consumer of
/// this signature is not visible here, so confirm against the trampoline code.
pub type VMLoweringCallee = unsafe extern "C" fn(
    vmctx: NonNull<VMOpaqueContext>,
    data: NonNull<u8>,
    ty: u32,
    options: u32,
    args_and_results: NonNull<mem::MaybeUninit<ValRaw>>,
    nargs_and_results: usize,
) -> bool;
/// Transparent newtype over a raw `VMFunctionBody` marking the entry point of
/// a lowering callee (pointed to by `VMLowering::callee`).
#[repr(transparent)]
pub struct VMLoweringFunction(VMFunctionBody);
/// The (function pointer, data pointer) pair stored in the vmctx for each
/// lowered import; read back via `ComponentInstance::lowering` and written via
/// `set_lowering`.
#[derive(Copy, Clone)]
#[repr(C)]
pub struct VMLowering {
    /// Host function to invoke for this lowering.
    pub callee: VmPtr<VMLoweringFunction>,
    /// Opaque host data passed along to `callee`.
    pub data: VmPtr<u8>,
}

// SAFETY: a plain `#[repr(C)]` pair of `VmPtr`s with no interior mutability or
// padding-sensitive invariants — assumed to satisfy the `VmSafe` contract the
// same way other vmctx-stored pointer types here do (contract not visible in
// this file; confirm against `VmSafe`'s documentation).
unsafe impl VmSafe for VMLowering {}
/// Opaque zero-sized marker for the raw, dynamically-sized component context
/// memory that trampolines receive. Actual contents are addressed relative to
/// this pointer via `VMComponentOffsets`; 16-byte alignment is required for
/// the data stored within.
#[repr(C)]
#[repr(align(16))]
pub struct VMComponentContext;
impl ComponentInstance {
    /// Entry point for host calls originating from wasm: recovers the
    /// `ComponentInstance` behind `vmctx`, materializes the store and an
    /// `Instance` handle, and runs `f` with panics/traps converted into the
    /// ABI-level result via `catch_unwind_and_record_trap`.
    ///
    /// # Safety
    ///
    /// `vmctx` must be a valid pointer into a live `ComponentInstance`'s
    /// trailing context memory (as produced by `Self::vmctx`), and the store
    /// pointer recorded at construction must still be valid.
    pub unsafe fn enter_host_from_wasm<R>(
        vmctx: NonNull<VMComponentContext>,
        f: impl FnOnce(&mut dyn VMStore, Instance) -> R,
    ) -> R::Abi
    where
        R: HostResult,
    {
        // SAFETY: forwarded from this function's contract.
        let mut ptr = unsafe { Self::from_vmctx(vmctx) };
        let reference = unsafe { ptr.as_mut() };
        let store = unsafe { &mut *reference.store.0.as_ptr() };
        let instance = Instance::from_wasmtime(store, reference.id);
        catch_unwind_and_record_trap(store, |store| f(store, instance))
    }

    /// Recovers the `ComponentInstance` header from a raw context pointer.
    ///
    /// The context memory is allocated directly after the header (see
    /// `alloc_layout`), so walking back `size_of::<ComponentInstance>()` bytes
    /// lands on the start of the header.
    ///
    /// # Safety
    ///
    /// `vmctx` must point at the trailing context of a live instance.
    unsafe fn from_vmctx(vmctx: NonNull<VMComponentContext>) -> NonNull<ComponentInstance> {
        unsafe {
            vmctx
                .byte_sub(mem::size_of::<ComponentInstance>())
                .cast::<ComponentInstance>()
        }
    }

    /// Reads the `ComponentInstanceId` of the instance owning `vmctx`.
    ///
    /// # Safety
    ///
    /// Same requirements as `from_vmctx`.
    pub(crate) unsafe fn vmctx_instance_id(
        vmctx: NonNull<VMComponentContext>,
    ) -> ComponentInstanceId {
        unsafe { Self::from_vmctx(vmctx).as_ref().id }
    }

    /// Computes the allocation layout for a header plus its variable-size
    /// trailing context, panicking on arithmetic overflow.
    fn alloc_layout(offsets: &VMComponentOffsets<HostPtr>) -> Layout {
        let size = mem::size_of::<Self>()
            .checked_add(usize::try_from(offsets.size_of_vmctx()).unwrap())
            .unwrap();
        let align = mem::align_of::<Self>();
        Layout::from_size_align(size, align).unwrap()
    }

    /// Allocates and initializes a new `ComponentInstance` for `component`
    /// within `store`, returning the owned allocation or an OOM error.
    ///
    /// Pre-populates one default `InstanceState` per runtime component
    /// instance and then fills the raw context via `initialize_vmctx`.
    pub(crate) fn new(
        id: ComponentInstanceId,
        component: &Component,
        resource_types: Arc<PrimaryMap<ResourceIndex, ResourceType>>,
        imports: &Arc<PrimaryMap<RuntimeImportIndex, RuntimeImport>>,
        store: NonNull<dyn VMStore>,
    ) -> Result<OwnedComponentInstance, OutOfMemory> {
        let offsets = VMComponentOffsets::new(HostPtr, component.env_component());
        let num_instances = component.env_component().num_runtime_component_instances;
        let mut instance_states = PrimaryMap::with_capacity(num_instances.try_into().unwrap());
        for _ in 0..num_instances {
            instance_states.push(InstanceState::default());
        }
        let mut ret = OwnedInstance::new(ComponentInstance {
            id,
            offsets,
            instance_states,
            instances: PrimaryMap::with_capacity(
                component
                    .env_component()
                    .num_runtime_instances
                    .try_into()
                    .unwrap(),
            ),
            component: component.clone(),
            resource_types,
            imports: imports.clone(),
            store: VMStoreRawPtr(store),
            vmctx: OwnedVMContext::new(),
        })?;
        // SAFETY: `ret` was just allocated with the layout this type's
        // `InstanceLayout` impl describes, so the trailing context is valid.
        unsafe {
            ret.get_mut().initialize_vmctx();
        }
        Ok(ret)
    }

    /// Returns the pointer to this instance's trailing raw context memory.
    #[inline]
    pub fn vmctx(&self) -> NonNull<VMComponentContext> {
        InstanceLayout::vmctx(self)
    }

    /// Returns a handle to the flags global of component instance `instance`.
    #[inline]
    pub fn instance_flags(&self, instance: RuntimeComponentInstanceIndex) -> InstanceFlags {
        unsafe {
            let ptr = self
                .vmctx_plus_offset_raw::<VMGlobalDefinition>(self.offsets.instance_flags(instance));
            InstanceFlags(SendSyncPtr::new(ptr))
        }
    }

    /// Reads the previously-registered memory definition for `idx`.
    /// Debug-asserts the slot was initialized (i.e. not the debug sentinel).
    pub fn runtime_memory(&self, idx: RuntimeMemoryIndex) -> NonNull<VMMemoryDefinition> {
        unsafe {
            let ret = *self.vmctx_plus_offset::<VmPtr<_>>(self.offsets.runtime_memory(idx));
            debug_assert!(ret.as_ptr() as usize != INVALID_PTR);
            ret.as_non_null()
        }
    }

    /// Reads the previously-registered table import for `idx`.
    pub fn runtime_table(&self, idx: RuntimeTableIndex) -> VMTableImport {
        unsafe {
            let ret = *self.vmctx_plus_offset::<VMTableImport>(self.offsets.runtime_table(idx));
            debug_assert!(ret.from.as_ptr() as usize != INVALID_PTR);
            debug_assert!(ret.vmctx.as_ptr() as usize != INVALID_PTR);
            ret
        }
    }

    /// Looks up entry `func_idx` of runtime table `table_idx`, lazily
    /// initializing that table slot, and wraps any funcref found in a `Func`.
    ///
    /// Returns `Ok(None)` when the table slot holds no function.
    pub fn index_runtime_func_table(
        &self,
        registry: &ModuleRegistry,
        table_idx: RuntimeTableIndex,
        func_idx: u64,
    ) -> Result<Option<Func>> {
        unsafe {
            let store = self.store.0.as_ref();
            let table = self.runtime_table(table_idx);
            let vmctx = table.vmctx.as_non_null();
            // Hop from the table import's vmctx back to its defining core
            // instance so the defined table can be accessed.
            let mut instance_ptr = vm::Instance::from_vmctx(vmctx);
            let instance = Pin::new_unchecked(instance_ptr.as_mut());
            // `[func_idx]` restricts lazy initialization to just the single
            // index being read — TODO(review): confirm against the
            // `get_defined_table_with_lazy_init` signature.
            let table =
                instance.get_defined_table_with_lazy_init(registry, table.index, [func_idx]);
            let func = table
                .get_func(func_idx)?
                .map(|funcref| Func::from_vm_func_ref(store.id(), funcref));
            Ok(func)
        }
    }

    /// Reads the previously-registered `realloc` funcref for `idx`.
    pub fn runtime_realloc(&self, idx: RuntimeReallocIndex) -> NonNull<VMFuncRef> {
        unsafe {
            let ret = *self.vmctx_plus_offset::<VmPtr<_>>(self.offsets.runtime_realloc(idx));
            debug_assert!(ret.as_ptr() as usize != INVALID_PTR);
            ret.as_non_null()
        }
    }

    /// Reads the previously-registered callback funcref for `idx`.
    pub fn runtime_callback(&self, idx: RuntimeCallbackIndex) -> NonNull<VMFuncRef> {
        unsafe {
            let ret = *self.vmctx_plus_offset::<VmPtr<_>>(self.offsets.runtime_callback(idx));
            debug_assert!(ret.as_ptr() as usize != INVALID_PTR);
            ret.as_non_null()
        }
    }

    /// Reads the previously-registered `post-return` funcref for `idx`.
    pub fn runtime_post_return(&self, idx: RuntimePostReturnIndex) -> NonNull<VMFuncRef> {
        unsafe {
            let ret = *self.vmctx_plus_offset::<VmPtr<_>>(self.offsets.runtime_post_return(idx));
            debug_assert!(ret.as_ptr() as usize != INVALID_PTR);
            ret.as_non_null()
        }
    }

    /// Reads the host callee/data pair registered for lowered import `idx`.
    pub fn lowering(&self, idx: LoweredIndex) -> VMLowering {
        unsafe {
            let ret = *self.vmctx_plus_offset::<VMLowering>(self.offsets.lowering(idx));
            debug_assert!(ret.callee.as_ptr() as usize != INVALID_PTR);
            debug_assert!(ret.data.as_ptr() as usize != INVALID_PTR);
            ret
        }
    }

    /// Returns a pointer to the in-vmctx `VMFuncRef` for trampoline `idx`,
    /// debug-asserting its fields were initialized.
    pub fn trampoline_func_ref(&self, idx: TrampolineIndex) -> NonNull<VMFuncRef> {
        unsafe {
            let offset = self.offsets.trampoline_func_ref(idx);
            let ret = self.vmctx_plus_offset_raw::<VMFuncRef>(offset);
            // `wasm_call` is an `Option<VmPtr<..>>`; transmute to its raw
            // usize representation to compare against the debug sentinel.
            debug_assert!(
                mem::transmute::<Option<VmPtr<VMWasmCallFunction>>, usize>(ret.as_ref().wasm_call)
                    != INVALID_PTR
            );
            debug_assert!(ret.as_ref().vmctx.as_ptr() as usize != INVALID_PTR);
            ret
        }
    }

    /// Returns a pointer to the in-vmctx `VMFuncRef` for unsafe intrinsic
    /// `idx`, debug-asserting its fields were initialized.
    pub fn unsafe_intrinsic_func_ref(&self, idx: UnsafeIntrinsic) -> NonNull<VMFuncRef> {
        unsafe {
            let offset = self.offsets.unsafe_intrinsic_func_ref(idx);
            let ret = self.vmctx_plus_offset_raw::<VMFuncRef>(offset);
            debug_assert!(
                mem::transmute::<Option<VmPtr<VMWasmCallFunction>>, usize>(ret.as_ref().wasm_call)
                    != INVALID_PTR
            );
            debug_assert!(ret.as_ref().vmctx.as_ptr() as usize != INVALID_PTR);
            ret
        }
    }

    /// Records the memory definition for `idx`, debug-asserting the slot was
    /// still the uninitialized sentinel (i.e. written exactly once).
    pub fn set_runtime_memory(
        self: Pin<&mut Self>,
        idx: RuntimeMemoryIndex,
        ptr: NonNull<VMMemoryDefinition>,
    ) {
        unsafe {
            let offset = self.offsets.runtime_memory(idx);
            let storage = self.vmctx_plus_offset_mut::<VmPtr<VMMemoryDefinition>>(offset);
            debug_assert!((*storage).as_ptr() as usize == INVALID_PTR);
            *storage = ptr.into();
        }
    }

    /// Records the `realloc` funcref for `idx` (write-once, see above).
    pub fn set_runtime_realloc(
        self: Pin<&mut Self>,
        idx: RuntimeReallocIndex,
        ptr: NonNull<VMFuncRef>,
    ) {
        unsafe {
            let offset = self.offsets.runtime_realloc(idx);
            let storage = self.vmctx_plus_offset_mut::<VmPtr<VMFuncRef>>(offset);
            debug_assert!((*storage).as_ptr() as usize == INVALID_PTR);
            *storage = ptr.into();
        }
    }

    /// Records the callback funcref for `idx` (write-once, see above).
    pub fn set_runtime_callback(
        self: Pin<&mut Self>,
        idx: RuntimeCallbackIndex,
        ptr: NonNull<VMFuncRef>,
    ) {
        unsafe {
            let offset = self.offsets.runtime_callback(idx);
            let storage = self.vmctx_plus_offset_mut::<VmPtr<VMFuncRef>>(offset);
            debug_assert!((*storage).as_ptr() as usize == INVALID_PTR);
            *storage = ptr.into();
        }
    }

    /// Records the `post-return` funcref for `idx` (write-once, see above).
    pub fn set_runtime_post_return(
        self: Pin<&mut Self>,
        idx: RuntimePostReturnIndex,
        ptr: NonNull<VMFuncRef>,
    ) {
        unsafe {
            let offset = self.offsets.runtime_post_return(idx);
            let storage = self.vmctx_plus_offset_mut::<VmPtr<VMFuncRef>>(offset);
            debug_assert!((*storage).as_ptr() as usize == INVALID_PTR);
            *storage = ptr.into();
        }
    }

    /// Records the table import for `idx` (write-once, see above).
    pub fn set_runtime_table(self: Pin<&mut Self>, idx: RuntimeTableIndex, import: VMTableImport) {
        unsafe {
            let offset = self.offsets.runtime_table(idx);
            let storage = self.vmctx_plus_offset_mut::<VMTableImport>(offset);
            debug_assert!((*storage).vmctx.as_ptr() as usize == INVALID_PTR);
            debug_assert!((*storage).from.as_ptr() as usize == INVALID_PTR);
            *storage = import;
        }
    }

    /// Records the host callee/data pair for lowered import `idx`.
    /// Both component fields are checked against the sentinel before the pair
    /// is written in one store.
    pub fn set_lowering(self: Pin<&mut Self>, idx: LoweredIndex, lowering: VMLowering) {
        unsafe {
            let callee = self.offsets.lowering_callee(idx);
            debug_assert!(*self.vmctx_plus_offset::<usize>(callee) == INVALID_PTR);
            let data = self.offsets.lowering_data(idx);
            debug_assert!(*self.vmctx_plus_offset::<usize>(data) == INVALID_PTR);
            let offset = self.offsets.lowering(idx);
            *self.vmctx_plus_offset_mut(offset) = lowering;
        }
    }

    /// Initializes the in-vmctx `VMFuncRef` for trampoline `idx`, pointing its
    /// context back at this instance's own vmctx.
    pub fn set_trampoline(
        self: Pin<&mut Self>,
        idx: TrampolineIndex,
        wasm_call: NonNull<VMWasmCallFunction>,
        array_call: NonNull<VMArrayCallFunction>,
        type_index: VMSharedTypeIndex,
    ) {
        unsafe {
            let offset = self.offsets.trampoline_func_ref(idx);
            debug_assert!(*self.vmctx_plus_offset::<usize>(offset) == INVALID_PTR);
            let vmctx = VMOpaqueContext::from_vmcomponent(self.vmctx());
            *self.vmctx_plus_offset_mut(offset) = VMFuncRef {
                wasm_call: Some(wasm_call.into()),
                array_call: array_call.into(),
                type_index,
                vmctx: vmctx.into(),
            };
        }
    }

    /// Initializes the in-vmctx `VMFuncRef` for unsafe intrinsic `intrinsic`;
    /// mirrors `set_trampoline`.
    pub fn set_intrinsic(
        self: Pin<&mut Self>,
        intrinsic: UnsafeIntrinsic,
        wasm_call: NonNull<VMWasmCallFunction>,
        array_call: NonNull<VMArrayCallFunction>,
        type_index: VMSharedTypeIndex,
    ) {
        unsafe {
            let offset = self.offsets.unsafe_intrinsic_func_ref(intrinsic);
            debug_assert!(*self.vmctx_plus_offset::<usize>(offset) == INVALID_PTR);
            let vmctx = VMOpaqueContext::from_vmcomponent(self.vmctx());
            *self.vmctx_plus_offset_mut(offset) = VMFuncRef {
                wasm_call: Some(wasm_call.into()),
                array_call: array_call.into(),
                type_index,
                vmctx: vmctx.into(),
            };
        }
    }

    /// Records the (optional) destructor funcref for resource `idx`.
    pub fn set_resource_destructor(
        self: Pin<&mut Self>,
        idx: ResourceIndex,
        dtor: Option<NonNull<VMFuncRef>>,
    ) {
        unsafe {
            let offset = self.offsets.resource_destructor(idx);
            debug_assert!(*self.vmctx_plus_offset::<usize>(offset) == INVALID_PTR);
            *self.vmctx_plus_offset_mut(offset) = dtor.map(VmPtr::from);
        }
    }

    /// Reads the destructor funcref for resource `idx`, if one was set.
    pub fn resource_destructor(&self, idx: ResourceIndex) -> Option<NonNull<VMFuncRef>> {
        unsafe {
            let offset = self.offsets.resource_destructor(idx);
            debug_assert!(*self.vmctx_plus_offset::<usize>(offset) != INVALID_PTR);
            (*self.vmctx_plus_offset::<Option<VmPtr<VMFuncRef>>>(offset)).map(|p| p.as_non_null())
        }
    }

    /// One-time initialization of the trailing raw context memory:
    ///
    /// * writes the magic value, builtins table pointer, and store-context
    ///   pointer;
    /// * seeds each component instance's flags global with `FLAG_MAY_LEAVE`;
    /// * in debug builds, fills every pointer-holding slot with `INVALID_PTR`
    ///   so later reads of never-set slots trip `debug_assert!`s.
    ///
    /// # Safety
    ///
    /// Must be called exactly once, on freshly-allocated context memory.
    unsafe fn initialize_vmctx(mut self: Pin<&mut Self>) {
        let offset = self.offsets.magic();
        unsafe {
            *self.as_mut().vmctx_plus_offset_mut(offset) = VMCOMPONENT_MAGIC;
        }
        static BUILTINS: libcalls::VMComponentBuiltins = libcalls::VMComponentBuiltins::INIT;
        let ptr = BUILTINS.expose_provenance();
        let offset = self.offsets.builtins();
        unsafe {
            *self.as_mut().vmctx_plus_offset_mut(offset) = VmPtr::from(ptr);
        }
        let offset = self.offsets.vm_store_context();
        unsafe {
            *self.as_mut().vmctx_plus_offset_mut(offset) =
                VmPtr::from(self.store.0.as_ref().vm_store_context_ptr());
        }
        for i in 0..self.offsets.num_runtime_component_instances {
            let i = RuntimeComponentInstanceIndex::from_u32(i);
            let mut def = VMGlobalDefinition::new();
            unsafe {
                *def.as_i32_mut() = FLAG_MAY_LEAVE;
                self.instance_flags(i).as_raw().write(def);
            }
        }
        // Debug builds only: poison every lazily-initialized slot so the
        // paired `debug_assert!`s in the accessors above catch stale reads.
        if cfg!(debug_assertions) {
            for i in 0..self.offsets.num_lowerings {
                let i = LoweredIndex::from_u32(i);
                let offset = self.offsets.lowering_callee(i);
                unsafe {
                    *self.as_mut().vmctx_plus_offset_mut(offset) = INVALID_PTR;
                }
                let offset = self.offsets.lowering_data(i);
                unsafe {
                    *self.as_mut().vmctx_plus_offset_mut(offset) = INVALID_PTR;
                }
            }
            for i in 0..self.offsets.num_trampolines {
                let i = TrampolineIndex::from_u32(i);
                let offset = self.offsets.trampoline_func_ref(i);
                unsafe {
                    *self.as_mut().vmctx_plus_offset_mut(offset) = INVALID_PTR;
                }
            }
            for i in 0..self.offsets.num_unsafe_intrinsics {
                let i = UnsafeIntrinsic::from_u32(i);
                let offset = self.offsets.unsafe_intrinsic_func_ref(i);
                unsafe {
                    *self.as_mut().vmctx_plus_offset_mut(offset) = INVALID_PTR;
                }
            }
            for i in 0..self.offsets.num_runtime_memories {
                let i = RuntimeMemoryIndex::from_u32(i);
                let offset = self.offsets.runtime_memory(i);
                unsafe {
                    *self.as_mut().vmctx_plus_offset_mut(offset) = INVALID_PTR;
                }
            }
            for i in 0..self.offsets.num_runtime_reallocs {
                let i = RuntimeReallocIndex::from_u32(i);
                let offset = self.offsets.runtime_realloc(i);
                unsafe {
                    *self.as_mut().vmctx_plus_offset_mut(offset) = INVALID_PTR;
                }
            }
            for i in 0..self.offsets.num_runtime_callbacks {
                let i = RuntimeCallbackIndex::from_u32(i);
                let offset = self.offsets.runtime_callback(i);
                unsafe {
                    *self.as_mut().vmctx_plus_offset_mut(offset) = INVALID_PTR;
                }
            }
            for i in 0..self.offsets.num_runtime_post_returns {
                let i = RuntimePostReturnIndex::from_u32(i);
                let offset = self.offsets.runtime_post_return(i);
                unsafe {
                    *self.as_mut().vmctx_plus_offset_mut(offset) = INVALID_PTR;
                }
            }
            for i in 0..self.offsets.num_resources {
                let i = ResourceIndex::from_u32(i);
                let offset = self.offsets.resource_destructor(i);
                unsafe {
                    *self.as_mut().vmctx_plus_offset_mut(offset) = INVALID_PTR;
                }
            }
            for i in 0..self.offsets.num_runtime_tables {
                let i = RuntimeTableIndex::from_u32(i);
                let offset = self.offsets.runtime_table(i);
                // Poison both pointer fields of the `VMTableImport`
                // individually, matching `set_runtime_table`'s two asserts.
                #[allow(clippy::cast_possible_truncation, reason = "known to not overflow")]
                unsafe {
                    *self.as_mut().vmctx_plus_offset_mut::<usize>(
                        offset + offset_of!(VMTableImport, from) as u32,
                    ) = INVALID_PTR;
                    *self.as_mut().vmctx_plus_offset_mut::<usize>(
                        offset + offset_of!(VMTableImport, vmctx) as u32,
                    ) = INVALID_PTR;
                }
            }
        }
    }

    /// Returns the component this is an instantiation of.
    pub fn component(&self) -> &Component {
        &self.component
    }

    /// Returns the component alongside the pinned `self`, working around the
    /// borrow checker by detaching the component borrow through a raw pointer.
    pub fn component_and_self(self: Pin<&mut Self>) -> (&Component, Pin<&mut Self>) {
        // SAFETY: `component` is never moved or mutated through the returned
        // `Pin<&mut Self>` while the shared borrow lives — TODO(review):
        // soundness relies on callers not replacing `self.component`.
        let component = unsafe { &*(&raw const self.component) };
        (component, self)
    }

    /// Returns the runtime resource types of this instance.
    pub fn resource_types(&self) -> &Arc<PrimaryMap<ResourceIndex, ResourceType>> {
        &self.resource_types
    }

    /// Returns mutable access to the runtime resource types.
    pub fn resource_types_mut(
        self: Pin<&mut Self>,
    ) -> &mut Arc<PrimaryMap<ResourceIndex, ResourceType>> {
        // SAFETY: the map is not structurally pinned; no data is moved.
        unsafe { &mut self.get_unchecked_mut().resource_types }
    }

    /// Returns whether the resource table `ty` refers to a resource defined by
    /// (and owned by) a component instance within this component.
    ///
    /// Abstract resources and imported resources both yield `false`.
    pub fn resource_owned_by_own_instance(&self, ty: TypeResourceTableIndex) -> bool {
        let (resource_ty, resource_instance) = match self.component.types()[ty] {
            TypeResourceTable::Concrete { ty, instance } => (ty, instance),
            TypeResourceTable::Abstract(_) => return false,
        };
        let component = self.component.env_component();
        let idx = match component.defined_resource_index(resource_ty) {
            Some(idx) => idx,
            None => return false,
        };
        resource_instance == component.defined_resource_instances[idx]
    }

    /// Returns mutable access to all per-instance states together with the
    /// component's type information (both borrowed from `self` at once).
    #[inline]
    pub fn instance_states(
        self: Pin<&mut Self>,
    ) -> (
        &mut PrimaryMap<RuntimeComponentInstanceIndex, InstanceState>,
        &ComponentTypes,
    ) {
        // SAFETY: neither field is structurally pinned and the two borrows
        // are of disjoint fields.
        unsafe {
            let me = self.get_unchecked_mut();
            (&mut me.instance_states, me.component.types())
        }
    }

    /// Returns mutable access to the state of one component instance.
    pub fn instance_state(
        self: Pin<&mut Self>,
        instance: RuntimeComponentInstanceIndex,
    ) -> &mut InstanceState {
        &mut self.instance_states().0[instance]
    }

    /// For the concrete resource behind `ty`, returns its destructor (if any)
    /// and, when the resource is defined by this component, the runtime
    /// instance that defines it.
    pub fn dtor_and_instance(
        &self,
        ty: TypeResourceTableIndex,
    ) -> (Option<NonNull<VMFuncRef>>, Option<RuntimeInstance>) {
        let resource = self.component.types()[ty].unwrap_concrete_ty();
        let dtor = self.resource_destructor(resource);
        let component = self.component.env_component();
        let instance = component
            .defined_resource_index(resource)
            .map(|i| RuntimeInstance {
                instance: self.id(),
                index: component.defined_resource_instances[i],
            });
        (dtor, instance)
    }

    /// Returns this instance's store-level identifier.
    pub fn id(&self) -> ComponentInstanceId {
        self.id
    }

    /// Appends a newly-created core instance id, returning its runtime index.
    pub fn push_instance_id(self: Pin<&mut Self>, id: InstanceId) -> RuntimeInstanceIndex {
        self.instances_mut().push(id)
    }

    /// Looks up the core instance id previously pushed at `idx`.
    pub fn instance(&self, idx: RuntimeInstanceIndex) -> InstanceId {
        self.instances[idx]
    }

    /// Mutable access to the core-instance map (not structurally pinned).
    fn instances_mut(self: Pin<&mut Self>) -> &mut PrimaryMap<RuntimeInstanceIndex, InstanceId> {
        unsafe { &mut self.get_unchecked_mut().instances }
    }

    /// Looks up the import this instance was instantiated with at `import`.
    pub(crate) fn runtime_import(&self, import: RuntimeImportIndex) -> &RuntimeImport {
        &self.imports[import]
    }

    /// Reconstructs an `InstancePre<T>` from this instance's component,
    /// imports, and resource types.
    ///
    /// # Safety
    ///
    /// The caller must ensure `T` matches the store type these imports were
    /// originally created for (this bypasses `InstancePre`'s usual checks).
    pub unsafe fn instance_pre<T>(&self) -> InstancePre<T> {
        unsafe {
            InstancePre::new_unchecked(
                self.component.clone(),
                self.imports.clone(),
                self.resource_types.clone(),
            )
        }
    }

    /// Returns a pointer to the in-vmctx "task may block" global.
    pub(crate) fn task_may_block(&self) -> NonNull<VMGlobalDefinition> {
        unsafe { self.vmctx_plus_offset_raw::<VMGlobalDefinition>(self.offsets.task_may_block()) }
    }

    /// Reads the "task may block" flag (nonzero i32 means `true`).
    #[cfg(feature = "component-model-async")]
    pub(crate) fn get_task_may_block(&self) -> bool {
        unsafe { *self.task_may_block().as_ref().as_i32() != 0 }
    }

    /// Writes the "task may block" flag as 0/1 into the i32 global.
    #[cfg(feature = "component-model-async")]
    pub(crate) fn set_task_may_block(self: Pin<&mut Self>, val: bool) {
        unsafe { *self.task_may_block().as_mut().as_i32_mut() = if val { 1 } else { 0 } }
    }
}
// SAFETY: `layout` reserves `size_of::<Self>()` plus the dynamically-computed
// vmctx size (see `alloc_layout`), and `vmctx` is the last field of this
// `#[repr(C)]` struct, so the trailing context memory directly follows the
// header — assumed to satisfy the `InstanceLayout` contract (trait docs not
// visible in this file; confirm there).
unsafe impl InstanceLayout for ComponentInstance {
    /// The context memory must start out zeroed before `initialize_vmctx`.
    const INIT_ZEROED: bool = true;
    type VMContext = VMComponentContext;

    /// Combined layout of the instance header plus its trailing raw context.
    fn layout(&self) -> Layout {
        ComponentInstance::alloc_layout(&self.offsets)
    }

    fn owned_vmctx(&self) -> &OwnedVMContext<VMComponentContext> {
        &self.vmctx
    }

    fn owned_vmctx_mut(&mut self) -> &mut OwnedVMContext<VMComponentContext> {
        &mut self.vmctx
    }
}
/// An owned allocation of a `ComponentInstance` together with its trailing
/// raw context memory.
pub type OwnedComponentInstance = OwnedInstance<ComponentInstance>;
impl VMComponentContext {
    /// Recovers a raw pointer to the owning `ComponentInstance` header by
    /// walking back `offset_of!(ComponentInstance, vmctx)` bytes from this
    /// context pointer (the header precedes the context in one allocation).
    pub fn instance(&self) -> *mut ComponentInstance {
        unsafe {
            (self as *const Self as *mut u8)
                .offset(-(offset_of!(ComponentInstance, vmctx) as isize))
                as *mut ComponentInstance
        }
    }

    /// Casts an opaque context pointer back to a component context pointer,
    /// debug-asserting the magic value identifies it as a component context.
    ///
    /// # Safety
    ///
    /// `opaque` must actually point at a component context.
    #[inline]
    pub unsafe fn from_opaque(opaque: NonNull<VMOpaqueContext>) -> NonNull<VMComponentContext> {
        unsafe {
            debug_assert_eq!(opaque.as_ref().magic, VMCOMPONENT_MAGIC);
        }
        opaque.cast()
    }
}
impl VMOpaqueContext {
    /// Erases a component context pointer into an opaque context pointer.
    ///
    /// This is a pure pointer cast: both types are opaque markers, and the
    /// pointee bytes are left untouched.
    #[inline]
    pub fn from_vmcomponent(ptr: NonNull<VMComponentContext>) -> NonNull<VMOpaqueContext> {
        ptr.cast::<VMOpaqueContext>()
    }
}
/// Handle to a component instance's flags global (a `VMGlobalDefinition`
/// stored in the vmctx; see `ComponentInstance::instance_flags`) whose i32
/// payload holds bit flags such as `FLAG_MAY_LEAVE`.
#[repr(transparent)]
#[derive(Copy, Clone)]
pub struct InstanceFlags(SendSyncPtr<VMGlobalDefinition>);
impl InstanceFlags {
    /// Wraps a raw pointer to a flags global.
    ///
    /// # Safety
    ///
    /// `ptr` must point at a valid, live `VMGlobalDefinition` used as an
    /// instance-flags global.
    pub unsafe fn from_raw(ptr: NonNull<VMGlobalDefinition>) -> InstanceFlags {
        InstanceFlags(SendSyncPtr::from(ptr))
    }

    /// Reads whether the `FLAG_MAY_LEAVE` bit is currently set.
    ///
    /// # Safety
    ///
    /// The wrapped pointer must still be valid to read.
    #[inline]
    pub unsafe fn may_leave(&self) -> bool {
        unsafe { *self.as_raw().as_ref().as_i32() & FLAG_MAY_LEAVE != 0 }
    }

    /// Sets or clears the `FLAG_MAY_LEAVE` bit, preserving all other bits.
    ///
    /// # Safety
    ///
    /// The wrapped pointer must still be valid to write.
    #[inline]
    pub unsafe fn set_may_leave(&mut self, val: bool) {
        unsafe {
            if val {
                *self.as_raw().as_mut().as_i32_mut() |= FLAG_MAY_LEAVE;
            } else {
                *self.as_raw().as_mut().as_i32_mut() &= !FLAG_MAY_LEAVE;
            }
        }
    }

    /// Returns the underlying raw pointer to the flags global.
    #[inline]
    pub fn as_raw(&self) -> NonNull<VMGlobalDefinition> {
        self.0.as_non_null()
    }
}