use cranelift_codegen::ir::BlockArg;
use itertools::{Either, Itertools};
use crate::trap::TranslateTrap;
use cranelift_codegen::ir::condcodes::*;
use cranelift_codegen::ir::types::*;
use cranelift_codegen::ir::{self, MemFlags};
use cranelift_codegen::ir::{Block, BlockCall, InstBuilder, JumpTableData};
use cranelift_frontend::FunctionBuilder;
use wasmtime_environ::{PtrSize, TagIndex, TypeIndex, WasmResult, WasmValType, wasm_unsupported};
fn control_context_size(triple: &target_lexicon::Triple) -> WasmResult<u8> {
match (triple.architecture, triple.operating_system) {
(target_lexicon::Architecture::X86_64, target_lexicon::OperatingSystem::Linux) => Ok(24),
_ => Err(wasm_unsupported!(
"stack switching not supported on {triple}"
)),
}
}
use super::control_effect::ControlEffect;
use super::fatpointer;
pub(crate) mod stack_switching_helpers {
use core::marker::PhantomData;
use cranelift_codegen::ir;
use cranelift_codegen::ir::InstBuilder;
use cranelift_codegen::ir::condcodes::IntCC;
use cranelift_codegen::ir::types::*;
use cranelift_codegen::ir::{StackSlot, StackSlotKind::*};
use cranelift_frontend::FunctionBuilder;
use wasmtime_environ::PtrSize;
/// Describes the memory layout of a single entry of a `VMHostArray`.
pub(crate) trait VMHostArrayEntry {
    /// Returns `(alignment, size)` in bytes of one array entry, given the
    /// target's pointer size.
    fn vmhostarray_entry_layout<P: wasmtime_environ::PtrSize>(p: &P) -> (u8, u32);
}
// Payload entries: 16 bytes, 16-byte aligned, independent of pointer size.
impl VMHostArrayEntry for u128 {
    fn vmhostarray_entry_layout<P: wasmtime_environ::PtrSize>(_p: &P) -> (u8, u32) {
        (16, 16)
    }
}
// Pointer entries: one target pointer per entry (used for handler lists).
impl<T> VMHostArrayEntry for *mut T {
    fn vmhostarray_entry_layout<P: wasmtime_environ::PtrSize>(p: &P) -> (u8, u32) {
        (p.size(), p.size().into())
    }
}
/// Compile-time handle to a `VMContRef`: wraps the IR value holding its
/// address and provides typed accessors for its fields.
#[derive(Copy, Clone)]
pub struct VMContRef {
    pub address: ir::Value,
}
/// Compile-time handle to a `VMHostArray` whose entry layout is described
/// by `T`'s `VMHostArrayEntry` implementation.
#[derive(Copy, Clone)]
pub struct VMHostArrayRef<T> {
    // Address of the array header (data/length/capacity fields), not of
    // the data buffer itself.
    address: ir::Value,
    // Only selects the entry layout; no `T` value is ever stored.
    phantom: PhantomData<T>,
}
/// Array of 16-byte payload slots (continuation arguments and values).
pub type VMPayloads = VMHostArrayRef<u128>;
/// Array of pointer-sized entries holding tag addresses.
pub type VMHandlerList = VMHostArrayRef<*mut u8>;
/// A stack chain value held in two IR values: a discriminant word and a
/// payload word (see the `STACK_CHAIN_*_DISCRIMINANT` constants).
pub struct VMStackChain {
    discriminant: ir::Value,
    payload: ir::Value,
}
/// Handle to a `VMCommonStackInformation` block (state, limits, handlers).
pub struct VMCommonStackInformation {
    pub address: ir::Value,
}
/// Handle to a continuation's fiber stack; wraps the address of its
/// top-of-stack pointer field.
pub struct VMContinuationStack {
    tos_ptr: ir::Value,
}
impl VMContRef {
    /// Wraps the IR value `address` as a `VMContRef` handle.
    pub fn new(address: ir::Value) -> VMContRef {
        VMContRef { address }
    }
    /// Returns a handle to the continuation's `args` payload array
    /// (computed as `address + vmcontref_args` offset).
    pub fn args<'a>(
        &self,
        env: &mut crate::func_environ::FuncEnvironment<'a>,
        builder: &mut FunctionBuilder,
    ) -> VMPayloads {
        let offset: i64 = env.offsets.ptr.vmcontref_args().into();
        let address = builder.ins().iadd_imm(self.address, offset);
        VMPayloads::new(address)
    }
    /// Returns a handle to the continuation's `values` payload array.
    pub fn values<'a>(
        &self,
        env: &mut crate::func_environ::FuncEnvironment<'a>,
        builder: &mut FunctionBuilder,
    ) -> VMPayloads {
        let offset: i64 = env.offsets.ptr.vmcontref_values().into();
        let address = builder.ins().iadd_imm(self.address, offset);
        VMPayloads::new(address)
    }
    /// Returns a handle to the continuation's embedded
    /// `VMCommonStackInformation` block.
    pub fn common_stack_information<'a>(
        &self,
        env: &mut crate::func_environ::FuncEnvironment<'a>,
        builder: &mut FunctionBuilder,
    ) -> VMCommonStackInformation {
        let offset: i64 = env.offsets.ptr.vmcontref_common_stack_information().into();
        let address = builder.ins().iadd_imm(self.address, offset);
        VMCommonStackInformation { address }
    }
    /// Stores `new_stack_chain` into this continuation's `parent_chain`
    /// field (two pointer-sized words).
    pub fn set_parent_stack_chain<'a>(
        &mut self,
        env: &mut crate::func_environ::FuncEnvironment<'a>,
        builder: &mut FunctionBuilder,
        new_stack_chain: &VMStackChain,
    ) {
        let offset = env.offsets.ptr.vmcontref_parent_chain().into();
        new_stack_chain.store(env, builder, self.address, offset)
    }
    /// Loads this continuation's `parent_chain` field.
    pub fn get_parent_stack_chain<'a>(
        &self,
        env: &mut crate::func_environ::FuncEnvironment<'a>,
        builder: &mut FunctionBuilder,
    ) -> VMStackChain {
        let offset = env.offsets.ptr.vmcontref_parent_chain().into();
        VMStackChain::load(env, builder, self.address, offset, env.pointer_type())
    }
    /// Stores a pointer-sized value into the `last_ancestor` field.
    pub fn set_last_ancestor<'a>(
        &self,
        env: &mut crate::func_environ::FuncEnvironment<'a>,
        builder: &mut FunctionBuilder,
        last_ancestor: ir::Value,
    ) {
        let offset: i32 = env.offsets.ptr.vmcontref_last_ancestor().into();
        let mem_flags = ir::MemFlags::trusted();
        builder
            .ins()
            .store(mem_flags, last_ancestor, self.address, offset);
    }
    /// Loads the pointer-sized `last_ancestor` field.
    pub fn get_last_ancestor<'a>(
        &self,
        env: &mut crate::func_environ::FuncEnvironment<'a>,
        builder: &mut FunctionBuilder,
    ) -> ir::Value {
        let offset: i32 = env.offsets.ptr.vmcontref_last_ancestor().into();
        let mem_flags = ir::MemFlags::trusted();
        builder
            .ins()
            .load(env.pointer_type(), mem_flags, self.address, offset)
    }
    /// Loads the continuation's revision counter (always an I64).
    pub fn get_revision<'a>(
        &mut self,
        env: &mut crate::func_environ::FuncEnvironment<'a>,
        builder: &mut FunctionBuilder,
    ) -> ir::Value {
        let mem_flags = ir::MemFlags::trusted();
        let offset: i32 = env.offsets.ptr.vmcontref_revision().into();
        let revision = builder.ins().load(I64, mem_flags, self.address, offset);
        revision
    }
    /// Stores `revision + 1` into the revision field and returns the
    /// incremented value. `revision` must be the current stored revision.
    pub fn incr_revision<'a>(
        &mut self,
        env: &mut crate::func_environ::FuncEnvironment<'a>,
        builder: &mut FunctionBuilder,
        revision: ir::Value,
    ) -> ir::Value {
        let mem_flags = ir::MemFlags::trusted();
        let offset: i32 = env.offsets.ptr.vmcontref_revision().into();
        let revision_plus1 = builder.ins().iadd_imm(revision, 1);
        builder
            .ins()
            .store(mem_flags, revision_plus1, self.address, offset);
        revision_plus1
    }
    /// Returns a handle to the continuation's fiber stack. Note that the
    /// returned handle wraps the *address* of the stack field (used as the
    /// top-of-stack pointer location), not a loaded value.
    pub fn get_fiber_stack<'a>(
        &self,
        env: &mut crate::func_environ::FuncEnvironment<'a>,
        builder: &mut FunctionBuilder,
    ) -> VMContinuationStack {
        let offset: i64 = env.offsets.ptr.vmcontref_stack().into();
        let fiber_stack_top_of_stack_ptr = builder.ins().iadd_imm(self.address, offset);
        VMContinuationStack::new(fiber_stack_top_of_stack_ptr)
    }
}
impl<T: VMHostArrayEntry> VMHostArrayRef<T> {
pub(crate) fn new(address: ir::Value) -> Self {
Self {
address,
phantom: PhantomData::default(),
}
}
fn get(&self, builder: &mut FunctionBuilder, ty: ir::Type, offset: i32) -> ir::Value {
let mem_flags = ir::MemFlags::trusted();
builder.ins().load(ty, mem_flags, self.address, offset)
}
fn set<U>(&self, builder: &mut FunctionBuilder, offset: i32, value: ir::Value) {
debug_assert_eq!(
builder.func.dfg.value_type(value),
Type::int_with_byte_size(u16::try_from(core::mem::size_of::<U>()).unwrap())
.unwrap()
);
let mem_flags = ir::MemFlags::trusted();
builder.ins().store(mem_flags, value, self.address, offset);
}
pub fn get_data<'a>(
&self,
env: &mut crate::func_environ::FuncEnvironment<'a>,
builder: &mut FunctionBuilder,
) -> ir::Value {
let offset = env.offsets.ptr.vmhostarray_data().into();
self.get(builder, env.pointer_type(), offset)
}
pub fn get_length<'a>(
&self,
env: &mut crate::func_environ::FuncEnvironment<'a>,
builder: &mut FunctionBuilder,
) -> ir::Value {
let offset = env.offsets.ptr.vmhostarray_length().into();
self.get(builder, I32, offset)
}
fn set_length<'a>(
&self,
env: &mut crate::func_environ::FuncEnvironment<'a>,
builder: &mut FunctionBuilder,
length: ir::Value,
) {
let offset = env.offsets.ptr.vmhostarray_length().into();
self.set::<u32>(builder, offset, length);
}
fn set_capacity<'a>(
&self,
env: &mut crate::func_environ::FuncEnvironment<'a>,
builder: &mut FunctionBuilder,
capacity: ir::Value,
) {
let offset = env.offsets.ptr.vmhostarray_capacity().into();
self.set::<u32>(builder, offset, capacity);
}
fn set_data<'a>(
&self,
env: &mut crate::func_environ::FuncEnvironment<'a>,
builder: &mut FunctionBuilder,
data: ir::Value,
) {
debug_assert_eq!(builder.func.dfg.value_type(data), env.pointer_type());
let offset: i32 = env.offsets.ptr.vmhostarray_data().into();
let mem_flags = ir::MemFlags::trusted();
builder.ins().store(mem_flags, data, self.address, offset);
}
pub fn occupy_next_slots<'a>(
&self,
env: &mut crate::func_environ::FuncEnvironment<'a>,
builder: &mut FunctionBuilder,
arg_count: i32,
) -> ir::Value {
let data = self.get_data(env, builder);
let original_length = self.get_length(env, builder);
let new_length = builder
.ins()
.iadd_imm(original_length, i64::from(arg_count));
self.set_length(env, builder, new_length);
let (_align, entry_size) = T::vmhostarray_entry_layout(&env.offsets.ptr);
let original_length = builder.ins().uextend(I64, original_length);
let byte_offset = builder
.ins()
.imul_imm(original_length, i64::from(entry_size));
builder.ins().iadd(data, byte_offset)
}
pub fn allocate_or_reuse_stack_slot<'a>(
&self,
env: &mut crate::func_environ::FuncEnvironment<'a>,
builder: &mut FunctionBuilder,
required_capacity: u32,
existing_slot: Option<StackSlot>,
) -> StackSlot {
let (align, entry_size) = T::vmhostarray_entry_layout(&env.offsets.ptr);
let required_size = required_capacity * entry_size;
match existing_slot {
Some(slot) if builder.func.sized_stack_slots[slot].size >= required_size => {
let slot_data = &builder.func.sized_stack_slots[slot];
debug_assert!(align <= slot_data.align_shift);
debug_assert_eq!(slot_data.kind, ExplicitSlot);
let existing_capacity = slot_data.size / entry_size;
let capacity_value = builder.ins().iconst(I32, i64::from(existing_capacity));
let existing_data = builder.ins().stack_addr(env.pointer_type(), slot, 0);
self.set_capacity(env, builder, capacity_value);
self.set_data(env, builder, existing_data);
slot
}
_ => {
let capacity_value = builder.ins().iconst(I32, i64::from(required_capacity));
let slot_size = ir::StackSlotData::new(
ir::StackSlotKind::ExplicitSlot,
required_size,
align,
);
let slot = builder.create_sized_stack_slot(slot_size);
let new_data = builder.ins().stack_addr(env.pointer_type(), slot, 0);
self.set_capacity(env, builder, capacity_value);
self.set_data(env, builder, new_data);
slot
}
}
}
pub fn load_data_entries<'a>(
&self,
env: &mut crate::func_environ::FuncEnvironment<'a>,
builder: &mut FunctionBuilder,
load_types: &[ir::Type],
) -> Vec<ir::Value> {
let memflags = ir::MemFlags::trusted();
let data_start_pointer = self.get_data(env, builder);
let mut values = vec![];
let mut offset = 0;
let (_align, entry_size) = T::vmhostarray_entry_layout(&env.offsets.ptr);
for valtype in load_types {
let val = builder
.ins()
.load(*valtype, memflags, data_start_pointer, offset);
values.push(val);
offset += i32::try_from(entry_size).unwrap();
}
values
}
pub fn store_data_entries<'a>(
&self,
env: &mut crate::func_environ::FuncEnvironment<'a>,
builder: &mut FunctionBuilder,
values: &[ir::Value],
) {
let store_count = builder
.ins()
.iconst(I32, i64::try_from(values.len()).unwrap());
let (_align, entry_size) = T::vmhostarray_entry_layout(&env.offsets.ptr);
debug_assert!(values.iter().all(|val| {
let ty = builder.func.dfg.value_type(*val);
let size = ty.bytes();
size <= entry_size
}));
let memflags = ir::MemFlags::trusted();
let data_start_pointer = self.get_data(env, builder);
let mut offset = 0;
for value in values {
builder
.ins()
.store(memflags, *value, data_start_pointer, offset);
offset += i32::try_from(entry_size).unwrap();
}
self.set_length(env, builder, store_count);
}
pub fn clear<'a>(
&self,
env: &mut crate::func_environ::FuncEnvironment<'a>,
builder: &mut FunctionBuilder,
discard_buffer: bool,
) {
let zero32 = builder.ins().iconst(I32, 0);
self.set_length(env, builder, zero32);
if discard_buffer {
let zero32 = builder.ins().iconst(I32, 0);
self.set_capacity(env, builder, zero32);
let zero_ptr = builder.ins().iconst(env.pointer_type(), 0);
self.set_data(env, builder, zero_ptr);
}
}
}
impl VMStackChain {
    /// Builds an (in-register) stack chain value whose payload is the
    /// given continuation reference.
    pub fn from_continuation<'a>(
        env: &mut crate::func_environ::FuncEnvironment<'a>,
        builder: &mut FunctionBuilder,
        contref: ir::Value,
    ) -> VMStackChain {
        // A stack chain is exactly two pointer-sized words:
        // (discriminant, payload).
        debug_assert_eq!(
            env.offsets.ptr.size_of_vmstack_chain(),
            2 * env.offsets.ptr.size()
        );
        let discriminant = wasmtime_environ::STACK_CHAIN_CONTINUATION_DISCRIMINANT;
        let discriminant = builder
            .ins()
            .iconst(env.pointer_type(), i64::try_from(discriminant).unwrap());
        VMStackChain {
            discriminant,
            payload: contref,
        }
    }
    /// Builds the "absent" stack chain value; its payload word is a zero
    /// filler.
    pub fn absent<'a>(
        env: &mut crate::func_environ::FuncEnvironment<'a>,
        builder: &mut FunctionBuilder,
    ) -> VMStackChain {
        debug_assert_eq!(
            env.offsets.ptr.size_of_vmstack_chain(),
            2 * env.offsets.ptr.size()
        );
        let discriminant = wasmtime_environ::STACK_CHAIN_ABSENT_DISCRIMINANT;
        let discriminant = builder
            .ins()
            .iconst(env.pointer_type(), i64::try_from(discriminant).unwrap());
        let zero_filler = builder.ins().iconst(env.pointer_type(), 0i64);
        VMStackChain {
            discriminant,
            payload: zero_filler,
        }
    }
    /// Emits a comparison of the discriminant against the "initial stack"
    /// discriminant; returns the boolean result value.
    pub fn is_initial_stack<'a>(
        &self,
        _env: &mut crate::func_environ::FuncEnvironment<'a>,
        builder: &mut FunctionBuilder,
    ) -> ir::Value {
        builder.ins().icmp_imm(
            IntCC::Equal,
            self.discriminant,
            i64::try_from(wasmtime_environ::STACK_CHAIN_INITIAL_STACK_DISCRIMINANT).unwrap(),
        )
    }
    /// Returns the underlying `[discriminant, payload]` pair, e.g. for use
    /// as block arguments.
    pub fn to_raw_parts(&self) -> [ir::Value; 2] {
        [self.discriminant, self.payload]
    }
    /// Inverse of [`Self::to_raw_parts`].
    pub fn from_raw_parts(raw_data: [ir::Value; 2]) -> VMStackChain {
        VMStackChain {
            discriminant: raw_data[0],
            payload: raw_data[1],
        }
    }
    /// Loads a stack chain (two consecutive pointer-sized words) from
    /// `pointer + initial_offset`.
    pub fn load<'a>(
        _env: &mut crate::func_environ::FuncEnvironment<'a>,
        builder: &mut FunctionBuilder,
        pointer: ir::Value,
        initial_offset: i32,
        pointer_type: ir::Type,
    ) -> VMStackChain {
        let memflags = ir::MemFlags::trusted();
        let mut offset = initial_offset;
        let mut data = vec![];
        for _ in 0..2 {
            data.push(builder.ins().load(pointer_type, memflags, pointer, offset));
            offset += i32::try_from(pointer_type.bytes()).unwrap();
        }
        let data = <[ir::Value; 2]>::try_from(data).unwrap();
        Self::from_raw_parts(data)
    }
    /// Stores this chain's two words to `target_pointer + initial_offset`.
    pub fn store<'a>(
        &self,
        env: &mut crate::func_environ::FuncEnvironment<'a>,
        builder: &mut FunctionBuilder,
        target_pointer: ir::Value,
        initial_offset: i32,
    ) {
        let memflags = ir::MemFlags::trusted();
        let mut offset = initial_offset;
        let data = self.to_raw_parts();
        for value in data {
            debug_assert_eq!(builder.func.dfg.value_type(value), env.pointer_type());
            builder.ins().store(memflags, value, target_pointer, offset);
            offset += i32::try_from(env.pointer_type().bytes()).unwrap();
        }
    }
    /// Returns the payload word *without* checking the discriminant; only
    /// meaningful when the chain is known to refer to a continuation.
    pub fn unchecked_get_continuation(&self) -> ir::Value {
        self.payload
    }
    /// Treats the payload as a pointer to common stack information. Relies
    /// on the `VMCommonStackInformation` block sitting at offset 0 of the
    /// contref (checked by the debug assert), so no add is emitted.
    pub fn get_common_stack_information<'a>(
        &self,
        env: &mut crate::func_environ::FuncEnvironment<'a>,
        _builder: &mut FunctionBuilder,
    ) -> VMCommonStackInformation {
        let address = self.payload;
        debug_assert_eq!(env.offsets.ptr.vmcontref_common_stack_information(), 0);
        VMCommonStackInformation { address }
    }
}
impl VMCommonStackInformation {
    /// Address of the 32-bit `state` discriminant field.
    fn get_state_ptr<'a>(
        &self,
        env: &mut crate::func_environ::FuncEnvironment<'a>,
        builder: &mut FunctionBuilder,
    ) -> ir::Value {
        let offset: i64 = env.offsets.ptr.vmcommon_stack_information_state().into();
        builder.ins().iadd_imm(self.address, offset)
    }
    /// Address of the embedded stack-limits block.
    fn get_stack_limits_ptr<'a>(
        &self,
        env: &mut crate::func_environ::FuncEnvironment<'a>,
        builder: &mut FunctionBuilder,
    ) -> ir::Value {
        let offset: i64 = env.offsets.ptr.vmcommon_stack_information_limits().into();
        builder.ins().iadd_imm(self.address, offset)
    }
    /// Loads the 32-bit state discriminant.
    fn load_state<'a>(
        &self,
        env: &mut crate::func_environ::FuncEnvironment<'a>,
        builder: &mut FunctionBuilder,
    ) -> ir::Value {
        let mem_flags = ir::MemFlags::trusted();
        let state_ptr = self.get_state_ptr(env, builder);
        builder.ins().load(I32, mem_flags, state_ptr, 0)
    }
    /// Stores the given state discriminant (no state carries a payload
    /// here, hence the name).
    fn set_state_no_payload<'a>(
        &self,
        env: &mut crate::func_environ::FuncEnvironment<'a>,
        builder: &mut FunctionBuilder,
        discriminant: u32,
    ) {
        let discriminant = builder.ins().iconst(I32, i64::from(discriminant));
        let mem_flags = ir::MemFlags::trusted();
        let state_ptr = self.get_state_ptr(env, builder);
        builder.ins().store(mem_flags, discriminant, state_ptr, 0);
    }
    /// Marks the stack as `Running`.
    pub fn set_state_running<'a>(
        &self,
        env: &mut crate::func_environ::FuncEnvironment<'a>,
        builder: &mut FunctionBuilder,
    ) {
        let discriminant = wasmtime_environ::STACK_STATE_RUNNING_DISCRIMINANT;
        self.set_state_no_payload(env, builder, discriminant);
    }
    /// Marks the stack as `Parent` (i.e. it has a running child).
    pub fn set_state_parent<'a>(
        &self,
        env: &mut crate::func_environ::FuncEnvironment<'a>,
        builder: &mut FunctionBuilder,
    ) {
        let discriminant = wasmtime_environ::STACK_STATE_PARENT_DISCRIMINANT;
        self.set_state_no_payload(env, builder, discriminant);
    }
    /// Marks the stack as `Returned`.
    pub fn set_state_returned<'a>(
        &self,
        env: &mut crate::func_environ::FuncEnvironment<'a>,
        builder: &mut FunctionBuilder,
    ) {
        let discriminant = wasmtime_environ::STACK_STATE_RETURNED_DISCRIMINANT;
        self.set_state_no_payload(env, builder, discriminant);
    }
    /// Marks the stack as `Suspended`.
    pub fn set_state_suspended<'a>(
        &self,
        env: &mut crate::func_environ::FuncEnvironment<'a>,
        builder: &mut FunctionBuilder,
    ) {
        let discriminant = wasmtime_environ::STACK_STATE_SUSPENDED_DISCRIMINANT;
        self.set_state_no_payload(env, builder, discriminant);
    }
    /// Emits a check for whether the stack has left the `Fresh` state,
    /// i.e. whether the continuation was invoked at least once.
    pub fn was_invoked<'a>(
        &self,
        env: &mut crate::func_environ::FuncEnvironment<'a>,
        builder: &mut FunctionBuilder,
    ) -> ir::Value {
        let actual_state = self.load_state(env, builder);
        let allocated = wasmtime_environ::STACK_STATE_FRESH_DISCRIMINANT;
        builder
            .ins()
            .icmp_imm(IntCC::NotEqual, actual_state, i64::from(allocated))
    }
    /// Returns a handle to the embedded handler list array.
    pub fn get_handler_list<'a>(
        &self,
        env: &mut crate::func_environ::FuncEnvironment<'a>,
        builder: &mut FunctionBuilder,
    ) -> VMHandlerList {
        let offset: i64 = env.offsets.ptr.vmcommon_stack_information_handlers().into();
        let address = builder.ins().iadd_imm(self.address, offset);
        VMHandlerList::new(address)
    }
    /// Loads the index that splits the handler list into suspend handlers
    /// (below it) and switch handlers (from it onward).
    pub fn get_first_switch_handler_index<'a>(
        &self,
        env: &mut crate::func_environ::FuncEnvironment<'a>,
        builder: &mut FunctionBuilder,
    ) -> ir::Value {
        let memflags = ir::MemFlags::trusted();
        let offset: i32 = env
            .offsets
            .ptr
            .vmcommon_stack_information_first_switch_handler_index()
            .into();
        builder.ins().load(I32, memflags, self.address, offset)
    }
    /// Stores the first-switch-handler index field.
    pub fn set_first_switch_handler_index<'a>(
        &self,
        env: &mut crate::func_environ::FuncEnvironment<'a>,
        builder: &mut FunctionBuilder,
        value: ir::Value,
    ) {
        let memflags = ir::MemFlags::trusted();
        let offset: i32 = env
            .offsets
            .ptr
            .vmcommon_stack_information_first_switch_handler_index()
            .into();
        builder.ins().store(memflags, value, self.address, offset);
    }
    /// Copies this stack's stack-limit and last-wasm-entry-fp values into
    /// the store context pointed to by `vmruntime_limits_ptr`.
    pub fn write_limits_to_vmcontext<'a>(
        &self,
        env: &mut crate::func_environ::FuncEnvironment<'a>,
        builder: &mut FunctionBuilder,
        vmruntime_limits_ptr: ir::Value,
    ) {
        let stack_limits_ptr = self.get_stack_limits_ptr(env, builder);
        let memflags = ir::MemFlags::trusted();
        // Copies one pointer-sized field from our limits block to the
        // store context.
        let mut copy_to_vm_runtime_limits = |our_offset, their_offset| {
            let our_value = builder.ins().load(
                env.pointer_type(),
                memflags,
                stack_limits_ptr,
                i32::from(our_offset),
            );
            builder.ins().store(
                memflags,
                our_value,
                vmruntime_limits_ptr,
                i32::from(their_offset),
            );
        };
        // `u8` implements `PtrSize`, so the pointer size itself is used to
        // compute the store-context field offsets below.
        let pointer_size = u8::try_from(env.pointer_type().bytes()).unwrap();
        let stack_limit_offset = env.offsets.ptr.vmstack_limits_stack_limit();
        let last_wasm_entry_fp_offset = env.offsets.ptr.vmstack_limits_last_wasm_entry_fp();
        copy_to_vm_runtime_limits(
            stack_limit_offset,
            pointer_size.vmstore_context_stack_limit(),
        );
        copy_to_vm_runtime_limits(
            last_wasm_entry_fp_offset,
            pointer_size.vmstore_context_last_wasm_entry_fp(),
        );
    }
    /// Copies last-wasm-entry-fp (and optionally the stack limit) from the
    /// store context back into this stack's limits block — the inverse of
    /// [`Self::write_limits_to_vmcontext`].
    pub fn load_limits_from_vmcontext<'a>(
        &self,
        env: &mut crate::func_environ::FuncEnvironment<'a>,
        builder: &mut FunctionBuilder,
        vmruntime_limits_ptr: ir::Value,
        load_stack_limit: bool,
    ) {
        let stack_limits_ptr = self.get_stack_limits_ptr(env, builder);
        let memflags = ir::MemFlags::trusted();
        let pointer_size = u8::try_from(env.pointer_type().bytes()).unwrap();
        let mut copy = |runtime_limits_offset, stack_limits_offset| {
            let from_vm_runtime_limits = builder.ins().load(
                env.pointer_type(),
                memflags,
                vmruntime_limits_ptr,
                runtime_limits_offset,
            );
            builder.ins().store(
                memflags,
                from_vm_runtime_limits,
                stack_limits_ptr,
                stack_limits_offset,
            );
        };
        let last_wasm_entry_fp_offset = env.offsets.ptr.vmstack_limits_last_wasm_entry_fp();
        copy(
            pointer_size.vmstore_context_last_wasm_entry_fp(),
            last_wasm_entry_fp_offset,
        );
        if load_stack_limit {
            let stack_limit_offset = env.offsets.ptr.vmstack_limits_stack_limit();
            copy(
                pointer_size.vmstore_context_stack_limit(),
                stack_limit_offset,
            );
        }
    }
}
impl VMContinuationStack {
    /// Wraps the address of a fiber stack's top-of-stack pointer field.
    pub fn new(tos_ptr: ir::Value) -> Self {
        Self { tos_ptr }
    }
    /// Loads the top-of-stack pointer itself.
    fn load_top_of_stack<'a>(
        &self,
        env: &mut crate::func_environ::FuncEnvironment<'a>,
        builder: &mut FunctionBuilder,
    ) -> ir::Value {
        let mem_flags = ir::MemFlags::trusted();
        builder
            .ins()
            .load(env.pointer_type(), mem_flags, self.tos_ptr, 0)
    }
    /// Returns the address of the control context stored just below the
    /// top of stack.
    pub fn load_control_context<'a>(
        &self,
        env: &mut crate::func_environ::FuncEnvironment<'a>,
        builder: &mut FunctionBuilder,
    ) -> ir::Value {
        let tos = self.load_top_of_stack(env, builder);
        // -0x18 == -24 bytes; presumably the control-context size (cf.
        // `control_context_size`, which returns 24 for x86-64 Linux) —
        // TODO(review): confirm these two constants stay in sync.
        builder.ins().iadd_imm(tos, -0x18)
    }
}
}
use helpers::VMStackChain;
use stack_switching_helpers as helpers;
/// Writes `values` into the payload buffer of the continuation `contref`.
///
/// The destination depends on the continuation's state: a continuation
/// that was never invoked receives the values in its `args` array, while
/// an already-invoked one receives them in its `values` array. In either
/// case the required slots are reserved via `occupy_next_slots` and each
/// value is stored with a stride of `maximum_value_size` bytes.
///
/// Emits no code at all when `values` is empty.
pub(crate) fn vmcontref_store_payloads<'a>(
    env: &mut crate::func_environ::FuncEnvironment<'a>,
    builder: &mut FunctionBuilder,
    values: &[ir::Value],
    contref: ir::Value,
) {
    // Nothing to store, nothing to emit.
    if values.is_empty() {
        return;
    }
    let count =
        i32::try_from(values.len()).expect("Number of stack switching payloads should fit in i32");
    // Diamond CFG: pick the destination array depending on `was_invoked`,
    // then join in `store_data_block`, which receives the destination
    // pointer as a block parameter.
    let use_args_block = builder.create_block();
    let use_payloads_block = builder.create_block();
    let store_data_block = builder.create_block();
    builder.append_block_param(store_data_block, env.pointer_type());
    let co = helpers::VMContRef::new(contref);
    let csi = co.common_stack_information(env, builder);
    let was_invoked = csi.was_invoked(env, builder);
    builder
        .ins()
        .brif(was_invoked, use_payloads_block, &[], use_args_block, &[]);
    {
        // Fresh continuation: append to the `args` array.
        builder.switch_to_block(use_args_block);
        builder.seal_block(use_args_block);
        let args = co.args(env, builder);
        let ptr = args.occupy_next_slots(env, builder, count);
        builder
            .ins()
            .jump(store_data_block, &[BlockArg::Value(ptr)]);
    }
    {
        // Already-invoked continuation: append to the `values` array.
        builder.switch_to_block(use_payloads_block);
        builder.seal_block(use_payloads_block);
        let payloads = co.values(env, builder);
        let ptr = payloads.occupy_next_slots(env, builder, count);
        builder
            .ins()
            .jump(store_data_block, &[BlockArg::Value(ptr)]);
    }
    {
        // Join point: store every value at a fixed stride.
        builder.switch_to_block(store_data_block);
        builder.seal_block(store_data_block);
        let ptr = builder.block_params(store_data_block)[0];
        let memflags = ir::MemFlags::trusted();
        let mut offset = 0;
        for value in values {
            builder.ins().store(memflags, *value, ptr, offset);
            offset += i32::from(env.offsets.ptr.maximum_value_size());
        }
    }
}
/// Returns the address of the `VMTagDefinition` for the tag with the given
/// module-level `index`.
///
/// Tags defined in this module live inline in the `VMContext`, so the
/// address is computed as `vmctx + offset`; imported tags require loading
/// the definition pointer from the (read-only) import table instead.
pub(crate) fn tag_address<'a>(
    env: &mut crate::func_environ::FuncEnvironment<'a>,
    builder: &mut FunctionBuilder,
    index: u32,
) -> ir::Value {
    let vmctx = env.vmctx_val(&mut builder.cursor());
    let tag_index = wasmtime_environ::TagIndex::from_u32(index);
    let pointer_type = env.pointer_type();
    if let Some(def_index) = env.module.defined_tag_index(tag_index) {
        // Locally defined tag: its definition is embedded in the vmctx.
        let offset = i32::try_from(env.offsets.vmctx_vmtag_definition(def_index)).unwrap();
        builder.ins().iadd_imm(vmctx, i64::from(offset))
    } else {
        // Imported tag: load the pointer stored in the import table.
        let offset = i32::try_from(env.offsets.vmctx_vmtag_import_from(tag_index)).unwrap();
        builder.ins().load(
            pointer_type,
            ir::MemFlags::trusted().with_readonly(),
            vmctx,
            ir::immediates::Offset32::new(offset),
        )
    }
}
/// Loads the store's current stack chain.
///
/// Follows the store-context pointer held in `vmctx`, then reads the
/// two-word `VMStackChain` at its `stack_chain` offset.
pub fn vmctx_load_stack_chain<'a>(
    env: &mut crate::func_environ::FuncEnvironment<'a>,
    builder: &mut FunctionBuilder,
    vmctx: ir::Value,
) -> VMStackChain {
    let ptr_ty = env.pointer_type();
    // vmctx -> *VMStoreContext
    let store_ctx_offset = env.offsets.ptr.vmctx_store_context();
    let store_ctx =
        builder
            .ins()
            .load(ptr_ty, MemFlags::trusted(), vmctx, store_ctx_offset);
    // *VMStoreContext -> VMStackChain (two pointer-sized loads)
    let chain_offset = env.offsets.ptr.vmstore_context_stack_chain().into();
    VMStackChain::load(env, builder, store_ctx, chain_offset, ptr_ty)
}
/// Writes `stack_chain` into the store's stack-chain slot.
///
/// Mirrors `vmctx_load_stack_chain`: follows the store-context pointer in
/// `vmctx`, then stores the two-word chain at the `stack_chain` offset.
pub fn vmctx_store_stack_chain<'a>(
    env: &mut crate::func_environ::FuncEnvironment<'a>,
    builder: &mut FunctionBuilder,
    vmctx: ir::Value,
    stack_chain: &VMStackChain,
) {
    // vmctx -> *VMStoreContext
    let store_ctx_offset = env.offsets.ptr.vmctx_store_context();
    let store_ctx = builder.ins().load(
        env.pointer_type(),
        MemFlags::trusted(),
        vmctx,
        store_ctx_offset,
    );
    // Store both chain words into the store context.
    let chain_offset = env.offsets.ptr.vmstore_context_stack_chain().into();
    stack_chain.store(env, builder, store_ctx, chain_offset)
}
/// Makes `contref` the store's active continuation by installing a
/// continuation-flavored stack chain pointing at it.
pub fn vmctx_set_active_continuation<'a>(
    env: &mut crate::func_environ::FuncEnvironment<'a>,
    builder: &mut FunctionBuilder,
    vmctx: ir::Value,
    contref: ir::Value,
) {
    let new_chain = VMStackChain::from_continuation(env, builder, contref);
    vmctx_store_stack_chain(env, builder, vmctx, &new_chain)
}
/// Loads the pointer stored at the `vmctx_store_context` offset of the
/// `VMContext` (the same field `vmctx_load_stack_chain` dereferences).
///
/// NOTE(review): the function name says "vm runtime limits" but the code
/// reads the store-context field — presumably the name predates a rename;
/// confirm against the runtime's `VMStoreContext` definition.
pub fn vmctx_load_vm_runtime_limits_ptr<'a>(
    env: &mut crate::func_environ::FuncEnvironment<'a>,
    builder: &mut FunctionBuilder,
    vmctx: ir::Value,
) -> ir::Value {
    let pointer_type = env.pointer_type();
    let offset = i32::from(env.offsets.ptr.vmctx_store_context());
    // The pointer in the vmctx never changes, hence readonly.
    let flags = ir::MemFlags::trusted().with_readonly();
    builder.ins().load(pointer_type, flags, vmctx, offset)
}
/// Emits a loop walking the stack chain from `start` towards the initial
/// stack, searching each link's *parent* handler list for `tag_address`.
///
/// If `search_suspend_handlers` is true only indices
/// `[0, first_switch_handler_index)` are scanned, otherwise
/// `[first_switch_handler_index, length)`. Reaching the initial stack
/// without a match traps with `TRAP_UNHANDLED_TAG`.
///
/// On return the builder is positioned in the (unfilled-control-flow)
/// `on_match` block, and the result is `(parent chain of the matching
/// link, address of the matching link's contref, matching handler index)`.
fn search_handler<'a>(
    env: &mut crate::func_environ::FuncEnvironment<'a>,
    builder: &mut FunctionBuilder,
    start: &helpers::VMStackChain,
    tag_address: ir::Value,
    search_suspend_handlers: bool,
) -> (VMStackChain, ir::Value, ir::Value) {
    // Loop header (takes the current chain link as two block params).
    let handle_link = builder.create_block();
    // Sets up the scan over one link's parent handler list.
    let begin_search_handler_list = builder.create_block();
    // Inner loop header (takes the current index as a block param).
    let try_index = builder.create_block();
    let compare_tags = builder.create_block();
    let on_match = builder.create_block();
    let on_no_match = builder.create_block();
    // Enter the loop with the starting chain link.
    let block_args = start.to_raw_parts().map(|v| BlockArg::Value(v));
    builder.ins().jump(handle_link, &block_args);
    let chain_link = {
        builder.append_block_param(handle_link, env.pointer_type());
        builder.append_block_param(handle_link, env.pointer_type());
        builder.switch_to_block(handle_link);
        let raw_parts = builder.block_params(handle_link);
        let chain_link = helpers::VMStackChain::from_raw_parts([raw_parts[0], raw_parts[1]]);
        // Hitting the initial stack means no handler anywhere: trap.
        let is_initial_stack = chain_link.is_initial_stack(env, builder);
        builder.ins().brif(
            is_initial_stack,
            on_no_match,
            &[],
            begin_search_handler_list,
            &[],
        );
        chain_link
    };
    let (contref, parent_link, handler_list_data_ptr, end_range) = {
        builder.switch_to_block(begin_search_handler_list);
        // The handlers for this link live in its *parent's* common stack
        // information.
        let contref = chain_link.unchecked_get_continuation();
        let contref = helpers::VMContRef::new(contref);
        let parent_link = contref.get_parent_stack_chain(env, builder);
        let parent_csi = parent_link.get_common_stack_information(env, builder);
        let handlers = parent_csi.get_handler_list(env, builder);
        let handler_list_data_ptr = handlers.get_data(env, builder);
        // `first_switch_handler_index` splits the list into suspend
        // handlers (before it) and switch handlers (from it onward).
        let first_switch_handler_index = parent_csi.get_first_switch_handler_index(env, builder);
        let (begin_range, end_range) = if search_suspend_handlers {
            let zero = builder.ins().iconst(I32, 0);
            (zero, first_switch_handler_index)
        } else {
            let length = handlers.get_length(env, builder);
            (first_switch_handler_index, length)
        };
        builder
            .ins()
            .jump(try_index, &[BlockArg::Value(begin_range)]);
        (contref, parent_link, handler_list_data_ptr, end_range)
    };
    let index = {
        builder.append_block_param(try_index, I32);
        builder.switch_to_block(try_index);
        let index = builder.block_params(try_index)[0];
        let in_bounds = builder
            .ins()
            .icmp(IntCC::UnsignedLessThan, index, end_range);
        // Out of range: continue the outer loop with the parent link.
        let block_args = parent_link.to_raw_parts().map(|v| BlockArg::Value(v));
        builder
            .ins()
            .brif(in_bounds, compare_tags, &[], handle_link, &block_args);
        index
    };
    {
        builder.switch_to_block(compare_tags);
        // Load the tag address stored at `data + index * pointer_size` and
        // compare it with the one we are searching for.
        let base = handler_list_data_ptr;
        let entry_size = env.pointer_type().bytes();
        let offset = builder.ins().imul_imm(index, i64::from(entry_size));
        let offset = builder.ins().uextend(I64, offset);
        let entry_address = builder.ins().iadd(base, offset);
        let memflags = ir::MemFlags::trusted();
        let handled_tag = builder
            .ins()
            .load(env.pointer_type(), memflags, entry_address, 0);
        let tags_match = builder.ins().icmp(IntCC::Equal, handled_tag, tag_address);
        let incremented_index = builder.ins().iadd_imm(index, 1);
        builder.ins().brif(
            tags_match,
            on_match,
            &[],
            try_index,
            &[BlockArg::Value(incremented_index)],
        );
    }
    {
        builder.switch_to_block(on_no_match);
        builder.set_cold_block(on_no_match);
        builder.ins().trap(crate::TRAP_UNHANDLED_TAG);
    }
    builder.seal_block(handle_link);
    builder.seal_block(begin_search_handler_list);
    builder.seal_block(try_index);
    builder.seal_block(compare_tags);
    builder.seal_block(on_match);
    builder.seal_block(on_no_match);
    builder.switch_to_block(on_match);
    (parent_link, contref.address, index)
}
/// Translates `cont.bind`: partially applies `args` to the continuation
/// behind `contobj` and returns a fresh continuation object.
///
/// Traps with `TRAP_NULL_REFERENCE` on a null continuation and with
/// `TRAP_CONTINUATION_ALREADY_CONSUMED` when the object's revision witness
/// no longer matches the contref's current revision (linearity check).
pub(crate) fn translate_cont_bind<'a>(
    env: &mut crate::func_environ::FuncEnvironment<'a>,
    builder: &mut FunctionBuilder,
    contobj: ir::Value,
    args: &[ir::Value],
) -> ir::Value {
    // Split the fat pointer into its revision witness and the raw contref.
    let (witness, contref) = fatpointer::deconstruct(env, &mut builder.cursor(), contobj);
    builder.ins().trapz(contref, crate::TRAP_NULL_REFERENCE);
    // Linearity check: the witness must equal the stored revision.
    let mut contref_helper = helpers::VMContRef::new(contref);
    let current_revision = contref_helper.get_revision(env, builder);
    let revisions_match = builder
        .ins()
        .icmp(IntCC::Equal, witness, current_revision);
    builder
        .ins()
        .trapz(revisions_match, crate::TRAP_CONTINUATION_ALREADY_CONSUMED);
    // Stash the bound arguments in the continuation's payload buffer.
    vmcontref_store_payloads(env, builder, args, contref);
    // Bump the revision (invalidating the old contobj) and hand out a new
    // fat pointer carrying the incremented revision.
    let new_revision = contref_helper.incr_revision(env, builder, current_revision);
    fatpointer::construct(env, &mut builder.cursor(), new_revision, contref)
}
/// Translates `cont.new`: allocates a continuation for `func` by calling
/// the `cont_new` builtin and returns a fat pointer pairing the fresh
/// contref with its current revision.
///
/// Traps with `TRAP_NULL_REFERENCE` if `func` is null. The argument and
/// return counts are passed to the builtin as I32 constants.
pub(crate) fn translate_cont_new<'a>(
    env: &mut crate::func_environ::FuncEnvironment<'a>,
    builder: &mut FunctionBuilder,
    func: ir::Value,
    arg_types: &[WasmValType],
    return_types: &[WasmValType],
) -> WasmResult<ir::Value> {
    builder.ins().trapz(func, crate::TRAP_NULL_REFERENCE);
    let nargs = builder
        .ins()
        .iconst(I32, i64::try_from(arg_types.len()).unwrap());
    let nreturns = builder
        .ins()
        .iconst(I32, i64::try_from(return_types.len()).unwrap());
    // `cont_new(vmctx, func, nargs, nreturns)` returns the new contref.
    let cont_new_func = super::builtins::cont_new(env, &mut builder.func)?;
    let vmctx = env.vmctx_val(&mut builder.cursor());
    let call_inst = builder
        .ins()
        .call(cont_new_func, &[vmctx, func, nargs, nreturns]);
    let contref = *builder.func.dfg.inst_results(call_inst).first().unwrap();
    // Pair the contref with its revision to form the continuation object.
    let tag = helpers::VMContRef::new(contref).get_revision(env, builder);
    let contobj = fatpointer::construct(env, &mut builder.cursor(), tag, contref);
    Ok(contobj)
}
pub(crate) fn translate_resume<'a>(
env: &mut crate::func_environ::FuncEnvironment<'a>,
builder: &mut FunctionBuilder,
type_index: u32,
resume_contobj: ir::Value,
resume_args: &[ir::Value],
resumetable: &[(u32, Option<ir::Block>)],
) -> WasmResult<Vec<ir::Value>> {
let resume_block = builder.create_block();
let return_block = builder.create_block();
let suspend_block = builder.create_block();
let dispatch_block = builder.create_block();
let vmctx = env.vmctx_val(&mut builder.cursor());
let (suspend_handlers, switch_tags): (Vec<(u32, Block)>, Vec<u32>) = resumetable
.iter()
.partition_map(|(tag_index, block_opt)| match block_opt {
Some(block) => Either::Left((*tag_index, *block)),
None => Either::Right(*tag_index),
});
builder.ins().jump(resume_block, &[]);
let (resume_result, vm_runtime_limits_ptr, original_stack_chain, new_stack_chain) = {
builder.switch_to_block(resume_block);
builder.seal_block(resume_block);
let (witness, resume_contref) =
fatpointer::deconstruct(env, &mut builder.cursor(), resume_contobj);
builder
.ins()
.trapz(resume_contref, crate::TRAP_NULL_REFERENCE);
let mut vmcontref = helpers::VMContRef::new(resume_contref);
let revision = vmcontref.get_revision(env, builder);
let evidence = builder.ins().icmp(IntCC::Equal, revision, witness);
builder
.ins()
.trapz(evidence, crate::TRAP_CONTINUATION_ALREADY_CONSUMED);
let _next_revision = vmcontref.incr_revision(env, builder, revision);
if resume_args.len() > 0 {
vmcontref_store_payloads(env, builder, resume_args, resume_contref);
}
let mut last_ancestor = helpers::VMContRef::new(vmcontref.get_last_ancestor(env, builder));
let original_stack_chain = vmctx_load_stack_chain(env, builder, vmctx);
last_ancestor.set_parent_stack_chain(env, builder, &original_stack_chain);
let zero = builder.ins().iconst(env.pointer_type(), 0);
vmcontref.set_last_ancestor(env, builder, zero);
vmctx_set_active_continuation(env, builder, vmctx, resume_contref);
let resume_contref = helpers::VMContRef::new(resume_contref);
let resume_csi = resume_contref.common_stack_information(env, builder);
let parent_csi = original_stack_chain.get_common_stack_information(env, builder);
resume_csi.set_state_running(env, builder);
parent_csi.set_state_parent(env, builder);
let vm_runtime_limits_ptr = vmctx_load_vm_runtime_limits_ptr(env, builder, vmctx);
parent_csi.load_limits_from_vmcontext(env, builder, vm_runtime_limits_ptr, true);
resume_csi.write_limits_to_vmcontext(env, builder, vm_runtime_limits_ptr);
let handler_list = parent_csi.get_handler_list(env, builder);
if resumetable.len() > 0 {
let handler_count = u32::try_from(resumetable.len()).unwrap();
env.stack_switching_handler_list_buffer =
Some(handler_list.allocate_or_reuse_stack_slot(
env,
builder,
handler_count,
env.stack_switching_handler_list_buffer,
));
let suspend_handler_count = suspend_handlers.len();
let all_handlers = suspend_handlers
.iter()
.map(|(tag_index, _block)| *tag_index)
.chain(switch_tags);
let all_tag_addresses: Vec<ir::Value> = all_handlers
.map(|tag_index| tag_address(env, builder, tag_index))
.collect();
handler_list.store_data_entries(env, builder, &all_tag_addresses);
let first_switch_handler_index = builder
.ins()
.iconst(I32, i64::try_from(suspend_handler_count).unwrap());
parent_csi.set_first_switch_handler_index(env, builder, first_switch_handler_index);
}
let resume_payload = ControlEffect::encode_resume(builder).to_u64();
let fiber_stack = last_ancestor.get_fiber_stack(env, builder);
let control_context_ptr = fiber_stack.load_control_context(env, builder);
let result =
builder
.ins()
.stack_switch(control_context_ptr, control_context_ptr, resume_payload);
let new_stack_chain = vmctx_load_stack_chain(env, builder, vmctx);
vmctx_store_stack_chain(env, builder, vmctx, &original_stack_chain);
parent_csi.set_state_running(env, builder);
handler_list.clear(env, builder, true);
parent_csi.set_first_switch_handler_index(env, builder, zero);
let result = ControlEffect::from_u64(result);
let signal = result.signal(builder);
builder
.ins()
.brif(signal, suspend_block, &[], return_block, &[]);
(
result,
vm_runtime_limits_ptr,
original_stack_chain,
new_stack_chain,
)
};
let (handler_index, suspended_contref, suspended_contobj) = {
builder.switch_to_block(suspend_block);
builder.seal_block(suspend_block);
let suspended_continuation = new_stack_chain.unchecked_get_continuation();
let mut suspended_continuation = helpers::VMContRef::new(suspended_continuation);
let suspended_csi = suspended_continuation.common_stack_information(env, builder);
suspended_csi.load_limits_from_vmcontext(env, builder, vm_runtime_limits_ptr, false);
let parent_csi = original_stack_chain.get_common_stack_information(env, builder);
parent_csi.write_limits_to_vmcontext(env, builder, vm_runtime_limits_ptr);
let handler_index = resume_result.handler_index(builder);
let revision = suspended_continuation.get_revision(env, builder);
let suspended_contobj = fatpointer::construct(
env,
&mut builder.cursor(),
revision,
suspended_continuation.address,
);
builder.ins().jump(dispatch_block, &[]);
(handler_index, suspended_continuation, suspended_contobj)
};
let jt_default_block = builder.create_block();
{
builder.switch_to_block(jt_default_block);
builder.set_cold_block(jt_default_block);
builder.ins().trap(crate::TRAP_UNREACHABLE);
}
let target_preamble_blocks = {
let mut preamble_blocks = vec![];
for &(handle_tag, target_block) in &suspend_handlers {
let preamble_block = builder.create_block();
preamble_blocks.push(preamble_block);
builder.switch_to_block(preamble_block);
let param_types = env.tag_params(TagIndex::from_u32(handle_tag));
let param_types: Vec<ir::Type> = param_types
.iter()
.map(|wty| crate::value_type(env.isa(), *wty))
.collect();
let values = suspended_contref.values(env, builder);
let mut suspend_args: Vec<BlockArg> = values
.load_data_entries(env, builder, ¶m_types)
.into_iter()
.map(|v| BlockArg::Value(v))
.collect();
suspend_args.push(BlockArg::Value(suspended_contobj));
values.clear(env, builder, false);
builder.ins().jump(target_block, &suspend_args);
}
preamble_blocks
};
{
builder.switch_to_block(dispatch_block);
builder.seal_block(dispatch_block);
let default_bc = builder.func.dfg.block_call(jt_default_block, &[]);
let adapter_bcs: Vec<BlockCall> = target_preamble_blocks
.iter()
.map(|b| builder.func.dfg.block_call(*b, &[]))
.collect();
let jt_data = JumpTableData::new(default_bc, &adapter_bcs);
let jt = builder.create_jump_table(jt_data);
builder.ins().br_table(handler_index, jt);
for preamble_block in target_preamble_blocks {
builder.seal_block(preamble_block);
}
builder.seal_block(jt_default_block);
}
{
builder.switch_to_block(return_block);
builder.seal_block(return_block);
let returned_contref = new_stack_chain.unchecked_get_continuation();
let returned_contref = helpers::VMContRef::new(returned_contref);
let parent_csi = original_stack_chain.get_common_stack_information(env, builder);
parent_csi.write_limits_to_vmcontext(env, builder, vm_runtime_limits_ptr);
let returned_csi = returned_contref.common_stack_information(env, builder);
returned_csi.set_state_returned(env, builder);
let return_types: Vec<_> = env
.continuation_returns(TypeIndex::from_u32(type_index))
.iter()
.map(|ty| crate::value_type(env.isa(), *ty))
.collect();
let payloads = returned_contref.args(env, builder);
let return_values = payloads.load_data_entries(env, builder, &return_types);
payloads.clear(env, builder, true);
Ok(return_values)
}
}
/// Translates the stack-switching `suspend` instruction for `tag_index`.
///
/// Searches the active chain of continuations for a handler of the tag,
/// stores `suspend_args` into the active continuation's payload buffer,
/// and performs a `stack_switch` to the stack holding the handler. When
/// the continuation is later resumed, execution continues after the
/// `stack_switch`; the values delivered by the resumer (typed by
/// `tag_return_types`) are then loaded from the payload buffer and
/// returned.
pub(crate) fn translate_suspend<'a>(
    env: &mut crate::func_environ::FuncEnvironment<'a>,
    builder: &mut FunctionBuilder,
    tag_index: u32,
    suspend_args: &[ir::Value],
    tag_return_types: &[ir::Type],
) -> Vec<ir::Value> {
    // Find a handler for this tag up the chain. The contref at the end of
    // the handled segment becomes the active continuation's last ancestor.
    let tag_ptr = tag_address(env, builder, tag_index);
    let vmctx = env.vmctx_val(&mut builder.cursor());
    let current_chain = vmctx_load_stack_chain(env, builder, vmctx);
    let (_, boundary_contref, handler_index) =
        search_handler(env, builder, &current_chain, tag_ptr, true);

    let suspending_contref = current_chain.unchecked_get_continuation();
    let suspending_contref = helpers::VMContRef::new(suspending_contref);
    let mut boundary_contref = helpers::VMContRef::new(boundary_contref);
    suspending_contref.set_last_ancestor(env, builder, boundary_contref.address);

    // Size the payload buffer so that it can hold both the suspend
    // arguments we pass out and the tag results we will receive back.
    let payload_buffer = suspending_contref.values(env, builder);
    let needed_slots = u32::try_from(suspend_args.len().max(tag_return_types.len()))
        .expect("Number of stack switching payloads should fit in u32");
    if needed_slots > 0 {
        let slot = payload_buffer.allocate_or_reuse_stack_slot(
            env,
            builder,
            needed_slots,
            env.stack_switching_values_buffer,
        );
        env.stack_switching_values_buffer = Some(slot);
    }
    if !suspend_args.is_empty() {
        payload_buffer.store_data_entries(env, builder, suspend_args);
    }

    // Mark the active continuation suspended and detach the end of the
    // handled segment from its parent before transferring control.
    let csi = suspending_contref.common_stack_information(env, builder);
    csi.set_state_suspended(env, builder);
    let detached = VMStackChain::absent(env, builder);
    boundary_contref.set_parent_stack_chain(env, builder, &detached);

    // Switch stacks, encoding a suspend signal that carries the index of
    // the matched handler.
    let payload = ControlEffect::encode_suspend(builder, handler_index).to_u64();
    let stack = boundary_contref.get_fiber_stack(env, builder);
    let cc_ptr = stack.load_control_context(env, builder);
    builder.ins().stack_switch(cc_ptr, cc_ptr, payload);

    // Control returned to us: load the tag's return values, then clear the
    // payload buffer.
    let payload_buffer = suspending_contref.values(env, builder);
    let results = payload_buffer.load_data_entries(env, builder, tag_return_types);
    payload_buffer.clear(env, builder, true);
    results
}
/// Translates the stack-switching `switch` instruction: transfers control
/// directly from the currently running continuation (the "switcher") to
/// the continuation referenced by `switchee_contobj`, passing
/// `switch_args` plus a freshly constructed continuation object for the
/// switcher itself as payloads.
///
/// Returns the values (typed by `return_types`) found in the switcher's
/// payload buffer once control eventually comes back to it. Errors if the
/// target triple does not support stack switching (see
/// `control_context_size`).
pub(crate) fn translate_switch<'a>(
    env: &mut crate::func_environ::FuncEnvironment<'a>,
    builder: &mut FunctionBuilder,
    tag_index: u32,
    switchee_contobj: ir::Value,
    switch_args: &[ir::Value],
    return_types: &[ir::Type],
) -> WasmResult<Vec<ir::Value>> {
    let vmctx = env.vmctx_val(&mut builder.cursor());
    // Validate the switchee: trap on a null reference, and trap if the fat
    // pointer's revision witness no longer matches the contref's stored
    // revision (i.e. the continuation was already consumed). Then bump the
    // revision so any other outstanding object for this contref goes stale.
    let switchee_contref = {
        let (witness, target_contref) =
            fatpointer::deconstruct(env, &mut builder.cursor(), switchee_contobj);
        builder
            .ins()
            .trapz(target_contref, crate::TRAP_NULL_REFERENCE);
        let mut target_contref = helpers::VMContRef::new(target_contref);
        let revision = target_contref.get_revision(env, builder);
        let evidence = builder.ins().icmp(IntCC::Equal, revision, witness);
        builder
            .ins()
            .trapz(evidence, crate::TRAP_CONTINUATION_ALREADY_CONSUMED);
        let _next_revision = target_contref.incr_revision(env, builder, revision);
        target_contref
    };
    // Suspend the currently running continuation (the switcher): record
    // its last ancestor, prepare its payload buffer for the values it will
    // receive back, mark it suspended, detach the end of its chain segment,
    // snapshot the runtime limits into it, and build a new continuation
    // object (with the current revision) that the switchee can use to come
    // back.
    let (
        switcher_contref,
        switcher_contobj,
        switcher_contref_last_ancestor,
        handler_stack_chain,
        vm_runtime_limits_ptr,
    ) = {
        let tag_addr = tag_address(env, builder, tag_index);
        let active_stack_chain = vmctx_load_stack_chain(env, builder, vmctx);
        // NOTE(review): the final `false` flag differs from the `true`
        // passed in translate_suspend — presumably restricting the search
        // to switch handlers; confirm against search_handler's definition.
        let (handler_stack_chain, last_ancestor, _handler_index) =
            search_handler(env, builder, &active_stack_chain, tag_addr, false);
        let mut last_ancestor = helpers::VMContRef::new(last_ancestor);
        let switcher_contref = active_stack_chain.unchecked_get_continuation();
        let mut switcher_contref = helpers::VMContRef::new(switcher_contref);
        switcher_contref.set_last_ancestor(env, builder, last_ancestor.address);
        // Make sure the payload buffer can hold the values delivered when
        // control returns to the switcher.
        let values = switcher_contref.values(env, builder);
        let required_capacity = u32::try_from(return_types.len()).unwrap();
        if required_capacity > 0 {
            env.stack_switching_values_buffer = Some(values.allocate_or_reuse_stack_slot(
                env,
                builder,
                required_capacity,
                env.stack_switching_values_buffer,
            ));
        }
        let switcher_contref_csi = switcher_contref.common_stack_information(env, builder);
        switcher_contref_csi.set_state_suspended(env, builder);
        // Sever the handled segment from its parent chain.
        let absent = VMStackChain::absent(env, builder);
        last_ancestor.set_parent_stack_chain(env, builder, &absent);
        let vm_runtime_limits_ptr = vmctx_load_vm_runtime_limits_ptr(env, builder, vmctx);
        switcher_contref_csi.load_limits_from_vmcontext(env, builder, vm_runtime_limits_ptr, false);
        let revision = switcher_contref.get_revision(env, builder);
        let new_contobj = fatpointer::construct(
            env,
            &mut builder.cursor(),
            revision,
            switcher_contref.address,
        );
        (
            switcher_contref,
            new_contobj,
            last_ancestor,
            handler_stack_chain,
            vm_runtime_limits_ptr,
        )
    };
    // Activate the switchee: hand it the switch arguments plus the
    // switcher's continuation object, mark it running, and splice its last
    // ancestor onto the handler's stack chain.
    let (switchee_contref_csi, switchee_contref_last_ancestor) = {
        let mut combined_payloads = switch_args.to_vec();
        combined_payloads.push(switcher_contobj);
        vmcontref_store_payloads(env, builder, &combined_payloads, switchee_contref.address);
        let switchee_contref_csi = switchee_contref.common_stack_information(env, builder);
        switchee_contref_csi.set_state_running(env, builder);
        let switchee_contref_last_ancestor = switchee_contref.get_last_ancestor(env, builder);
        let mut switchee_contref_last_ancestor =
            helpers::VMContRef::new(switchee_contref_last_ancestor);
        switchee_contref_last_ancestor.set_parent_stack_chain(env, builder, &handler_stack_chain);
        (switchee_contref_csi, switchee_contref_last_ancestor)
    };
    {
        // Publish the switchee as the active continuation and install its
        // limits in the vmctx.
        vmctx_set_active_continuation(env, builder, vmctx, switchee_contref.address);
        switchee_contref_csi.write_limits_to_vmcontext(env, builder, vm_runtime_limits_ptr);
    }
    {
        // Swap the two control contexts and perform the actual stack
        // switch. The switchee's control context is first copied (in
        // pointer-sized chunks) into a temporary stack slot, then the
        // switcher's control context is copied over the switchee's slot,
        // and finally `stack_switch` stores into the switcher's slot while
        // loading the resume point from the temporary copy.
        let switcher_last_ancestor_fs =
            switcher_contref_last_ancestor.get_fiber_stack(env, builder);
        let switcher_last_ancestor_cc =
            switcher_last_ancestor_fs.load_control_context(env, builder);
        let switchee_last_ancestor_fs =
            switchee_contref_last_ancestor.get_fiber_stack(env, builder);
        let switchee_last_ancestor_cc =
            switchee_last_ancestor_fs.load_control_context(env, builder);
        // Fails (with a wasm_unsupported error) on triples where stack
        // switching is not implemented.
        let cctx_size = control_context_size(env.isa().triple())?;
        let slot_size = ir::StackSlotData::new(
            ir::StackSlotKind::ExplicitSlot,
            u32::from(cctx_size),
            u8::try_from(env.pointer_type().bytes()).unwrap(),
        );
        let slot = builder.create_sized_stack_slot(slot_size);
        let tmp_control_context = builder.ins().stack_addr(env.pointer_type(), slot, 0);
        let flags = MemFlags::trusted();
        let mut offset: i32 = 0;
        // Emit an unrolled copy of `cctx_size` bytes, one pointer-sized
        // load/store pair per direction per iteration.
        while offset < i32::from(cctx_size) {
            // switchee's control context -> temporary slot
            let tmp1 =
                builder
                    .ins()
                    .load(env.pointer_type(), flags, switchee_last_ancestor_cc, offset);
            builder
                .ins()
                .store(flags, tmp1, tmp_control_context, offset);
            // switcher's control context -> switchee's slot
            let tmp2 =
                builder
                    .ins()
                    .load(env.pointer_type(), flags, switcher_last_ancestor_cc, offset);
            builder
                .ins()
                .store(flags, tmp2, switchee_last_ancestor_cc, offset);
            offset += i32::try_from(env.pointer_type().bytes()).unwrap();
        }
        let switch_payload = ControlEffect::encode_switch(builder).to_u64();
        // The encoded signal is delivered to the resumed stack; the result
        // received when control returns here is deliberately ignored.
        let _result = builder.ins().stack_switch(
            switcher_last_ancestor_cc,
            tmp_control_context,
            switch_payload,
        );
    }
    // Control has come back to the switcher: read the delivered values out
    // of its payload buffer and clear it.
    let return_values = {
        let payloads = switcher_contref.values(env, builder);
        let return_values = payloads.load_data_entries(env, builder, return_types);
        payloads.clear(env, builder, true);
        return_values
    };
    Ok(return_values)
}