use super::{ArrayInit, GcCompiler};
use crate::bounds_checks::BoundsCheck;
use crate::func_environ::{Extension, FuncEnvironment};
use crate::translate::{Heap, HeapData, StructFieldsVec, TargetEnvironment};
use crate::trap::TranslateTrap;
use crate::{Reachability, TRAP_INTERNAL_ASSERT};
use cranelift_codegen::ir::immediates::Offset32;
use cranelift_codegen::ir::{BlockArg, ExceptionTableData, ExceptionTableItem};
use cranelift_codegen::{
cursor::FuncCursor,
ir::{self, InstBuilder, condcodes::IntCC},
};
use cranelift_entity::packed_option::ReservedValue;
use cranelift_frontend::FunctionBuilder;
use smallvec::{SmallVec, smallvec};
use wasmtime_environ::{
Collector, GcArrayLayout, GcLayout, GcStructLayout, I31_DISCRIMINANT, ModuleInternedTypeIndex,
PtrSize, TagIndex, TypeIndex, VMGcKind, WasmCompositeInnerType, WasmHeapTopType, WasmHeapType,
WasmRefType, WasmResult, WasmStorageType, WasmValType, wasm_unsupported,
};
#[cfg(feature = "gc-drc")]
mod drc;
#[cfg(feature = "gc-null")]
mod null;
/// Get the `GcCompiler` implementation for the configured collector, or an
/// error if no suitable collector implementation was enabled at compile
/// time.
///
/// As a side effect, marks this function as needing a GC heap.
pub fn gc_compiler(func_env: &mut FuncEnvironment<'_>) -> WasmResult<Box<dyn GcCompiler>> {
    // Any code path that needs a GC compiler implies GC-heap usage.
    func_env.needs_gc_heap = true;

    match func_env.tunables.collector {
        // Each collector choice has a pair of arms: one for when its cargo
        // feature is enabled, one producing an error when it is not.
        #[cfg(feature = "gc-drc")]
        Some(Collector::DeferredReferenceCounting) => Ok(Box::new(drc::DrcCompiler::default())),
        #[cfg(not(feature = "gc-drc"))]
        Some(Collector::DeferredReferenceCounting) => Err(wasm_unsupported!(
            "the DRC collector is unavailable because the `gc-drc` feature \
             was disabled at compile time",
        )),

        #[cfg(feature = "gc-null")]
        Some(Collector::Null) => Ok(Box::new(null::NullCompiler::default())),
        #[cfg(not(feature = "gc-null"))]
        Some(Collector::Null) => Err(wasm_unsupported!(
            "the null collector is unavailable because the `gc-null` feature \
             was disabled at compile time",
        )),

        // `None`: no collector selected in the tunables. The error message
        // differs depending on whether any collector could have been chosen.
        #[cfg(any(feature = "gc-drc", feature = "gc-null"))]
        None => Err(wasm_unsupported!(
            "support for GC types disabled at configuration time"
        )),
        #[cfg(not(any(feature = "gc-drc", feature = "gc-null")))]
        None => Err(wasm_unsupported!(
            "support for GC types disabled because no collector implementation \
             was selected at compile time; enable one of the `gc-drc` or \
             `gc-null` features",
        )),
    }
}
/// Load a GC reference of heap type `ty` from `*ptr_to_gc_ref`, without
/// emitting any collector read barrier.
///
/// The loaded value is registered as needing inclusion in stack maps unless
/// it is an `i31ref`, which carries no heap allocation to trace.
#[cfg_attr(
    not(feature = "gc-drc"),
    expect(dead_code, reason = "easier to define")
)]
fn unbarriered_load_gc_ref(
    builder: &mut FunctionBuilder,
    ty: WasmHeapType,
    ptr_to_gc_ref: ir::Value,
    flags: ir::MemFlags,
) -> WasmResult<ir::Value> {
    debug_assert!(ty.is_vmgcref_type());
    let raw_ref = builder.ins().load(ir::types::I32, flags, ptr_to_gc_ref, 0);
    if !matches!(ty, WasmHeapType::I31) {
        builder.declare_value_needs_stack_map(raw_ref);
    }
    Ok(raw_ref)
}
/// Store the GC reference `gc_ref` (of heap type `ty`) into `*dst`, without
/// emitting any collector write barrier.
#[cfg_attr(
    not(any(feature = "gc-drc", feature = "gc-null")),
    expect(dead_code, reason = "easier to define")
)]
fn unbarriered_store_gc_ref(
    builder: &mut FunctionBuilder,
    ty: WasmHeapType,
    dst: ir::Value,
    gc_ref: ir::Value,
    flags: ir::MemFlags,
) -> WasmResult<()> {
    // Only VM GC reference types may be stored this way.
    debug_assert!(ty.is_vmgcref_type());
    builder.ins().store(flags, gc_ref, dst, 0);
    Ok(())
}
/// Emit an assertion that `gc_ref` is non-null and that its header's kind
/// bits contain all of `expected_kind`'s bits.
///
/// Compiles to nothing unless the `gc_zeal` cfg is enabled.
fn emit_gc_kind_assert(
    func_env: &mut FuncEnvironment<'_>,
    builder: &mut FunctionBuilder<'_>,
    gc_ref: ir::Value,
    expected_kind: VMGcKind,
) {
    // This is purely a debugging aid; skip it entirely outside of
    // `gc_zeal` builds.
    if !cfg!(gc_zeal) {
        return;
    }

    func_env.trapz(builder, gc_ref, crate::TRAP_NULL_REFERENCE);

    // Compute the (bounds-checked) address of the header's kind field.
    let kind_addr = func_env.prepare_gc_ref_access(
        builder,
        gc_ref,
        BoundsCheck::StaticObjectField {
            offset: wasmtime_environ::VM_GC_HEADER_KIND_OFFSET,
            access_size: wasmtime_environ::VM_GC_KIND_SIZE,
            object_size: wasmtime_environ::VM_GC_HEADER_SIZE,
        },
    );
    let kind_and_reserved_bits = builder.ins().load(
        ir::types::I32,
        ir::MemFlags::trusted().with_readonly(),
        kind_addr,
        0,
    );

    // Mask away the reserved (non-kind) bits before comparing.
    let kind_mask = builder
        .ins()
        .iconst(ir::types::I32, i64::from(VMGcKind::MASK));
    let actual_kind = builder.ins().band(kind_and_reserved_bits, kind_mask);

    // `(actual & expected) == expected` holds exactly when every bit of
    // `expected_kind` is set in the actual kind.
    let expected_kind = builder
        .ins()
        .iconst(ir::types::I32, i64::from(expected_kind.as_u32()));
    let and = builder.ins().band(actual_kind, expected_kind);
    let matches = builder.ins().icmp(IntCC::Equal, and, expected_kind);
    builder.ins().trapz(matches, TRAP_INTERNAL_ASSERT);
}
/// Load a value of the given storage type from `addr` in the GC heap.
///
/// `extension` describes how a packed (`i8`/`i16`) field is widened to
/// `i32` after the load; it must be `Some(_)` exactly for the packed types
/// and `None` for full value types.
fn read_field_at_addr(
    func_env: &mut FuncEnvironment<'_>,
    builder: &mut FunctionBuilder<'_>,
    ty: WasmStorageType,
    addr: ir::Value,
    extension: Option<Extension>,
) -> WasmResult<ir::Value> {
    // Extension is required for packed fields and meaningless otherwise.
    assert_eq!(extension.is_none(), matches!(ty, WasmStorageType::Val(_)));
    assert_eq!(
        extension.is_some(),
        matches!(ty, WasmStorageType::I8 | WasmStorageType::I16)
    );

    // GC-heap accesses are little-endian and trusted (the caller already
    // emitted any required bounds checks when computing `addr`).
    let flags = ir::MemFlags::trusted().with_endianness(ir::Endianness::Little);

    let value = match ty {
        WasmStorageType::I8 => builder.ins().load(ir::types::I8, flags, addr, 0),
        WasmStorageType::I16 => builder.ins().load(ir::types::I16, flags, addr, 0),
        WasmStorageType::Val(v) => match v {
            WasmValType::I32 => builder.ins().load(ir::types::I32, flags, addr, 0),
            WasmValType::I64 => builder.ins().load(ir::types::I64, flags, addr, 0),
            WasmValType::F32 => builder.ins().load(ir::types::F32, flags, addr, 0),
            WasmValType::F64 => builder.ins().load(ir::types::F64, flags, addr, 0),
            WasmValType::V128 => builder.ins().load(ir::types::I8X16, flags, addr, 0),
            WasmValType::Ref(r) => match r.heap_type.top() {
                // GC references go through the collector's read barrier.
                WasmHeapTopType::Any | WasmHeapTopType::Extern | WasmHeapTopType::Exn => {
                    gc_compiler(func_env)?
                        .translate_read_gc_reference(func_env, builder, r, addr, flags)?
                }
                // Function references are stored in the GC heap as 32-bit
                // interned ids; convert the id back to a funcref pointer
                // via the `get_interned_func_ref` builtin.
                WasmHeapTopType::Func => {
                    let expected_ty = match r.heap_type {
                        // Untyped `funcref`: no type check needed, so pass
                        // the reserved sentinel index.
                        WasmHeapType::Func => ModuleInternedTypeIndex::reserved_value(),
                        WasmHeapType::ConcreteFunc(ty) => ty.unwrap_module_type_index(),
                        WasmHeapType::NoFunc => {
                            let null = builder.ins().iconst(func_env.pointer_type(), 0);
                            if !r.nullable {
                                // Non-nullable `nofunc` is uninhabited, so
                                // this load can never be reached: trapping
                                // on the constant zero always fires.
                                builder.ins().trapz(null, TRAP_INTERNAL_ASSERT);
                            }
                            return Ok(null);
                        }
                        _ => unreachable!("not a function heap type"),
                    };
                    let expected_ty = builder
                        .ins()
                        .iconst(ir::types::I32, i64::from(expected_ty.as_bits()));

                    let vmctx = func_env.vmctx_val(&mut builder.cursor());

                    let func_ref_id = builder.ins().load(ir::types::I32, flags, addr, 0);
                    let get_interned_func_ref = func_env
                        .builtin_functions
                        .get_interned_func_ref(builder.func);

                    let call_inst = builder
                        .ins()
                        .call(get_interned_func_ref, &[vmctx, func_ref_id, expected_ty]);
                    builder.func.dfg.first_result(call_inst)
                }
                WasmHeapTopType::Cont => {
                    return Err(wasmtime_environ::WasmError::Unsupported(
                        "Stack switching feature not compatible with GC, yet".to_string(),
                    ));
                }
            },
        },
    };

    // Widen packed loads out to the `i32` the Wasm stack machine expects.
    let value = match extension {
        Some(Extension::Sign) => builder.ins().sextend(ir::types::I32, value),
        Some(Extension::Zero) => builder.ins().uextend(ir::types::I32, value),
        None => value,
    };

    Ok(value)
}
/// Store the function reference `func_ref` into the GC-heap slot at
/// `field_addr`.
///
/// Function references are not stored in the GC heap as raw pointers:
/// the `intern_func_ref_for_gc_heap` builtin interns the funcref and the
/// resulting 32-bit id is what gets written to the field.
fn write_func_ref_at_addr(
    func_env: &mut FuncEnvironment<'_>,
    builder: &mut FunctionBuilder<'_>,
    ref_type: WasmRefType,
    flags: ir::MemFlags,
    field_addr: ir::Value,
    func_ref: ir::Value,
) -> WasmResult<()> {
    assert_eq!(ref_type.heap_type.top(), WasmHeapTopType::Func);

    let vmctx = func_env.vmctx_val(&mut builder.cursor());
    let intern_func_ref_for_gc_heap = func_env
        .builtin_functions
        .intern_func_ref_for_gc_heap(builder.func);

    // `nofunc`'s only possible value is null; the non-nullable variant is
    // uninhabited, so assert-trap on the constant zero in that case.
    let func_ref = match ref_type.heap_type {
        WasmHeapType::NoFunc => {
            let null = builder.ins().iconst(func_env.pointer_type(), 0);
            if !ref_type.nullable {
                builder.ins().trapz(null, TRAP_INTERNAL_ASSERT);
            }
            null
        }
        _ => func_ref,
    };

    let call = builder
        .ins()
        .call(intern_func_ref_for_gc_heap, &[vmctx, func_ref]);
    let interned_id = builder.func.dfg.first_result(call);
    let interned_id = builder.ins().ireduce(ir::types::I32, interned_id);
    builder.ins().store(flags, interned_id, field_addr, 0);
    Ok(())
}
/// Write `new_val` into the GC-heap field at `field_addr`, dispatching on
/// the field's storage type.
fn write_field_at_addr(
    func_env: &mut FuncEnvironment<'_>,
    builder: &mut FunctionBuilder<'_>,
    field_ty: WasmStorageType,
    field_addr: ir::Value,
    new_val: ir::Value,
) -> WasmResult<()> {
    // GC-heap accesses are little-endian and trusted (already
    // bounds-checked by the caller).
    let flags = ir::MemFlags::trusted().with_endianness(ir::Endianness::Little);
    match field_ty {
        // Packed fields: store just the low 8/16 bits of the `i32`.
        WasmStorageType::I8 => {
            builder.ins().istore8(flags, new_val, field_addr, 0);
        }
        WasmStorageType::I16 => {
            builder.ins().istore16(flags, new_val, field_addr, 0);
        }
        WasmStorageType::Val(WasmValType::Ref(r)) => {
            if r.heap_type.top() == WasmHeapTopType::Func {
                // Function references are stored as interned ids, not raw
                // GC references.
                write_func_ref_at_addr(func_env, builder, r, flags, field_addr, new_val)?;
            } else {
                // All other references go through the collector's write
                // barrier.
                gc_compiler(func_env)?.translate_write_gc_reference(
                    func_env, builder, r, field_addr, new_val, flags,
                )?;
            }
        }
        // Unpacked scalars/vectors: a plain full-width store.
        WasmStorageType::Val(_) => {
            assert_eq!(
                builder.func.dfg.value_type(new_val).bytes(),
                wasmtime_environ::byte_size_of_wasm_ty_in_gc_heap(&field_ty)
            );
            builder.ins().store(flags, new_val, field_addr, 0);
        }
    }
    Ok(())
}
/// Translate a `struct.new`: allocate a struct of the given type,
/// initialized with the given field values (one per field, in declaration
/// order).
pub fn translate_struct_new(
    func_env: &mut FuncEnvironment<'_>,
    builder: &mut FunctionBuilder<'_>,
    struct_type_index: TypeIndex,
    fields: &[ir::Value],
) -> WasmResult<ir::Value> {
    // `fields` is already a `&[ir::Value]`; the previous `&fields` built a
    // needless `&&[_]` that only worked via auto-deref coercion.
    gc_compiler(func_env)?.alloc_struct(func_env, builder, struct_type_index, fields)
}
/// Produce the default (zero / null) IR value for a field or element of the
/// given storage type.
fn default_value(
    cursor: &mut FuncCursor,
    func_env: &FuncEnvironment<'_>,
    ty: &WasmStorageType,
) -> ir::Value {
    match ty {
        // Packed fields are represented as `i32` on the Wasm stack, so
        // their default is the same zero constant as `i32`.
        WasmStorageType::I8 | WasmStorageType::I16 | WasmStorageType::Val(WasmValType::I32) => {
            cursor.ins().iconst(ir::types::I32, 0)
        }
        WasmStorageType::Val(WasmValType::I64) => cursor.ins().iconst(ir::types::I64, 0),
        WasmStorageType::Val(WasmValType::F32) => cursor.ins().f32const(0.0),
        WasmStorageType::Val(WasmValType::F64) => cursor.ins().f64const(0.0),
        WasmStorageType::Val(WasmValType::V128) => {
            // All-zero 128-bit vector constant.
            let zeros = cursor.func.dfg.constants.insert(vec![0; 16].into());
            cursor.ins().vconst(ir::types::I8X16, zeros)
        }
        WasmStorageType::Val(WasmValType::Ref(r)) => {
            // Only nullable references have a default value (null).
            assert!(r.nullable);
            let (ref_ty, _needs_stack_map) = func_env.reference_type(r.heap_type);
            cursor.ins().iconst(ref_ty, 0)
        }
    }
}
pub fn translate_struct_new_default(
func_env: &mut FuncEnvironment<'_>,
builder: &mut FunctionBuilder<'_>,
struct_type_index: TypeIndex,
) -> WasmResult<ir::Value> {
let interned_ty = func_env.module.types[struct_type_index].unwrap_module_type_index();
let struct_ty = func_env.types.unwrap_struct(interned_ty)?;
let fields = struct_ty
.fields
.iter()
.map(|f| default_value(&mut builder.cursor(), func_env, &f.element_type))
.collect::<StructFieldsVec>();
gc_compiler(func_env)?.alloc_struct(func_env, builder, struct_type_index, &fields)
}
/// Translate a `struct.get`/`struct.get_s`/`struct.get_u`: load field
/// `field_index` out of the (possibly null) `struct_ref`.
///
/// `extension` is `Some(_)` for the packed-field variants (`get_s`/`get_u`)
/// and `None` otherwise.
pub fn translate_struct_get(
    func_env: &mut FuncEnvironment<'_>,
    builder: &mut FunctionBuilder<'_>,
    struct_type_index: TypeIndex,
    field_index: u32,
    struct_ref: ir::Value,
    extension: Option<Extension>,
) -> WasmResult<ir::Value> {
    log::trace!(
        "translate_struct_get({struct_type_index:?}, {field_index:?}, {struct_ref:?}, {extension:?})"
    );

    // Null struct references trap; under `gc_zeal` builds also assert that
    // the header kind really is a struct.
    func_env.trapz(builder, struct_ref, crate::TRAP_NULL_REFERENCE);
    emit_gc_kind_assert(func_env, builder, struct_ref, VMGcKind::StructRef);

    let field_index = usize::try_from(field_index).unwrap();
    let interned_type_index = func_env.module.types[struct_type_index].unwrap_module_type_index();

    // Copy the layout's size/offset out so no borrow of the layout is held
    // across the later uses of `func_env`.
    let struct_layout = func_env.struct_or_exn_layout(interned_type_index);
    let struct_size = struct_layout.size;
    let field_offset = struct_layout.fields[field_index].offset;

    let field_ty = &func_env.types.unwrap_struct(interned_type_index)?.fields[field_index];
    let field_size = wasmtime_environ::byte_size_of_wasm_ty_in_gc_heap(&field_ty.element_type);
    // The field must lie entirely within the object.
    assert!(field_offset + field_size <= struct_size);

    // Bounds-check against the GC heap and compute the field's address.
    let field_addr = func_env.prepare_gc_ref_access(
        builder,
        struct_ref,
        BoundsCheck::StaticObjectField {
            offset: field_offset,
            access_size: u8::try_from(field_size).unwrap(),
            object_size: struct_size,
        },
    );

    let result = read_field_at_addr(
        func_env,
        builder,
        field_ty.element_type,
        field_addr,
        extension,
    );
    log::trace!("translate_struct_get(..) -> {result:?}");
    result
}
/// Translate a `struct.set`: store `new_val` into field `field_index` of
/// the (possibly null) `struct_ref`.
pub fn translate_struct_set(
    func_env: &mut FuncEnvironment<'_>,
    builder: &mut FunctionBuilder<'_>,
    struct_type_index: TypeIndex,
    field_index: u32,
    struct_ref: ir::Value,
    new_val: ir::Value,
) -> WasmResult<()> {
    log::trace!(
        "translate_struct_set({struct_type_index:?}, {field_index:?}, struct_ref: {struct_ref:?}, new_val: {new_val:?})"
    );

    // Null struct references trap; under `gc_zeal` builds also assert that
    // the header kind really is a struct.
    func_env.trapz(builder, struct_ref, crate::TRAP_NULL_REFERENCE);
    emit_gc_kind_assert(func_env, builder, struct_ref, VMGcKind::StructRef);

    let field_index = usize::try_from(field_index).unwrap();
    let interned_type_index = func_env.module.types[struct_type_index].unwrap_module_type_index();

    // Copy the layout's size/offset out so no borrow of the layout is held
    // across the later uses of `func_env`.
    let struct_layout = func_env.struct_or_exn_layout(interned_type_index);
    let struct_size = struct_layout.size;
    let field_offset = struct_layout.fields[field_index].offset;

    let field_ty = &func_env.types.unwrap_struct(interned_type_index)?.fields[field_index];
    let field_size = wasmtime_environ::byte_size_of_wasm_ty_in_gc_heap(&field_ty.element_type);
    // The field must lie entirely within the object.
    assert!(field_offset + field_size <= struct_size);

    // Bounds-check against the GC heap and compute the field's address.
    let field_addr = func_env.prepare_gc_ref_access(
        builder,
        struct_ref,
        BoundsCheck::StaticObjectField {
            offset: field_offset,
            access_size: u8::try_from(field_size).unwrap(),
            object_size: struct_size,
        },
    );

    // Dispatches on the storage type and applies any required write
    // barrier for reference-typed fields.
    write_field_at_addr(
        func_env,
        builder,
        field_ty.element_type,
        field_addr,
        new_val,
    )?;
    log::trace!("translate_struct_set: finished");
    Ok(())
}
/// Unbox an exception object's payload: load every field of the exception
/// type associated with `tag_index` from `exn_ref`, returned in
/// field-declaration order.
pub fn translate_exn_unbox(
    func_env: &mut FuncEnvironment<'_>,
    builder: &mut FunctionBuilder<'_>,
    tag_index: TagIndex,
    exn_ref: ir::Value,
) -> WasmResult<SmallVec<[ir::Value; 4]>> {
    log::trace!("translate_exn_unbox({tag_index:?}, {exn_ref:?})");
    let exception_ty_idx = func_env
        .exception_type_from_tag(tag_index)
        .unwrap_module_type_index();
    let exception_ty = func_env.types.unwrap_exn(exception_ty_idx)?;
    let exn_layout = func_env.struct_or_exn_layout(exception_ty_idx);
    let exn_size = exn_layout.size;

    // Two passes: first gather each field's (offset, storage-type) pair,
    // then emit the loads. Emitting requires `func_env` mutably, which we
    // cannot do while `exn_layout` is still borrowed from it.
    let mut accesses: SmallVec<[_; 4]> = smallvec![];
    for (field_ty, field_layout) in exception_ty.fields.iter().zip(exn_layout.fields.iter()) {
        accesses.push((field_layout.offset, field_ty.element_type));
    }

    let mut result = smallvec![];
    for (field_offset, field_ty) in accesses {
        let field_size = wasmtime_environ::byte_size_of_wasm_ty_in_gc_heap(&field_ty);
        // Each field must lie entirely within the object.
        assert!(field_offset + field_size <= exn_size);
        // Bounds-check against the GC heap and compute the field address.
        let field_addr = func_env.prepare_gc_ref_access(
            builder,
            exn_ref,
            BoundsCheck::StaticObjectField {
                offset: field_offset,
                access_size: u8::try_from(field_size).unwrap(),
                object_size: exn_size,
            },
        );
        // Exception fields are full value types, so no extension.
        let value = read_field_at_addr(func_env, builder, field_ty, field_addr, None)?;
        result.push(value);
    }
    log::trace!("translate_exn_unbox(..) -> {result:?}");
    Ok(result)
}
/// Translate a `throw`: allocate a fresh exception object for `tag_index`
/// carrying `args` as its payload, then throw it.
pub fn translate_exn_throw(
    func_env: &mut FuncEnvironment<'_>,
    builder: &mut FunctionBuilder<'_>,
    tag_index: TagIndex,
    args: &[ir::Value],
) -> WasmResult<()> {
    // Resolve the instance that defines this tag along with its
    // defined-tag id; both are recorded in the allocated exception object.
    let (instance_id, defined_tag_id) = func_env.get_instance_and_tag(builder, tag_index);
    let exn = gc_compiler(func_env)?.alloc_exn(
        func_env,
        builder,
        tag_index,
        args,
        instance_id,
        defined_tag_id,
    )?;
    // Throwing an existing exnref is shared with `throw_ref`.
    translate_exn_throw_ref(func_env, builder, exn)
}
/// Translate a `throw_ref`: throw the given exception reference.
///
/// The throw itself is performed by the `throw_ref` builtin, invoked via
/// `try_call` so that handlers active at this point in the function are
/// attached to the call's exception table and can catch the exception.
pub fn translate_exn_throw_ref(
    func_env: &mut FuncEnvironment<'_>,
    builder: &mut FunctionBuilder<'_>,
    exnref: ir::Value,
) -> WasmResult<()> {
    let builtin = func_env.builtin_functions.throw_ref(builder.func);
    let sig = builder.func.dfg.ext_funcs[builtin].signature;
    let vmctx = func_env.vmctx_val(&mut builder.cursor());

    // Block for the `try_call`'s normal-return edge.
    let continuation = builder.create_block();
    let current_block = builder.current_block().unwrap();
    builder.insert_block_after(continuation, current_block);
    let continuation_call = builder.func.dfg.block_call(continuation, &[]);

    // Build the exception table: the vmctx context entry plus one entry
    // per currently-active handler. Each handler block receives the caught
    // exception payload as `TryCallExn(0)`.
    let mut table_items = vec![ExceptionTableItem::Context(vmctx)];
    for (tag, block) in func_env.stacks.handlers.handlers() {
        let block_call = builder
            .func
            .dfg
            .block_call(block, &[BlockArg::TryCallExn(0)]);
        table_items.push(match tag {
            // `Some(tag)` handles a specific tag; `None` is a catch-all.
            Some(tag) => ExceptionTableItem::Tag(tag, block_call),
            None => ExceptionTableItem::Default(block_call),
        });
    }
    let etd = ExceptionTableData::new(sig, continuation_call, table_items);
    let et = builder.func.dfg.exception_tables.push(etd);
    builder.ins().try_call(builtin, &[vmctx, exnref], et);

    // The builtin never returns normally, so the continuation block is
    // unreachable; emit an unconditional trap there.
    builder.switch_to_block(continuation);
    builder.seal_block(continuation);
    func_env.trap(builder, crate::TRAP_UNREACHABLE);
    Ok(())
}
/// Translate an `array.new`: allocate an array of `len` elements, each
/// initialized to the single value `elem`.
pub fn translate_array_new(
    func_env: &mut FuncEnvironment<'_>,
    builder: &mut FunctionBuilder,
    array_type_index: TypeIndex,
    elem: ir::Value,
    len: ir::Value,
) -> WasmResult<ir::Value> {
    log::trace!("translate_array_new({array_type_index:?}, {elem:?}, {len:?})");
    let init = ArrayInit::Fill { elem, len };
    let result = gc_compiler(func_env)?.alloc_array(func_env, builder, array_type_index, init)?;
    log::trace!("translate_array_new(..) -> {result:?}");
    Ok(result)
}
/// Translate an `array.new_default`: allocate an array of `len` elements,
/// each initialized to the element type's default (zero / null) value.
pub fn translate_array_new_default(
    func_env: &mut FuncEnvironment<'_>,
    builder: &mut FunctionBuilder,
    array_type_index: TypeIndex,
    len: ir::Value,
) -> WasmResult<ir::Value> {
    log::trace!("translate_array_new_default({array_type_index:?}, {len:?})");
    let interned_ty = func_env.module.types[array_type_index].unwrap_module_type_index();
    let elem_ty = func_env.types.unwrap_array(interned_ty)?.0.element_type;
    // Materialize the default value once; the fill loop replicates it.
    let elem = default_value(&mut builder.cursor(), func_env, &elem_ty);
    let init = ArrayInit::Fill { elem, len };
    let result = gc_compiler(func_env)?.alloc_array(func_env, builder, array_type_index, init)?;
    log::trace!("translate_array_new_default(..) -> {result:?}");
    Ok(result)
}
/// Translate an `array.new_fixed`: allocate an array initialized from the
/// statically-known list of element values `elems`.
pub fn translate_array_new_fixed(
    func_env: &mut FuncEnvironment<'_>,
    builder: &mut FunctionBuilder,
    array_type_index: TypeIndex,
    elems: &[ir::Value],
) -> WasmResult<ir::Value> {
    log::trace!("translate_array_new_fixed({array_type_index:?}, {elems:?})");
    let init = ArrayInit::Elems(elems);
    let result = gc_compiler(func_env)?.alloc_array(func_env, builder, array_type_index, init)?;
    log::trace!("translate_array_new_fixed(..) -> {result:?}");
    Ok(result)
}
impl ArrayInit<'_> {
    /// Get this initializer's element count as an `i32` IR value, emitting
    /// a constant for the statically-known `Elems` case.
    #[cfg_attr(
        not(any(feature = "gc-drc", feature = "gc-null")),
        expect(dead_code, reason = "easier to define")
    )]
    fn len(self, pos: &mut FuncCursor) -> ir::Value {
        match self {
            ArrayInit::Fill { len, .. } => len,
            ArrayInit::Elems(e) => {
                let len = u32::try_from(e.len()).unwrap();
                pos.ins().iconst(ir::types::I32, i64::from(len))
            }
        }
    }

    /// Initialize a newly-allocated array's elements.
    ///
    /// - `base_size` is the static size in bytes of everything before the
    ///   first element.
    /// - `size` is the dynamic total object size in bytes (as an IR value).
    /// - `elems_addr` is the address of the first element.
    /// - `init_field` is invoked to write each individual element (this is
    ///   where barriers, if any, are applied by the caller).
    #[cfg_attr(
        not(any(feature = "gc-drc", feature = "gc-null")),
        expect(dead_code, reason = "easier to define")
    )]
    fn initialize(
        self,
        func_env: &mut FuncEnvironment<'_>,
        builder: &mut FunctionBuilder<'_>,
        interned_type_index: ModuleInternedTypeIndex,
        base_size: u32,
        size: ir::Value,
        elems_addr: ir::Value,
        mut init_field: impl FnMut(
            &mut FuncEnvironment<'_>,
            &mut FunctionBuilder<'_>,
            WasmStorageType,
            ir::Value,
            ir::Value,
        ) -> WasmResult<()>,
    ) -> WasmResult<()> {
        log::trace!(
            "initialize_array({interned_type_index:?}, {base_size:?}, {size:?}, {elems_addr:?})"
        );
        // Shared (thread-safe) GC types are not supported here.
        assert!(!func_env.types[interned_type_index].composite_type.shared);
        let array_ty = func_env.types[interned_type_index]
            .composite_type
            .inner
            .unwrap_array();
        let elem_ty = array_ty.0.element_type;
        let elem_size = wasmtime_environ::byte_size_of_wasm_ty_in_gc_heap(&elem_ty);
        let pointer_type = func_env.pointer_type();
        let elem_size = builder.ins().iconst(pointer_type, i64::from(elem_size));
        match self {
            // A fixed list of values: write each one in turn, bumping the
            // element address by `elem_size` after every write.
            ArrayInit::Elems(elems) => {
                let mut elem_addr = elems_addr;
                for val in elems {
                    init_field(func_env, builder, elem_ty, elem_addr, *val)?;
                    elem_addr = builder.ins().iadd(elem_addr, elem_size);
                }
            }
            // One repeated value: loop from `elems_addr` up to the end of
            // the object, recovered as `(elems_addr - base_size) + size`
            // (object start plus the total dynamic size).
            ArrayInit::Fill { elem, len: _ } => {
                let base_size = builder.ins().iconst(pointer_type, i64::from(base_size));
                let array_addr = builder.ins().isub(elems_addr, base_size);
                let size = uextend_i32_to_pointer_type(builder, pointer_type, size);
                let elems_end = builder.ins().iadd(array_addr, size);
                emit_array_fill_impl(
                    func_env,
                    builder,
                    elems_addr,
                    elem_size,
                    elems_end,
                    |func_env, builder, elem_addr| {
                        init_field(func_env, builder, elem_ty, elem_addr, elem)
                    },
                )?;
            }
        }
        log::trace!("initialize_array: finished");
        Ok(())
    }
}
/// Emit a loop that writes one element at a time from `elem_addr`
/// (inclusive) up to `fill_end` (exclusive), advancing by `elem_size`
/// bytes per iteration.
///
/// All three address/size values must already be of the target's pointer
/// type. `emit_elem_write` is invoked once, inside the loop body, to store
/// a single element at the given address.
fn emit_array_fill_impl(
    func_env: &mut FuncEnvironment<'_>,
    builder: &mut FunctionBuilder<'_>,
    elem_addr: ir::Value,
    elem_size: ir::Value,
    fill_end: ir::Value,
    mut emit_elem_write: impl FnMut(
        &mut FuncEnvironment<'_>,
        &mut FunctionBuilder<'_>,
        ir::Value,
    ) -> WasmResult<()>,
) -> WasmResult<()> {
    log::trace!(
        "emit_array_fill_impl(elem_addr: {elem_addr:?}, elem_size: {elem_size:?}, fill_end: {fill_end:?})"
    );
    let pointer_ty = func_env.pointer_type();

    assert_eq!(builder.func.dfg.value_type(elem_addr), pointer_ty);
    assert_eq!(builder.func.dfg.value_type(elem_size), pointer_ty);
    assert_eq!(builder.func.dfg.value_type(fill_end), pointer_ty);

    // Loop shape:
    //
    //   current:
    //     jump header(elem_addr)
    //   header(addr):
    //     brif addr == fill_end, continue, body
    //   body:
    //     write element at addr
    //     jump header(addr + elem_size)
    //   continue:
    let current_block = builder.current_block().unwrap();
    let loop_header_block = builder.create_block();
    let loop_body_block = builder.create_block();
    let continue_block = builder.create_block();

    builder.ensure_inserted_block();
    builder.insert_block_after(loop_header_block, current_block);
    builder.insert_block_after(loop_body_block, loop_header_block);
    builder.insert_block_after(continue_block, loop_body_block);

    // Enter the loop with the starting element address as the loop's only
    // block parameter.
    builder.ins().jump(loop_header_block, &[elem_addr.into()]);
    builder.switch_to_block(loop_header_block);
    builder.append_block_param(loop_header_block, pointer_ty);

    log::trace!("emit_array_fill_impl: loop header");
    // Give the environment a chance to instrument the loop header (see
    // `FuncEnvironment::translate_loop_header`).
    func_env.translate_loop_header(builder)?;
    let elem_addr = builder.block_params(loop_header_block)[0];
    // Exit when the cursor reaches one-past-the-last element.
    let done = builder.ins().icmp(IntCC::Equal, elem_addr, fill_end);
    builder
        .ins()
        .brif(done, continue_block, &[], loop_body_block, &[]);

    builder.switch_to_block(loop_body_block);
    log::trace!("emit_array_fill_impl: loop body");
    emit_elem_write(func_env, builder, elem_addr)?;
    let next_elem_addr = builder.ins().iadd(elem_addr, elem_size);
    builder
        .ins()
        .jump(loop_header_block, &[next_elem_addr.into()]);

    builder.switch_to_block(continue_block);
    log::trace!("emit_array_fill_impl: finished");

    // All predecessors are now known, so the blocks can be sealed.
    builder.seal_block(loop_header_block);
    builder.seal_block(loop_body_block);
    builder.seal_block(continue_block);
    Ok(())
}
/// Translate an `array.fill`: write `value` into `n` consecutive elements
/// of `array_ref`, starting at `index`, trapping on null references and
/// out-of-bounds ranges.
pub fn translate_array_fill(
    func_env: &mut FuncEnvironment<'_>,
    builder: &mut FunctionBuilder<'_>,
    array_type_index: TypeIndex,
    array_ref: ir::Value,
    index: ir::Value,
    value: ir::Value,
    n: ir::Value,
) -> WasmResult<()> {
    log::trace!(
        "translate_array_fill({array_type_index:?}, {array_ref:?}, {index:?}, {value:?}, {n:?})"
    );

    // `translate_array_len` also performs the null check. Then `index + n`
    // must neither overflow nor exceed the array length (`== len` is fine:
    // the fill is allowed to end exactly at the end of the array).
    let len = translate_array_len(func_env, builder, array_ref)?;
    let end_index = func_env.uadd_overflow_trap(builder, index, n, crate::TRAP_ARRAY_OUT_OF_BOUNDS);
    let out_of_bounds = builder
        .ins()
        .icmp(IntCC::UnsignedGreaterThan, end_index, len);
    func_env.trapnz(builder, out_of_bounds, crate::TRAP_ARRAY_OUT_OF_BOUNDS);

    let interned_type_index = func_env.module.types[array_type_index].unwrap_module_type_index();
    let ArraySizeInfo {
        obj_size,
        one_elem_size,
        base_size,
    } = emit_array_size_info(func_env, builder, interned_type_index, len);

    // Address of the first element to fill: base + index * elem_size,
    // bounds-checked against the dynamically-computed object size.
    let offset_in_elems = builder.ins().imul(index, one_elem_size);
    let obj_offset = builder.ins().iadd(base_size, offset_in_elems);
    let elem_addr = func_env.prepare_gc_ref_access(
        builder,
        array_ref,
        BoundsCheck::DynamicObjectField {
            offset: obj_offset,
            object_size: obj_size,
        },
    );

    // One-past-the-end address of the fill region.
    let fill_size = builder.ins().imul(n, one_elem_size);
    let fill_size = uextend_i32_to_pointer_type(builder, func_env.pointer_type(), fill_size);
    let fill_end = builder.ins().iadd(elem_addr, fill_size);

    let one_elem_size =
        uextend_i32_to_pointer_type(builder, func_env.pointer_type(), one_elem_size);

    let result = emit_array_fill_impl(
        func_env,
        builder,
        elem_addr,
        one_elem_size,
        fill_end,
        |func_env, builder, elem_addr| {
            let elem_ty = func_env
                .types
                .unwrap_array(interned_type_index)?
                .0
                .element_type;
            // Applies any required write barrier per element.
            write_field_at_addr(func_env, builder, elem_ty, elem_addr, value)
        },
    );
    log::trace!("translate_array_fill(..) -> {result:?}");
    result
}
/// Translate an `array.len`: load the length (in elements) of the given
/// array, trapping if the reference is null.
pub fn translate_array_len(
    func_env: &mut FuncEnvironment<'_>,
    builder: &mut FunctionBuilder,
    array_ref: ir::Value,
) -> WasmResult<ir::Value> {
    log::trace!("translate_array_len({array_ref:?})");
    func_env.trapz(builder, array_ref, crate::TRAP_NULL_REFERENCE);

    // The length field's offset within the object is collector-defined.
    let len_offset = gc_compiler(func_env)?.layouts().array_length_field_offset();
    let access_size = u8::try_from(ir::types::I32.bytes()).unwrap();
    let len_addr = func_env.prepare_gc_ref_access(
        builder,
        array_ref,
        BoundsCheck::StaticOffset {
            offset: len_offset,
            access_size,
        },
    );

    // The load carries the `readonly` flag: the length is not mutated
    // through this access path.
    let result = builder.ins().load(
        ir::types::I32,
        ir::MemFlags::trusted().with_readonly(),
        len_addr,
        0,
    );
    log::trace!("translate_array_len(..) -> {result:?}");
    Ok(result)
}
/// `i32` IR values describing the size of a particular array object, as
/// computed by `emit_array_size_info`.
struct ArraySizeInfo {
    // Total object size in bytes: `base_size + len * one_elem_size`.
    obj_size: ir::Value,
    // Size in bytes of a single element (`GcArrayLayout::elem_size`).
    one_elem_size: ir::Value,
    // The array's base size in bytes (`GcArrayLayout::base_size`).
    base_size: ir::Value,
}
/// Emit code computing the total size, per-element size, and base size of
/// an already-allocated array of type `array_type_index` whose length is
/// `array_len` (an `i32`). All results are `i32`s.
fn emit_array_size_info(
    func_env: &mut FuncEnvironment<'_>,
    builder: &mut FunctionBuilder<'_>,
    array_type_index: ModuleInternedTypeIndex,
    array_len: ir::Value,
) -> ArraySizeInfo {
    let layout = func_env.array_layout(array_type_index);

    // Do `len * elem_size` in 64 bits so the multiply itself cannot wrap.
    let elem_size_64 = builder
        .ins()
        .iconst(ir::types::I64, i64::from(layout.elem_size));
    let len_64 = builder.ins().uextend(ir::types::I64, array_len);
    let elems_size_64 = builder.ins().imul(elem_size_64, len_64);

    // If any upper-32 bits are set, the total would not fit in `u32`. For
    // an object that was already successfully allocated this is an
    // internal invariant violation, hence the assert-trap.
    let overflow_bits = builder.ins().ushr_imm(elems_size_64, 32);
    builder.ins().trapnz(overflow_bits, TRAP_INTERNAL_ASSERT);
    let elems_size = builder.ins().ireduce(ir::types::I32, elems_size_64);

    // `base + elems` can still overflow in 32 bits, so use a checked add.
    let base_size_val = builder
        .ins()
        .iconst(ir::types::I32, i64::from(layout.base_size));
    let total_size =
        builder
            .ins()
            .uadd_overflow_trap(elems_size, base_size_val, TRAP_INTERNAL_ASSERT);

    let elem_size_32 = builder.ins().ireduce(ir::types::I32, elem_size_64);
    ArraySizeInfo {
        obj_size: total_size,
        one_elem_size: elem_size_32,
        base_size: base_size_val,
    }
}
/// Compute the address of element `index` of `array_ref`, trapping if the
/// reference is null (via `translate_array_len`) or the index is out of
/// bounds.
fn array_elem_addr(
    func_env: &mut FuncEnvironment<'_>,
    builder: &mut FunctionBuilder<'_>,
    array_type_index: ModuleInternedTypeIndex,
    array_ref: ir::Value,
    index: ir::Value,
) -> ir::Value {
    // NOTE(review): this `unwrap` assumes `translate_array_len` cannot fail
    // here (its only error source is `gc_compiler`, which callers of this
    // helper have already exercised) — confirm against callers.
    let len = translate_array_len(func_env, builder, array_ref).unwrap();

    // Trap unless `index < len`.
    let in_bounds = builder.ins().icmp(IntCC::UnsignedLessThan, index, len);
    func_env.trapz(builder, in_bounds, crate::TRAP_ARRAY_OUT_OF_BOUNDS);

    let ArraySizeInfo {
        obj_size,
        one_elem_size,
        base_size,
    } = emit_array_size_info(func_env, builder, array_type_index, len);

    // Byte offset of the element within the object:
    // `base + index * elem_size`, then bounds-checked against the
    // dynamically-computed object size.
    let offset_in_elems = builder.ins().imul(index, one_elem_size);
    let offset_in_array = builder.ins().iadd(offset_in_elems, base_size);
    func_env.prepare_gc_ref_access(
        builder,
        array_ref,
        BoundsCheck::DynamicObjectField {
            offset: offset_in_array,
            object_size: obj_size,
        },
    )
}
/// Translate an `array.get`/`array.get_s`/`array.get_u`: load element
/// `index` of `array_ref`, trapping on null or out-of-bounds access.
///
/// `extension` is `Some(_)` for the packed-element variants and `None`
/// otherwise.
pub fn translate_array_get(
    func_env: &mut FuncEnvironment<'_>,
    builder: &mut FunctionBuilder,
    array_type_index: TypeIndex,
    array_ref: ir::Value,
    index: ir::Value,
    extension: Option<Extension>,
) -> WasmResult<ir::Value> {
    log::trace!("translate_array_get({array_type_index:?}, {array_ref:?}, {index:?})");
    // Under `gc_zeal` builds, assert the header kind really is an array.
    emit_gc_kind_assert(func_env, builder, array_ref, VMGcKind::ArrayRef);

    // Resolve the module-interned type and compute the (null- and
    // bounds-checked) element address.
    let interned_index = func_env.module.types[array_type_index].unwrap_module_type_index();
    let elem_addr = array_elem_addr(func_env, builder, interned_index, array_ref, index);

    let elem_ty = func_env.types.unwrap_array(interned_index)?.0.element_type;
    let result = read_field_at_addr(func_env, builder, elem_ty, elem_addr, extension)?;
    log::trace!("translate_array_get(..) -> {result:?}");
    Ok(result)
}
/// Translate an `array.set`: store `value` into element `index` of
/// `array_ref`, trapping on null or out-of-bounds access.
pub fn translate_array_set(
    func_env: &mut FuncEnvironment<'_>,
    builder: &mut FunctionBuilder,
    array_type_index: TypeIndex,
    array_ref: ir::Value,
    index: ir::Value,
    value: ir::Value,
) -> WasmResult<()> {
    log::trace!("translate_array_set({array_type_index:?}, {array_ref:?}, {index:?}, {value:?})");
    // Under `gc_zeal` builds, assert the header kind really is an array.
    emit_gc_kind_assert(func_env, builder, array_ref, VMGcKind::ArrayRef);

    // Resolve the module-interned type and compute the (null- and
    // bounds-checked) element address.
    let interned_index = func_env.module.types[array_type_index].unwrap_module_type_index();
    let elem_addr = array_elem_addr(func_env, builder, interned_index, array_ref, index);

    let elem_ty = func_env.types.unwrap_array(interned_index)?.0.element_type;
    // Dispatches on storage type and applies any required write barrier.
    write_field_at_addr(func_env, builder, elem_ty, elem_addr, value)?;
    log::trace!("translate_array_set: finished");
    Ok(())
}
/// Translate a `ref.test`: is `val` (whose static type is `val_ty`) a value
/// of type `test_ty`? Produces an `i32` that is 1 on success, 0 on failure.
pub fn translate_ref_test(
    func_env: &mut FuncEnvironment<'_>,
    builder: &mut FunctionBuilder<'_>,
    test_ty: WasmRefType,
    val: ir::Value,
    val_ty: WasmRefType,
) -> WasmResult<ir::Value> {
    log::trace!("translate_ref_test({test_ty:?}, {val:?})");

    // Testing against a bottom type: only null can match, and only when
    // the tested-for type is nullable.
    if test_ty.heap_type.is_bottom() {
        let result = if test_ty.nullable {
            func_env.translate_ref_is_null(builder.cursor(), val, val_ty)?
        } else {
            builder.ins().iconst(ir::types::I32, 0)
        };
        log::trace!("translate_ref_test(..) -> {result:?}");
        return Ok(result);
    }

    // Testing against a top type: everything matches, except null when the
    // tested-for type is non-nullable.
    if test_ty.heap_type.is_top() {
        let result = if test_ty.nullable {
            builder.ins().iconst(ir::types::I32, 1)
        } else {
            let is_null = func_env.translate_ref_is_null(builder.cursor(), val, val_ty)?;
            let zero = builder.ins().iconst(ir::types::I32, 0);
            let one = builder.ins().iconst(ir::types::I32, 1);
            builder.ins().select(is_null, zero, one)
        };
        log::trace!("translate_ref_test(..) -> {result:?}");
        return Ok(result);
    }

    // Testing for `i31`: check the discriminant bit (plus null, when the
    // tested-for type is nullable).
    if test_ty.heap_type == WasmHeapType::I31 {
        let i31_mask = builder.ins().iconst(
            ir::types::I32,
            i64::from(wasmtime_environ::I31_DISCRIMINANT),
        );
        let is_i31 = builder.ins().band(val, i31_mask);
        let result = if test_ty.nullable {
            let is_null = func_env.translate_ref_is_null(builder.cursor(), val, val_ty)?;
            builder.ins().bor(is_null, is_i31)
        } else {
            is_i31
        };
        log::trace!("translate_ref_test(..) -> {result:?}");
        return Ok(result);
    }

    // General case. Emitted control flow:
    //
    //   current:
    //     brif is_null, continue(nullable as i32), non_null
    //   non_null:
    //     (any hierarchy only) brif is_i31, continue(...), non_null_non_i31
    //   non_null_non_i31:
    //     result = inspect the object's header
    //     jump continue(result)
    //   continue(result):
    let is_any_hierarchy = test_ty.heap_type.top() == WasmHeapTopType::Any;

    let non_null_block = builder.create_block();
    let non_null_non_i31_block = builder.create_block();
    let continue_block = builder.create_block();

    // Null matches iff the tested-for type is nullable.
    let is_null = func_env.translate_ref_is_null(builder.cursor(), val, val_ty)?;
    let result_when_is_null = builder
        .ins()
        .iconst(ir::types::I32, test_ty.nullable as i64);
    builder.ins().brif(
        is_null,
        continue_block,
        &[result_when_is_null.into()],
        non_null_block,
        &[],
    );

    builder.switch_to_block(non_null_block);
    log::trace!("translate_ref_test: non-null ref block");
    if is_any_hierarchy {
        // In the `any` hierarchy a non-null value may be an unboxed `i31`,
        // which has no header to inspect: per the `matches!` below, it
        // matches exactly `any`, `eq`, and `i31`.
        let i31_mask = builder.ins().iconst(
            ir::types::I32,
            i64::from(wasmtime_environ::I31_DISCRIMINANT),
        );
        let is_i31 = builder.ins().band(val, i31_mask);
        let result_when_is_i31 = builder.ins().iconst(
            ir::types::I32,
            matches!(
                test_ty.heap_type,
                WasmHeapType::Any | WasmHeapType::Eq | WasmHeapType::I31
            ) as i64,
        );
        builder.ins().brif(
            is_i31,
            continue_block,
            &[result_when_is_i31.into()],
            non_null_non_i31_block,
            &[],
        );
    } else {
        // Other hierarchies have no i31 values; fall straight through.
        builder.ins().jump(non_null_non_i31_block, &[]);
    }

    builder.switch_to_block(non_null_non_i31_block);
    log::trace!("translate_ref_test: non-null and non-i31 ref block");

    // Helper closure: produce an `i32` boolean saying whether the object's
    // header kind contains all of `expected_kind`'s bits.
    let check_header_kind = |func_env: &mut FuncEnvironment<'_>,
                             builder: &mut FunctionBuilder,
                             val: ir::Value,
                             expected_kind: VMGcKind|
     -> ir::Value {
        let kind_addr = func_env.prepare_gc_ref_access(
            builder,
            val,
            BoundsCheck::StaticObjectField {
                offset: wasmtime_environ::VM_GC_HEADER_KIND_OFFSET,
                access_size: wasmtime_environ::VM_GC_KIND_SIZE,
                object_size: wasmtime_environ::VM_GC_HEADER_SIZE,
            },
        );
        let actual_kind = builder.ins().load(
            ir::types::I32,
            ir::MemFlags::trusted().with_readonly(),
            kind_addr,
            0,
        );
        let expected_kind = builder
            .ins()
            .iconst(ir::types::I32, i64::from(expected_kind.as_u32()));
        // `(actual & expected) == expected` iff all expected bits are set.
        let and = builder.ins().band(actual_kind, expected_kind);
        let kind_matches = builder
            .ins()
            .icmp(ir::condcodes::IntCC::Equal, and, expected_kind);
        builder.ins().uextend(ir::types::I32, kind_matches)
    };

    let result = match test_ty.heap_type {
        // These were all dispatched by the early returns above.
        WasmHeapType::Any
        | WasmHeapType::None
        | WasmHeapType::Extern
        | WasmHeapType::NoExtern
        | WasmHeapType::Func
        | WasmHeapType::NoFunc
        | WasmHeapType::Cont
        | WasmHeapType::NoCont
        | WasmHeapType::Exn
        | WasmHeapType::NoExn
        | WasmHeapType::I31 => unreachable!("handled top, bottom, and i31 types above"),

        // Abstract (non-concrete) types: a header-kind check suffices.
        WasmHeapType::Eq => check_header_kind(func_env, builder, val, VMGcKind::EqRef),
        WasmHeapType::Struct => check_header_kind(func_env, builder, val, VMGcKind::StructRef),
        WasmHeapType::Array => check_header_kind(func_env, builder, val, VMGcKind::ArrayRef),

        // Concrete struct/array/exn types: load the object's type index
        // out of its header and run a full runtime subtype check.
        WasmHeapType::ConcreteArray(ty)
        | WasmHeapType::ConcreteStruct(ty)
        | WasmHeapType::ConcreteExn(ty) => {
            let expected_interned_ty = ty.unwrap_module_type_index();
            let expected_shared_ty =
                func_env.module_interned_to_shared_ty(&mut builder.cursor(), expected_interned_ty);

            let ty_addr = func_env.prepare_gc_ref_access(
                builder,
                val,
                BoundsCheck::StaticOffset {
                    offset: wasmtime_environ::VM_GC_HEADER_TYPE_INDEX_OFFSET,
                    access_size: func_env.offsets.size_of_vmshared_type_index(),
                },
            );
            let actual_shared_ty = builder.ins().load(
                ir::types::I32,
                ir::MemFlags::trusted().with_readonly(),
                ty_addr,
                0,
            );

            func_env.is_subtype(builder, actual_shared_ty, expected_shared_ty)
        }

        // Concrete function types: the type index is loaded from the
        // funcref itself rather than a GC header.
        WasmHeapType::ConcreteFunc(ty) => {
            let expected_interned_ty = ty.unwrap_module_type_index();
            let expected_shared_ty =
                func_env.module_interned_to_shared_ty(&mut builder.cursor(), expected_interned_ty);

            let actual_shared_ty = func_env.load_funcref_type_index(
                &mut builder.cursor(),
                ir::MemFlags::trusted().with_readonly(),
                val,
            );

            func_env.is_subtype(builder, actual_shared_ty, expected_shared_ty)
        }

        WasmHeapType::ConcreteCont(_) => {
            return Err(wasmtime_environ::WasmError::Unsupported(
                "Stack switching feature not compatible with GC, yet".to_string(),
            ));
        }
    };
    builder.ins().jump(continue_block, &[result.into()]);

    // The final result flows into `continue_block` as a block parameter
    // from all three predecessors.
    builder.switch_to_block(continue_block);
    let result = builder.append_block_param(continue_block, ir::types::I32);
    log::trace!("translate_ref_test(..) -> {result:?}");
    builder.seal_block(non_null_block);
    builder.seal_block(non_null_non_i31_block);
    builder.seal_block(continue_block);
    Ok(result)
}
/// Zero-extend an `i32` GC reference into the target's pointer type.
///
/// This is a no-op on 32-bit targets; on 64-bit targets it emits a `uextend`.
/// Panics if `value` is not an `i32` or if `pointer_type` is neither `i32`
/// nor `i64`.
fn uextend_i32_to_pointer_type(
    builder: &mut FunctionBuilder,
    pointer_type: ir::Type,
    value: ir::Value,
) -> ir::Value {
    assert_eq!(builder.func.dfg.value_type(value), ir::types::I32);
    if pointer_type == ir::types::I64 {
        builder.ins().uextend(ir::types::I64, value)
    } else if pointer_type == ir::types::I32 {
        value
    } else {
        unreachable!()
    }
}
/// Emit code computing the total byte size of a GC array with `len` elements
/// of the given layout, trapping with `TRAP_ALLOCATION_TOO_LARGE` if that
/// size does not fit in a `u32`.
#[cfg_attr(
    not(any(feature = "gc-drc", feature = "gc-null")),
    expect(dead_code, reason = "easier to define")
)]
fn emit_array_size(
    func_env: &mut FuncEnvironment<'_>,
    builder: &mut FunctionBuilder<'_>,
    array_layout: &GcArrayLayout,
    len: ir::Value,
) -> ir::Value {
    // size = base_size + len * elem_size
    let base = builder
        .ins()
        .iconst(ir::types::I32, i64::from(array_layout.base_size));

    debug_assert_eq!(builder.func.dfg.value_type(len), ir::types::I32);

    // Do the element multiplication in 64 bits so it cannot wrap, then trap
    // if the product has any bits set above the low 32.
    let len64 = builder.ins().uextend(ir::types::I64, len);
    let elems_bytes64 = builder
        .ins()
        .imul_imm(len64, i64::from(array_layout.elem_size));
    let overflow_bits = builder.ins().ushr_imm(elems_bytes64, 32);
    func_env.trapnz(builder, overflow_bits, crate::TRAP_ALLOCATION_TOO_LARGE);
    let elems_bytes = builder.ins().ireduce(ir::types::I32, elems_bytes64);

    // Finally add in the base size, trapping on 32-bit overflow.
    func_env.uadd_overflow_trap(
        builder,
        base,
        elems_bytes,
        crate::TRAP_ALLOCATION_TOO_LARGE,
    )
}
/// Initialize every field of a newly-allocated struct (or exception) object.
///
/// `raw_ptr_to_struct` is a raw address of the object's data in the GC heap;
/// `init_field` is invoked once per field with that field's storage type, its
/// computed address, and its initial value from `field_values`.
#[cfg_attr(
    not(any(feature = "gc-drc", feature = "gc-null")),
    expect(dead_code, reason = "easier to define")
)]
fn initialize_struct_fields(
    func_env: &mut FuncEnvironment<'_>,
    builder: &mut FunctionBuilder<'_>,
    struct_ty: ModuleInternedTypeIndex,
    raw_ptr_to_struct: ir::Value,
    field_values: &[ir::Value],
    mut init_field: impl FnMut(
        &mut FuncEnvironment<'_>,
        &mut FunctionBuilder<'_>,
        WasmStorageType,
        ir::Value,
        ir::Value,
    ) -> WasmResult<()>,
) -> WasmResult<()> {
    // Copy the layout's offsets (and, below, the field types) into owned
    // vectors up front: both borrow `func_env`, which `init_field` needs
    // mutably inside the loop.
    let layout = func_env.struct_or_exn_layout(struct_ty);
    let struct_size = layout.size;
    let offsets: SmallVec<[_; 8]> = layout.fields.iter().map(|f| f.offset).collect();
    assert_eq!(offsets.len(), field_values.len());

    assert!(!func_env.types[struct_ty].composite_type.shared);
    let field_tys: SmallVec<[_; 8]> = match &func_env.types[struct_ty].composite_type.inner {
        WasmCompositeInnerType::Struct(s) => &s.fields,
        WasmCompositeInnerType::Exn(e) => &e.fields,
        _ => panic!("Not a struct or exception type"),
    }
    .iter()
    .cloned()
    .collect();
    assert_eq!(field_tys.len(), field_values.len());

    for ((ty, val), offset) in field_tys.into_iter().zip(field_values).zip(offsets) {
        let access_size = wasmtime_environ::byte_size_of_wasm_ty_in_gc_heap(&ty.element_type);
        // The whole access must land within the object.
        assert!(offset + access_size <= struct_size);
        let field_addr = builder.ins().iadd_imm(raw_ptr_to_struct, i64::from(offset));
        init_field(func_env, builder, ty.element_type, field_addr, *val)?;
    }
    Ok(())
}
impl FuncEnvironment<'_> {
    /// Get the GC layout for the type at `type_index`, computing and
    /// memoizing it on first use.
    ///
    /// Panics (via `expect`) if `type_index` does not refer to a GC-managed
    /// type.
    fn gc_layout(&mut self, type_index: ModuleInternedTypeIndex) -> &GcLayout {
        // A `contains_key`/`insert`/`get` sequence rather than the entry API:
        // computing the layout requires `&mut self` (via `gc_compiler`),
        // which cannot coexist with a live borrow of the map.
        if !self.ty_to_gc_layout.contains_key(&type_index) {
            let ty = &self.types[type_index].composite_type;
            let layout = gc_compiler(self)
                .unwrap()
                .layouts()
                .gc_layout(ty)
                .expect("should only call `FuncEnvironment::gc_layout` for GC types");
            self.ty_to_gc_layout.insert(type_index, layout);
        }
        self.ty_to_gc_layout.get(&type_index).unwrap()
    }

    /// Get the GC array layout for the array type at `type_index`.
    ///
    /// Panics if the type is not an array type.
    fn array_layout(&mut self, type_index: ModuleInternedTypeIndex) -> &GcArrayLayout {
        self.gc_layout(type_index).unwrap_array()
    }

    /// Get the GC struct layout for the struct or exception type at
    /// `type_index`.
    ///
    /// Panics if the type is not a struct or exception type.
    fn struct_or_exn_layout(&mut self, type_index: ModuleInternedTypeIndex) -> &GcStructLayout {
        self.gc_layout(type_index).unwrap_struct()
    }

    /// Get (creating and caching on first use) the global value holding the
    /// GC heap's base address, loaded from the `VMStoreContext`.
    fn get_gc_heap_base_global(&mut self, func: &mut ir::Function) -> ir::GlobalValue {
        if let Some(base) = self.gc_heap_base {
            return base;
        }
        let store_context_ptr = self.get_vmstore_context_ptr_global(func);
        let offset = self.offsets.ptr.vmstore_context_gc_heap_base();
        let mut flags = ir::MemFlags::trusted();
        // When the GC heap memory can never move, its base address is
        // effectively constant, so mark the load read-only and movable to let
        // Cranelift hoist and deduplicate it.
        if !self
            .tunables
            .gc_heap_memory_type()
            .memory_may_move(self.tunables)
        {
            flags.set_readonly();
            flags.set_can_move();
        }
        let base = func.create_global_value(ir::GlobalValueData::Load {
            base: store_context_ptr,
            offset: Offset32::new(offset.into()),
            global_type: self.pointer_type(),
            flags,
        });
        self.gc_heap_base = Some(base);
        base
    }

    /// Emit code materializing the GC heap's base address as an SSA value.
    #[cfg(any(feature = "gc-null", feature = "gc-drc"))]
    fn get_gc_heap_base(&mut self, builder: &mut FunctionBuilder) -> ir::Value {
        let global = self.get_gc_heap_base_global(&mut builder.func);
        builder.ins().global_value(self.pointer_type(), global)
    }

    /// Get (creating and caching on first use) the global value holding the
    /// GC heap's current length (its bound), loaded from the
    /// `VMStoreContext`.
    ///
    /// Note: unlike the base, this load is never marked read-only.
    fn get_gc_heap_bound_global(&mut self, func: &mut ir::Function) -> ir::GlobalValue {
        if let Some(bound) = self.gc_heap_bound {
            return bound;
        }
        let store_context_ptr = self.get_vmstore_context_ptr_global(func);
        let offset = self.offsets.ptr.vmstore_context_gc_heap_current_length();
        let bound = func.create_global_value(ir::GlobalValueData::Load {
            base: store_context_ptr,
            offset: Offset32::new(offset.into()),
            global_type: self.pointer_type(),
            flags: ir::MemFlags::trusted(),
        });
        self.gc_heap_bound = Some(bound);
        bound
    }

    /// Emit code materializing the GC heap's bound as an SSA value.
    #[cfg(feature = "gc-null")]
    fn get_gc_heap_bound(&mut self, builder: &mut FunctionBuilder) -> ir::Value {
        let global = self.get_gc_heap_bound_global(&mut builder.func);
        builder.ins().global_value(self.pointer_type(), global)
    }

    /// Get (creating and caching on first use) the `Heap` entry describing
    /// the GC heap, for use with the shared bounds-checking machinery.
    fn get_gc_heap(&mut self, func: &mut ir::Function) -> Heap {
        if let Some(heap) = self.gc_heap {
            return heap;
        }
        let base = self.get_gc_heap_base_global(func);
        let bound = self.get_gc_heap_bound_global(func);
        let memory = self.tunables.gc_heap_memory_type();
        let heap = self.heaps.push(HeapData {
            base,
            bound,
            memory,
        });
        self.gc_heap = Some(heap);
        heap
    }

    /// Bounds-check the access described by `bounds_check` and translate the
    /// `i32` GC reference `gc_ref` into a raw native address within the GC
    /// heap.
    ///
    /// Failed checks trap with `TRAP_INTERNAL_ASSERT`.
    fn prepare_gc_ref_access(
        &mut self,
        builder: &mut FunctionBuilder,
        gc_ref: ir::Value,
        bounds_check: BoundsCheck,
    ) -> ir::Value {
        log::trace!("prepare_gc_ref_access({gc_ref:?}, {bounds_check:?})");
        assert_eq!(builder.func.dfg.value_type(gc_ref), ir::types::I32);

        let gc_heap = self.get_gc_heap(&mut builder.func);
        let gc_heap = self.heaps[gc_heap].clone();
        let result = match crate::bounds_checks::bounds_check_and_compute_addr(
            builder,
            self,
            &gc_heap,
            gc_ref,
            bounds_check,
            crate::TRAP_INTERNAL_ASSERT,
        ) {
            Reachability::Reachable(v) => v,
            Reachability::Unreachable => {
                // The bounds check determined code after this point is
                // unreachable: emit an unconditional trap (`trapz` on a
                // constant zero always traps) and yield a null pointer just
                // to keep the IR well-formed.
                let null = builder.ins().iconst(self.pointer_type(), 0);
                builder.ins().trapz(null, crate::TRAP_INTERNAL_ASSERT);
                null
            }
        };

        log::trace!("prepare_gc_ref_access(..) -> {result:?}");
        result
    }

    /// Emit code testing whether `gc_ref` is null or an `i31ref`, yielding a
    /// non-zero value if so.
    ///
    /// Cases that are statically impossible given `ty`'s nullability and heap
    /// type are folded away.
    #[cfg_attr(
        not(feature = "gc-drc"),
        expect(dead_code, reason = "easier to define")
    )]
    fn gc_ref_is_null_or_i31(
        &mut self,
        builder: &mut FunctionBuilder,
        ty: WasmRefType,
        gc_ref: ir::Value,
    ) -> ir::Value {
        assert_eq!(builder.func.dfg.value_type(gc_ref), ir::types::I32);
        assert!(ty.is_vmgcref_type_and_not_i31());

        // Can values of this heap type be `i31`s at all?
        let might_be_i31 = match ty.heap_type {
            // Precluded by the `is_vmgcref_type_and_not_i31` assertion above.
            WasmHeapType::I31 => unreachable!(),
            WasmHeapType::Any | WasmHeapType::Eq => true,
            WasmHeapType::Array
            | WasmHeapType::ConcreteArray(_)
            | WasmHeapType::Struct
            | WasmHeapType::ConcreteStruct(_)
            | WasmHeapType::None => false,
            WasmHeapType::Extern => true,
            WasmHeapType::NoExtern => false,
            WasmHeapType::Exn | WasmHeapType::ConcreteExn(_) | WasmHeapType::NoExn => false,
            WasmHeapType::Func | WasmHeapType::ConcreteFunc(_) | WasmHeapType::NoFunc => {
                unreachable!()
            }
            WasmHeapType::Cont | WasmHeapType::ConcreteCont(_) | WasmHeapType::NoCont => {
                unreachable!()
            }
        };

        // NOTE(review): the `(true, false)` arm yields `icmp`'s `i8` result
        // while the other arms yield `i32` values; presumably callers only
        // consume this as a truthy condition — confirm at call sites.
        match (ty.nullable, might_be_i31) {
            // Non-nullable and cannot be an i31: statically false.
            (false, false) => builder.ins().iconst(ir::types::I32, 0),
            // Only the i31 discriminant test is needed.
            (false, true) => builder.ins().band_imm(gc_ref, i64::from(I31_DISCRIMINANT)),
            // Only the null test is needed.
            (true, false) => builder.ins().icmp_imm(IntCC::Equal, gc_ref, 0),
            // Both tests, OR'd together.
            (true, true) => {
                let is_i31 = builder.ins().band_imm(gc_ref, i64::from(I31_DISCRIMINANT));
                let is_null = builder.ins().icmp_imm(IntCC::Equal, gc_ref, 0);
                let is_null = builder.ins().uextend(ir::types::I32, is_null);
                builder.ins().bor(is_i31, is_null)
            }
        }
    }

    /// Emit code testing whether the type indexed by `a` is a subtype of the
    /// type indexed by `b`, yielding a non-zero `i32` if so.
    ///
    /// Equal indices are handled inline as a fast path; otherwise the
    /// `is_subtype` builtin performs the full check out of line.
    pub(crate) fn is_subtype(
        &mut self,
        builder: &mut FunctionBuilder<'_>,
        a: ir::Value,
        b: ir::Value,
    ) -> ir::Value {
        log::trace!("is_subtype({a:?}, {b:?})");

        let diff_tys_block = builder.create_block();
        let continue_block = builder.create_block();

        // Fast path: `a == b` implies subtyping, so branch straight to the
        // continuation passing the (true) comparison result along.
        log::trace!("is_subtype: fast path check for exact same types");
        let same_ty = builder.ins().icmp(IntCC::Equal, a, b);
        let same_ty = builder.ins().uextend(ir::types::I32, same_ty);
        builder.ins().brif(
            same_ty,
            continue_block,
            &[same_ty.into()],
            diff_tys_block,
            &[],
        );

        // Slow path: call the `is_subtype` builtin libcall.
        builder.switch_to_block(diff_tys_block);
        log::trace!("is_subtype: slow path to do full `is_subtype` libcall");
        let is_subtype = self.builtin_functions.is_subtype(builder.func);
        let vmctx = self.vmctx_val(&mut builder.cursor());
        let call_inst = builder.ins().call(is_subtype, &[vmctx, a, b]);
        let result = builder.func.dfg.first_result(call_inst);
        builder.ins().jump(continue_block, &[result.into()]);

        // Join point: the result arrives as a block parameter from whichever
        // predecessor executed.
        builder.switch_to_block(continue_block);
        let result = builder.append_block_param(continue_block, ir::types::I32);
        log::trace!("is_subtype(..) -> {result:?}");

        builder.seal_block(diff_tys_block);
        builder.seal_block(continue_block);

        result
    }
}