#[cfg(feature = "gc-drc")]
pub mod drc;
#[cfg(feature = "gc-null")]
pub mod null;
use crate::{
WasmArrayType, WasmCompositeInnerType, WasmCompositeType, WasmStorageType, WasmStructType,
WasmValType,
};
use crate::{WasmExnType, prelude::*};
use core::alloc::Layout;
/// Discriminant value used to mark `i31` references.
///
/// NOTE(review): presumably this is the tag stored in a GC reference's low
/// bits to distinguish `i31ref`s from heap pointers -- confirm against the
/// GC reference encoding.
pub const I31_DISCRIMINANT: u32 = 1;

/// The size, in bytes, of the common GC object header: a 4-byte kind word
/// followed by a 4-byte type index (see the `*_OFFSET` constants below).
pub const VM_GC_HEADER_SIZE: u32 = 8;

/// The required alignment, in bytes, of the common GC object header.
pub const VM_GC_HEADER_ALIGN: u32 = 8;

/// The offset, in bytes, of the `VMGcKind` word within the common header.
pub const VM_GC_HEADER_KIND_OFFSET: u32 = 0;

/// The offset, in bytes, of the type-index word within the common header.
pub const VM_GC_HEADER_TYPE_INDEX_OFFSET: u32 = 4;
/// Returns the size, in bytes, that a value of the given storage type
/// occupies inside the GC heap.
///
/// Packed storage types (`i8`/`i16`) take their packed size; GC references
/// are stored as 4-byte values.
pub fn byte_size_of_wasm_ty_in_gc_heap(ty: &WasmStorageType) -> u32 {
    // Flattened match over both the storage type and, for full value types,
    // the inner `WasmValType`. Every result is a power of two, which the
    // layout helpers below rely on for alignment.
    match ty {
        WasmStorageType::I8 => 1,
        WasmStorageType::I16 => 2,
        WasmStorageType::Val(WasmValType::I32 | WasmValType::F32 | WasmValType::Ref(_)) => 4,
        WasmStorageType::Val(WasmValType::I64 | WasmValType::F64) => 8,
        WasmStorageType::Val(WasmValType::V128) => 16,
    }
}
#[cfg(any(feature = "gc-drc", feature = "gc-null"))]
/// Round `*offset` up to `align` (which must be a power of two), recording
/// `align` into `*max_align` if it is the largest alignment seen so far.
/// Returns the rounded offset. Panics on `u32` overflow.
fn align_up(offset: &mut u32, max_align: &mut u32, align: u32) -> u32 {
    debug_assert!(max_align.is_power_of_two());
    debug_assert!(align.is_power_of_two());

    // Standard power-of-two round-up: add `align - 1`, then clear the low
    // bits. `checked_add` turns overflow into a panic instead of a wrap.
    let mask = align - 1;
    *offset = offset.checked_add(mask).unwrap() & !mask;

    if align > *max_align {
        *max_align = align;
    }

    *offset
}
#[cfg(any(feature = "gc-drc", feature = "gc-null"))]
/// Append a `bytes`-sized, `bytes`-aligned field to a layout being
/// accumulated in `size`/`align`, returning the field's offset.
fn field(size: &mut u32, align: &mut u32, bytes: u32) -> u32 {
    // Fields are aligned to their own size; callers only ever pass
    // power-of-two sizes (enforced by `align_up`'s debug assertion).
    let offset = align_up(size, align, bytes);
    *size += bytes;
    offset
}
#[cfg(any(feature = "gc-drc", feature = "gc-null"))]
/// Compute the `GcArrayLayout` shared between collectors: a
/// collector-specific header, then the `u32` array length, then the
/// tightly-packed elements.
fn common_array_layout(
    ty: &WasmArrayType,
    header_size: u32,
    header_align: u32,
    expected_array_length_offset: u32,
) -> GcArrayLayout {
    use core::mem;

    // A collector's header must be at least as large and as aligned as the
    // common GC header it embeds.
    assert!(header_size >= crate::VM_GC_HEADER_SIZE);
    assert!(header_align >= crate::VM_GC_HEADER_ALIGN);

    let mut size = header_size;
    let mut align = header_align;

    // The `u32` length field comes directly after the header. The caller
    // passes in where it expects that field to be, so we can double-check
    // the collector's hard-coded offset against the computed layout.
    let length_field_size = u32::try_from(mem::size_of::<u32>()).unwrap();
    let length_field_offset = field(&mut size, &mut align, length_field_size);
    assert_eq!(length_field_offset, expected_array_length_offset);

    // Elements begin at the next offset aligned for one element. Aligning to
    // `elem_size` is sound because every size produced by
    // `byte_size_of_wasm_ty_in_gc_heap` (1, 2, 4, 8, or 16) is a power of two.
    let elem_size = byte_size_of_wasm_ty_in_gc_heap(&ty.0.element_type);
    let elems_offset = align_up(&mut size, &mut align, elem_size);
    assert_eq!(elems_offset, size);

    let elems_are_gc_refs = ty.0.element_type.is_vmgcref_type_and_not_i31();
    if elems_are_gc_refs {
        // With 4-byte GC refs following the 4-byte length field there can be
        // no padding between them; the DRC collector depends on that.
        debug_assert_eq!(
            length_field_offset + length_field_size,
            elems_offset,
            "DRC collector relies on GC ref elements appearing directly after the length field, without any padding",
        );
    }

    GcArrayLayout {
        // `size` currently covers the header plus the length field only; the
        // per-element storage is added at allocation time from `elem_size`.
        base_size: size,
        align,
        elem_size,
        elems_are_gc_refs,
    }
}
#[cfg(any(feature = "gc-null", feature = "gc-drc"))]
/// Lay out the user-visible fields of a struct or exception object after a
/// collector-specific header, returning `(size, align, field_layouts)`.
fn common_struct_or_exn_layout(
    fields: &[crate::WasmFieldType],
    header_size: u32,
    header_align: u32,
) -> (u32, u32, Vec<GcStructLayoutField>) {
    // `size` and `align` accumulate as each field is appended, starting from
    // the collector-provided header.
    let mut size = header_size;
    let mut align = header_align;

    // Fields are laid out in declaration order; the closure mutates the
    // accumulators above, so iteration order matters here.
    let fields = fields
        .iter()
        .map(|f| {
            let field_size = byte_size_of_wasm_ty_in_gc_heap(&f.element_type);
            let offset = field(&mut size, &mut align, field_size);
            let is_gc_ref = f.element_type.is_vmgcref_type_and_not_i31();
            GcStructLayoutField { offset, is_gc_ref }
        })
        .collect();

    // Round the final size up to the object's overall alignment. (`align` is
    // copied out first; passing the current max back in cannot increase it.)
    let align_size_to = align;
    align_up(&mut size, &mut align, align_size_to);

    (size, align, fields)
}
#[cfg(any(feature = "gc-null", feature = "gc-drc"))]
/// Common layout code for structs, shared between collectors: a
/// collector-specific header followed by the struct's fields.
fn common_struct_layout(
    ty: &WasmStructType,
    header_size: u32,
    header_align: u32,
) -> GcStructLayout {
    // A collector's header must be at least as large and as aligned as the
    // common GC header it embeds.
    assert!(header_size >= crate::VM_GC_HEADER_SIZE);
    assert!(header_align >= crate::VM_GC_HEADER_ALIGN);

    let (size, align, fields) = common_struct_or_exn_layout(&ty.fields, header_size, header_align);
    GcStructLayout { size, align, fields, is_exception: false }
}
#[cfg(any(feature = "gc-null", feature = "gc-drc"))]
/// Common layout code for exception objects, shared between collectors.
///
/// Exceptions are laid out like structs, except the header is extended by two
/// extra `u32` slots (presumably the tag instance/defined words exposed via
/// `GcTypeLayouts::exception_tag_*_offset` -- confirm against the collector
/// implementations).
fn common_exn_layout(ty: &WasmExnType, header_size: u32, header_align: u32) -> GcStructLayout {
    assert!(header_size >= crate::VM_GC_HEADER_SIZE);
    assert!(header_align >= crate::VM_GC_HEADER_ALIGN);
    assert!(header_align >= 8);

    // Reserve the two additional `u32`s of header space before the payload
    // fields are laid out.
    let tag_slots = 2 * u32::try_from(core::mem::size_of::<u32>()).unwrap();
    let (size, align, fields) =
        common_struct_or_exn_layout(&ty.fields, header_size + tag_slots, header_align);

    GcStructLayout { size, align, fields, is_exception: true }
}
/// A trait providing the collector-specific offsets and layouts of objects in
/// the GC heap.
pub trait GcTypeLayouts {
    /// The offset, in bytes, of an array object's `u32` length field.
    fn array_length_field_offset(&self) -> u32;

    /// The offset, in bytes, of an exception object's tag "instance" word.
    /// (The precise semantics of the two tag words live in the collector
    /// implementations -- confirm there.)
    fn exception_tag_instance_offset(&self) -> u32;

    /// The offset, in bytes, of an exception object's tag "defined" word.
    fn exception_tag_defined_offset(&self) -> u32;

    /// Get this collector's layout for the given composite type, or `None`
    /// for types (functions) that have no GC-heap layout.
    fn gc_layout(&self, ty: &WasmCompositeType) -> Option<GcLayout> {
        // Shared composite types are not supported here.
        assert!(!ty.shared);
        match &ty.inner {
            WasmCompositeInnerType::Array(ty) => Some(self.array_layout(ty).into()),
            WasmCompositeInnerType::Struct(ty) => Some(self.struct_layout(ty).into()),
            WasmCompositeInnerType::Func(_) => None,
            WasmCompositeInnerType::Cont(_) => {
                unimplemented!("Stack switching feature not compatible with GC, yet")
            }
            WasmCompositeInnerType::Exn(ty) => Some(self.exn_layout(ty).into()),
        }
    }

    /// Get this collector's layout for the given array type.
    fn array_layout(&self, ty: &WasmArrayType) -> GcArrayLayout;

    /// Get this collector's layout for the given struct type.
    fn struct_layout(&self, ty: &WasmStructType) -> GcStructLayout;

    /// Get this collector's layout for the given exception type.
    fn exn_layout(&self, ty: &WasmExnType) -> GcStructLayout;
}
/// The layout of a GC-managed object: either an array or a struct (exception
/// objects reuse the struct layout).
#[derive(Clone, Debug)]
pub enum GcLayout {
    /// The layout of a GC-managed array object.
    Array(GcArrayLayout),
    /// The layout of a GC-managed struct (or exception) object.
    Struct(GcStructLayout),
}
impl From<GcArrayLayout> for GcLayout {
    // Wrap an array layout into the `GcLayout` enum.
    fn from(layout: GcArrayLayout) -> Self {
        Self::Array(layout)
    }
}
impl From<GcStructLayout> for GcLayout {
    // Wrap a struct layout into the `GcLayout` enum.
    fn from(layout: GcStructLayout) -> Self {
        Self::Struct(layout)
    }
}
impl GcLayout {
    /// Get the underlying `GcStructLayout`.
    ///
    /// Panics if this is not a struct layout.
    #[track_caller]
    pub fn unwrap_struct(&self) -> &GcStructLayout {
        if let Self::Struct(s) = self {
            s
        } else {
            panic!("GcLayout::unwrap_struct on non-struct GC layout")
        }
    }

    /// Get the underlying `GcArrayLayout`.
    ///
    /// Panics if this is not an array layout.
    #[track_caller]
    pub fn unwrap_array(&self) -> &GcArrayLayout {
        if let Self::Array(a) = self {
            a
        } else {
            panic!("GcLayout::unwrap_array on non-array GC layout")
        }
    }
}
/// The layout of a GC-managed array object.
#[derive(Clone, Debug)]
pub struct GcArrayLayout {
    /// The size, in bytes, of the fixed-width prefix (collector header plus
    /// the `u32` length field) that precedes the elements.
    pub base_size: u32,
    /// The required alignment of the object, in bytes (a power of two).
    pub align: u32,
    /// The size, in bytes, of each element.
    pub elem_size: u32,
    /// Whether the elements are (non-`i31`) GC references.
    pub elems_are_gc_refs: bool,
}

impl GcArrayLayout {
    /// The total size, in bytes, of an array object holding `len` elements.
    #[inline]
    pub fn size_for_len(&self, len: u32) -> u32 {
        // The offset one-past the final element is exactly the object size.
        self.elem_offset(len)
    }

    /// The byte offset of the `i`th element from the start of the object.
    #[inline]
    pub fn elem_offset(&self, i: u32) -> u32 {
        i * self.elem_size + self.base_size
    }

    /// A `core::alloc::Layout` describing an array of `len` elements.
    pub fn layout(&self, len: u32) -> Layout {
        let size = usize::try_from(self.size_for_len(len)).unwrap();
        let align = usize::try_from(self.align).unwrap();
        Layout::from_size_align(size, align).unwrap()
    }
}
/// The layout of a GC-managed struct object (also reused for exception
/// objects, distinguished by `is_exception`).
#[derive(Clone, Debug)]
pub struct GcStructLayout {
    /// The total size of the object, in bytes (a multiple of `align`).
    pub size: u32,
    /// The required alignment of the object, in bytes (a power of two).
    pub align: u32,
    /// The layout of each user-visible field, in declaration order.
    pub fields: Vec<GcStructLayoutField>,
    /// Whether this layout describes an exception object.
    pub is_exception: bool,
}

impl GcStructLayout {
    /// A `core::alloc::Layout` describing this struct object.
    pub fn layout(&self) -> Layout {
        let size = usize::try_from(self.size).unwrap();
        let align = usize::try_from(self.align).unwrap();
        Layout::from_size_align(size, align).unwrap()
    }
}

/// The layout of a single field within a GC-managed struct.
#[derive(Clone, Copy, Debug)]
pub struct GcStructLayoutField {
    /// The field's byte offset from the start of the object.
    pub offset: u32,
    /// Whether the field holds a (non-`i31`) GC reference.
    pub is_gc_ref: bool,
}
/// The kind of a GC-managed object, stored in the top six bits of the
/// header's kind word (see `VMGcKind::MASK`).
///
/// The bit patterns encode the subtype hierarchy: kind `a` matches kind `b`
/// exactly when `a`'s bits are a superset of `b`'s (see `VMGcKind::matches`).
#[repr(u32)]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
#[rustfmt::skip]
#[expect(missing_docs, reason = "self-describing variants")]
pub enum VMGcKind {
    ExternRef = 0b010000 << 26,
    AnyRef    = 0b100000 << 26,
    EqRef     = 0b101000 << 26,
    ArrayRef  = 0b101010 << 26,
    StructRef = 0b101100 << 26,
    ExnRef    = 0b000001 << 26,
}

/// The size of a `VMGcKind`, in bytes.
pub const VM_GC_KIND_SIZE: u8 = 4;

// Keep the constant in sync with the enum's actual representation.
const _: () = assert!(VM_GC_KIND_SIZE as usize == core::mem::size_of::<VMGcKind>());

impl VMGcKind {
    /// The mask covering the bits a `VMGcKind` occupies: the six highest
    /// bits of a `u32`.
    pub const MASK: u32 = 0b111111 << 26;

    /// The bits left over for other uses: the low 26 bits of a `u32`.
    pub const UNUSED_MASK: u32 = !Self::MASK;

    /// Does `value` fit entirely within the bits not used by the kind?
    #[inline]
    pub fn value_fits_in_unused_bits(value: u32) -> bool {
        // Equivalent to `(value & UNUSED_MASK) == value`: no kind bits set.
        value & Self::MASK == 0
    }

    /// Recover a `VMGcKind` from the high bits of `val`, ignoring the low,
    /// unused bits.
    ///
    /// Panics when the high bits are not a valid kind encoding.
    #[inline]
    pub fn from_high_bits_of_u32(val: u32) -> VMGcKind {
        let masked = val & Self::MASK;
        let known = [
            Self::ExternRef,
            Self::AnyRef,
            Self::EqRef,
            Self::ArrayRef,
            Self::StructRef,
            Self::ExnRef,
        ];
        match known.into_iter().find(|k| k.as_u32() == masked) {
            Some(kind) => kind,
            None => panic!("invalid `VMGcKind`: {masked:#032b}"),
        }
    }

    /// Is this kind equal to, or a subtype of, `other`?
    ///
    /// A kind matches another when it carries all of the other's bits.
    #[inline]
    pub fn matches(self, other: Self) -> bool {
        let wanted = other.as_u32();
        self.as_u32() & wanted == wanted
    }

    /// This kind's raw `u32` bit pattern.
    #[inline]
    pub fn as_u32(self) -> u32 {
        self as u32
    }
}
#[cfg(test)]
mod tests {
    use super::VMGcKind::*;
    use crate::prelude::*;

    /// Exhaustively checks `VMGcKind::matches` against the intended subtype
    /// hierarchy: `array <: eq <: any` and `struct <: eq <: any`, with
    /// `extern` and `exn` unrelated to everything else.
    #[test]
    fn kind_matches() {
        let all = [ExternRef, AnyRef, EqRef, ArrayRef, StructRef, ExnRef];
        // Each entry pairs a kind with the set of its strict subtypes.
        for (sup, subs) in [
            (ExternRef, vec![]),
            (AnyRef, vec![EqRef, ArrayRef, StructRef]),
            (EqRef, vec![ArrayRef, StructRef]),
            (ArrayRef, vec![]),
            (StructRef, vec![]),
            (ExnRef, vec![]),
        ] {
            // Every kind matches itself...
            assert!(sup.matches(sup));
            // ...each subtype matches its supertype...
            for sub in &subs {
                assert!(sub.matches(sup));
            }
            // ...and no unrelated kind matches it.
            for kind in all.iter().filter(|k| **k != sup && !subs.contains(k)) {
                assert!(!kind.matches(sup));
            }
        }
    }
}