use crate::Engine;
use crate::error::OutOfMemory;
use crate::prelude::*;
use crate::sync::RwLock;
use crate::vm::GcRuntime;
use alloc::borrow::Cow;
use alloc::sync::Arc;
use core::cell::Cell;
use core::iter;
use core::{
borrow::Borrow,
fmt::{self, Debug},
hash::{Hash, Hasher},
ops::Range,
sync::atomic::{
AtomicBool, AtomicUsize,
Ordering::{AcqRel, Acquire, Release},
},
};
use wasmtime_core::slab::{Id as SlabId, Slab};
use wasmtime_environ::{
EngineOrModuleTypeIndex, EntityRef, GcLayout, ModuleInternedTypeIndex, ModuleTypes, TypeTrace,
Undo, VMSharedTypeIndex, WasmRecGroup, WasmSubType,
collections::{HashSet, PrimaryMap, SecondaryMap, TryClone as _, Vec},
iter_entity_range,
packed_option::{PackedOption, ReservedValue},
};
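/// A collection of rec groups that have been registered with, and
/// canonicalized for, an engine's type registry.
///
/// The collection holds a registration count on each of its rec groups;
/// dropping it unregisters them (see the `Drop` impl below).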
pub struct TypeCollection {
engine: Engine,
/// The rec group entries that this collection holds registrations for.
rec_groups: Vec<RecGroupEntry>,
/// The map from module-interned type indices to engine-shared indices.
types: PrimaryMap<ModuleInternedTypeIndex, VMSharedTypeIndex>,
/// A map from engine-shared trampoline type indices to this module's
/// interned trampoline function types.
trampolines: SecondaryMap<VMSharedTypeIndex, PackedOption<ModuleInternedTypeIndex>>,
}
impl Debug for TypeCollection {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let TypeCollection {
engine: _,
rec_groups,
types,
trampolines,
} = self;
f.debug_struct("TypeCollection")
.field("rec_groups", rec_groups)
.field("types", types)
.field("trampolines", trampolines)
.finish_non_exhaustive()
}
}
impl Engine {
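/// Register the given module types with this engine, canonicalizing them
/// for runtime usage in the process.
///
/// Both `module_types` and every module in `env_modules` are rewritten in
/// place so that all of their type references use engine-level
/// `VMSharedTypeIndex`es.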
#[must_use = "types are only registered as long as the `TypeCollection` is live"]
pub(crate) fn register_and_canonicalize_types<'a, I>(
&self,
module_types: &mut ModuleTypes,
env_modules: I,
) -> Result<TypeCollection, OutOfMemory>
where
I: IntoIterator<Item = &'a mut wasmtime_environ::Module>,
I::IntoIter: ExactSizeIterator,
{
if cfg!(debug_assertions) {
module_types
.trace(&mut |idx| match idx {
EngineOrModuleTypeIndex::Module(_) => Ok(()),
EngineOrModuleTypeIndex::Engine(_) | EngineOrModuleTypeIndex::RecGroup(_) => {
Err(idx)
}
})
.expect("should only have module type indices");
}
let engine = self.clone();
let registry = engine.signatures();
let gc_runtime = engine.gc_runtime().map(|rt| &**rt);
let (rec_groups, types) = registry
.0
.write()
.register_module_types(gc_runtime, module_types)?;
let mut trampolines = SecondaryMap::with_capacity(types.len())?;
for (module_ty, module_trampoline_ty) in module_types.trampoline_types() {
let shared_ty = types[module_ty];
let trampoline_shared_ty = registry.trampoline_type(shared_ty);
trampolines
.insert(trampoline_shared_ty, Some(module_trampoline_ty).into())
.expect("reserved space");
}
module_types.canonicalize_for_runtime_usage(&mut |idx| types[idx]);
for module in env_modules {
module.canonicalize_for_runtime_usage(&mut |idx| types[idx]);
}
Ok(TypeCollection {
engine,
rec_groups,
types,
trampolines,
})
}
}
impl TypeCollection {
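/// Get the map from `ModuleInternedTypeIndex` to `VMSharedTypeIndex` for
/// all types in this collection.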
pub fn as_module_map(&self) -> &PrimaryMap<ModuleInternedTypeIndex, VMSharedTypeIndex> {
&self.types
}
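/// Look up the engine-shared type index for the given module-interned
/// type index, if any.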
#[inline]
pub fn shared_type(&self, index: ModuleInternedTypeIndex) -> Option<VMSharedTypeIndex> {
let shared_ty = self.types.get(index).copied();
log::trace!("TypeCollection::shared_type({index:?}) -> {shared_ty:?}");
shared_ty
}
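/// Get the module-interned trampoline type for the given engine-shared
/// function type, if this collection has one.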
#[inline]
pub fn trampoline_type(&self, ty: VMSharedTypeIndex) -> Option<ModuleInternedTypeIndex> {
let trampoline_ty = self.trampolines[ty].expand();
log::trace!("TypeCollection::trampoline_type({ty:?}) -> {trampoline_ty:?}");
trampoline_ty
}
}
impl Drop for TypeCollection {
fn drop(&mut self) {
if !self.rec_groups.is_empty() {
self.engine
.signatures()
.0
.write()
.unregister_type_collection(self);
}
}
}
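/// Convert a `VMSharedTypeIndex` into the id of its slot in the
/// registry's `types` slab. The two index spaces share their raw `u32`
/// representation.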
#[inline]
fn shared_type_index_to_slab_id(index: VMSharedTypeIndex) -> SlabId {
assert!(!index.is_reserved_value());
SlabId::from_raw(index.bits())
}
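/// Convert a slab id back into the `VMSharedTypeIndex` for that slot.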
#[inline]
fn slab_id_to_shared_type_index(id: SlabId) -> VMSharedTypeIndex {
let index = VMSharedTypeIndex::new(id.into_raw());
assert!(!index.is_reserved_value());
index
}
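/// An RAII handle to a registered type.
///
/// The type (along with the rest of its rec group) is kept registered in
/// the engine's type registry for as long as any `RegisteredType` handle
/// pointing at it is live.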
pub struct RegisteredType {
engine: Engine,
entry: RecGroupEntry,
ty: Arc<WasmSubType>,
index: VMSharedTypeIndex,
layout: Option<GcLayout>,
}
impl Debug for RegisteredType {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let RegisteredType {
engine: _,
entry: _,
ty,
index,
layout,
} = self;
f.debug_struct("RegisteredType")
.field("index", index)
.field("ty", ty)
.field("layout", layout)
.finish_non_exhaustive()
}
}
impl Clone for RegisteredType {
fn clone(&self) -> Self {
self.engine.signatures().debug_assert_contains(self.index);
self.entry.incref("RegisteredType::clone");
RegisteredType {
engine: self.engine.clone(),
entry: self.entry.clone(),
ty: self.ty.clone(),
index: self.index,
layout: self.layout.clone(),
}
}
}
impl Drop for RegisteredType {
fn drop(&mut self) {
self.engine.signatures().debug_assert_contains(self.index);
if self.entry.decref("RegisteredType::drop") {
self.engine
.signatures()
.0
.write()
.unregister_entry(self.entry.clone());
}
}
}
impl core::ops::Deref for RegisteredType {
type Target = WasmSubType;
fn deref(&self) -> &Self::Target {
&self.ty
}
}
impl PartialEq for RegisteredType {
fn eq(&self, other: &Self) -> bool {
self.engine.signatures().debug_assert_contains(self.index);
other.engine.signatures().debug_assert_contains(other.index);
let eq = self.index == other.index && Engine::same(&self.engine, &other.engine);
if cfg!(debug_assertions) && eq {
assert!(Arc::ptr_eq(&self.entry.0, &other.entry.0));
assert_eq!(self.ty, other.ty);
}
eq
}
}
impl Eq for RegisteredType {}
impl Hash for RegisteredType {
fn hash<H: Hasher>(&self, state: &mut H) {
self.engine.signatures().debug_assert_contains(self.index);
let ptr = Arc::as_ptr(&self.entry.0);
ptr.hash(state);
}
}
impl RegisteredType {
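/// Register the given type as a singleton rec group in `engine`'s
/// registry, returning a handle that keeps it registered.
///
/// The type must already be canonicalized for runtime usage in this
/// engine's registry.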
pub fn new(engine: &Engine, ty: WasmSubType) -> Result<RegisteredType, OutOfMemory> {
let (entry, index, ty, layout) = {
log::trace!("RegisteredType::new({ty:?})");
let gc_runtime = engine.gc_runtime().map(|rt| &**rt);
let mut inner = engine.signatures().0.write();
inner.assert_canonicalized_for_runtime_usage_in_this_registry(&ty);
let entry = inner.register_singleton_rec_group(gc_runtime, ty)?;
let index = entry.0.shared_type_indices[0];
let id = shared_type_index_to_slab_id(index);
let ty = inner.types[id].clone().unwrap();
let layout = inner.type_to_gc_layout.get(index).and_then(|l| l.clone());
(entry, index, ty, layout)
};
Ok(RegisteredType::from_parts(
engine.clone(),
entry,
index,
ty,
layout,
))
}
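/// Construct a handle for a type that is already registered in `engine`,
/// incrementing its rec group's registration count.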
pub fn root(engine: &Engine, index: VMSharedTypeIndex) -> RegisteredType {
engine.signatures().debug_assert_contains(index);
let (entry, ty, layout) = {
let id = shared_type_index_to_slab_id(index);
let inner = engine.signatures().0.read();
let ty = inner.types[id].clone().unwrap();
let entry = inner.type_to_rec_group[index].clone().unwrap();
let layout = inner.type_to_gc_layout.get(index).and_then(|l| l.clone());
entry.incref("RegisteredType::root");
(entry, ty, layout)
};
RegisteredType::from_parts(engine.clone(), entry, index, ty, layout)
}
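/// Assemble a `RegisteredType` from its raw parts. The caller must have
/// already incremented `entry`'s registration count on behalf of this new
/// handle.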
fn from_parts(
engine: Engine,
entry: RecGroupEntry,
index: VMSharedTypeIndex,
ty: Arc<WasmSubType>,
layout: Option<GcLayout>,
) -> Self {
log::trace!(
"RegisteredType::from_parts({engine:?}, {entry:?}, {index:?}, {ty:?}, {layout:?})"
);
engine.signatures().debug_assert_contains(index);
debug_assert!(
entry.0.registrations.load(Acquire) != 0,
"entry should have a non-zero registration count"
);
RegisteredType {
engine,
entry,
ty,
index,
layout,
}
}
pub fn engine(&self) -> &Engine {
&self.engine
}
pub fn index(&self) -> VMSharedTypeIndex {
self.index
}
#[cfg(feature = "gc")]
pub fn layout(&self) -> Option<&GcLayout> {
self.layout.as_ref()
}
}
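/// A shared, registration-counted entry for a rec group in the registry.
///
/// Note that `Clone` only bumps the inner `Arc`; the registration count is
/// managed explicitly via `incref` and `decref`.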
#[derive(Clone)]
struct RecGroupEntry(Arc<RecGroupEntryInner>);
impl Debug for RecGroupEntry {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
struct FormatAsPtr<'a, P>(&'a P);
impl<P: fmt::Pointer> Debug for FormatAsPtr<'_, P> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{:#p}", *self.0)
}
}
f.debug_tuple("RecGroupEntry")
.field(&FormatAsPtr(&self.0))
.finish()
}
}
/// The shared inner state of a `RecGroupEntry`.
struct RecGroupEntryInner {
/// The rec group itself, canonicalized for hash consing.
hash_consing_key: WasmRecGroup,
/// The engine-level type index of each type in this rec group.
shared_type_indices: Box<[VMSharedTypeIndex]>,
/// The number of live registrations of this rec group.
registrations: AtomicUsize,
/// Whether this entry has already been removed from the registry.
unregistered: AtomicBool,
}
impl PartialEq for RecGroupEntry {
fn eq(&self, other: &Self) -> bool {
self.0.hash_consing_key == other.0.hash_consing_key
}
}
impl Eq for RecGroupEntry {}
impl Hash for RecGroupEntry {
fn hash<H: Hasher>(&self, state: &mut H) {
self.0.hash_consing_key.hash(state);
}
}
impl Borrow<WasmRecGroup> for RecGroupEntry {
#[inline]
fn borrow(&self) -> &WasmRecGroup {
&self.0.hash_consing_key
}
}
impl RecGroupEntry {
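/// Allocate an empty inner entry; `init` fills in the key and type
/// indices once they are known.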
fn new_inner() -> Result<Arc<RecGroupEntryInner>, OutOfMemory> {
try_new(RecGroupEntryInner {
hash_consing_key: Default::default(),
shared_type_indices: Default::default(),
registrations: AtomicUsize::new(0),
unregistered: AtomicBool::new(false),
})
}
fn init(
mut inner: Arc<RecGroupEntryInner>,
key: WasmRecGroup,
shared_types: Box<[VMSharedTypeIndex]>,
) -> Self {
debug_assert!(key.is_canonicalized_for_hash_consing());
let RecGroupEntryInner {
hash_consing_key,
shared_type_indices,
registrations: _,
unregistered: _,
} = Arc::get_mut(&mut inner).expect("must have the only handle to this inner entry");
debug_assert!(shared_type_indices.is_empty());
*shared_type_indices = shared_types;
debug_assert!(hash_consing_key.types.is_empty());
*hash_consing_key = key;
RecGroupEntry(inner)
}
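/// Increment this entry's registration count; `why` is only used for
/// trace logging.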
fn incref(&self, why: &str) {
let old_count = self.0.registrations.fetch_add(1, AcqRel);
log::trace!("incref({self:?}) -> count {}: {why}", old_count + 1);
}
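/// Decrement this entry's registration count, returning `true` if it
/// reached zero, in which case the caller must remove the entry from the
/// registry.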
#[must_use = "caller must remove entry from registry if `decref` returns `true`"]
fn decref(&self, why: &str) -> bool {
let old_count = self.0.registrations.fetch_sub(1, AcqRel);
debug_assert_ne!(old_count, 0);
log::trace!("decref({self:?}) -> count {}: {why}", old_count - 1);
old_count == 1
}
}
/// The lock-protected inner state of an engine's type registry.
#[derive(Debug, Default)]
struct TypeRegistryInner {
/// A hash-consing map from canonicalized rec groups to their registered
/// entries, used to deduplicate registrations.
hash_consing_map: HashSet<RecGroupEntry>,
/// The registered types themselves, indexed by the slab id encoded in
/// each `VMSharedTypeIndex`. A `None` slot has been reserved for a type
/// that is still being registered.
types: Slab<Option<Arc<WasmSubType>>>,
/// A map from each registered type to its rec group entry.
type_to_rec_group: SecondaryMap<VMSharedTypeIndex, Option<RecGroupEntry>>,
/// A map from each registered type to its full supertype list, ordered
/// from the root of its hierarchy down to its immediate supertype. Used
/// for constant-time subtype checks.
type_to_supertypes: SecondaryMap<VMSharedTypeIndex, Option<Box<[VMSharedTypeIndex]>>>,
/// A map from each registered function type to its trampoline type, if it
/// is not its own trampoline type.
type_to_trampoline: SecondaryMap<VMSharedTypeIndex, PackedOption<VMSharedTypeIndex>>,
/// A map from each registered GC type to its heap layout.
type_to_gc_layout: SecondaryMap<VMSharedTypeIndex, Option<GcLayout>>,
/// A pre-reserved stack of entries pending removal, so that unregistering
/// a rec group never needs to allocate.
drop_stack: Vec<RecGroupEntry>,
}
impl TypeRegistryInner {
#[inline]
#[track_caller]
fn debug_assert_registered(&self, index: VMSharedTypeIndex) {
debug_assert!(
!index.is_reserved_value(),
"should have an actual VMSharedTypeIndex, not the reserved value"
);
debug_assert!(
self.types.contains(shared_type_index_to_slab_id(index)),
"registry's slab should contain {index:?}",
);
debug_assert!(
self.types[shared_type_index_to_slab_id(index)].is_some(),
"registry's slab should actually contain a type for {index:?}",
);
debug_assert!(
self.type_to_rec_group[index].is_some(),
"{index:?} should have an associated rec group entry"
);
}
#[inline]
#[track_caller]
fn debug_assert_all_registered(&self, entry: &RecGroupEntry) {
if cfg!(debug_assertions) {
for &ty in &entry.0.shared_type_indices {
self.debug_assert_registered(ty);
}
}
}
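/// Register every rec group in the given module types, returning the
/// entries that keep them registered along with the map from
/// module-interned to engine-shared type indices.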
fn register_module_types(
&mut self,
gc_runtime: Option<&dyn GcRuntime>,
types: &ModuleTypes,
) -> Result<
(
Vec<RecGroupEntry>,
PrimaryMap<ModuleInternedTypeIndex, VMSharedTypeIndex>,
),
OutOfMemory,
> {
log::trace!("Start registering module types");
let mut entries = Vec::with_capacity(types.rec_groups().len())?;
let mut map = PrimaryMap::<ModuleInternedTypeIndex, VMSharedTypeIndex>::with_capacity(
types.wasm_types().len(),
)?;
for (_rec_group_index, module_group) in types.rec_groups() {
let entry = self.register_rec_group(
gc_runtime,
&map,
module_group.clone(),
iter_entity_range(module_group.clone()).map(|ty| types[ty].clone()),
)?;
for (module_ty, engine_ty) in
iter_entity_range(module_group).zip(entry.0.shared_type_indices.iter())
{
let module_ty2 = map.push(*engine_ty).expect("reserved capacity");
assert_eq!(module_ty, module_ty2);
}
entries.push(entry).expect("reserved capacity");
}
log::trace!("End registering module types");
Ok((entries, map))
}
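/// Register a single rec group, deduplicating it against previously
/// registered groups via the hash-consing map.
///
/// `range` is the group's module-interned index range, and `map` must
/// already contain entries for every earlier group that this one
/// references.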
fn register_rec_group(
&mut self,
gc_runtime: Option<&dyn GcRuntime>,
map: &PrimaryMap<ModuleInternedTypeIndex, VMSharedTypeIndex>,
range: Range<ModuleInternedTypeIndex>,
types: impl ExactSizeIterator<Item = WasmSubType>,
) -> Result<RecGroupEntry, OutOfMemory> {
log::trace!("registering rec group of length {}", types.len());
debug_assert_eq!(iter_entity_range(range.clone()).len(), types.len());
let mut non_canon_types = Vec::with_capacity(types.len())?;
let hash_consing_key = WasmRecGroup {
types: types
.zip(iter_entity_range(range.clone()))
.map(|(mut ty, module_index)| {
non_canon_types
.push((module_index, ty.try_clone()?))
.expect("reserved capacity");
ty.canonicalize_for_hash_consing(range.clone(), &mut |idx| {
debug_assert!(idx < range.start);
map[idx]
});
Ok(ty)
})
.try_collect::<Box<[_]>, OutOfMemory>()?,
};
if cfg!(debug_assertions) {
hash_consing_key
.trace_engine_indices::<_, ()>(&mut |index| Ok(self.debug_assert_registered(index)))
.unwrap();
}
if let Some(entry) = self.hash_consing_map.get(&hash_consing_key) {
log::trace!("hash-consing map hit: reusing {entry:?}");
assert_eq!(entry.0.unregistered.load(Acquire), false);
self.debug_assert_all_registered(entry);
entry.incref("hash-consing map hit");
Ok(entry.clone())
} else {
log::trace!("hash-consing map miss: making new registration");
self.register_new_rec_group(gc_runtime, map, range, hash_consing_key, non_canon_types)
}
}
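/// Register a rec group that missed the hash-consing map.
///
/// All mutations are performed through an `Undo` guard so that, if any
/// allocation fails partway through, everything done so far is rolled
/// back and the registry is left unchanged.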
fn register_new_rec_group(
&mut self,
gc_runtime: Option<&(dyn GcRuntime + 'static)>,
map: &PrimaryMap<ModuleInternedTypeIndex, VMSharedTypeIndex>,
range: Range<ModuleInternedTypeIndex>,
hash_consing_key: WasmRecGroup,
mut non_canon_types: Vec<(ModuleInternedTypeIndex, WasmSubType)>,
) -> Result<RecGroupEntry, OutOfMemory> {
debug_assert!(hash_consing_key.is_canonicalized_for_hash_consing());
debug_assert_eq!(self.hash_consing_map.contains(&hash_consing_key), false);
let entry = {
let num_types = non_canon_types.len();
self.reserve_capacity_for_rec_group(num_types)?;
let mut shared_type_indices = Vec::new();
shared_type_indices.reserve_exact(num_types)?;
let entry_inner = RecGroupEntry::new_inner()?;
let shared_type_indices =
self.assign_shared_type_indices(&non_canon_types, shared_type_indices);
RecGroupEntry::init(entry_inner, hash_consing_key, shared_type_indices)
};
let did_incref = Cell::new(false);
let entry2 = entry.clone();
let mut registry = Undo::new(self, |registry| {
registry.remove_entry_impl(&entry2, did_incref.get());
registry.drain_drop_stack();
});
registry.canonicalize_entry_types_for_runtime_usage(
map,
&entry,
non_canon_types.iter_mut().map(|(_, ty)| ty),
range.clone(),
);
registry.incref_outgoing_edges(&entry);
did_incref.set(true);
registry.insert_entry_types(&entry, non_canon_types.into_iter().map(|(_, ty)| ty))?;
registry.insert_entry_rec_groups(&entry);
registry.insert_entry_supertypes(&entry)?;
registry.insert_entry_trampolines(gc_runtime, &entry)?;
registry.insert_entry_gc_layouts(gc_runtime, &entry)?;
let is_new_entry = registry.hash_consing_map.insert(entry.clone())?;
debug_assert!(is_new_entry);
registry.debug_assert_all_registered(&entry);
Undo::commit(registry);
entry.incref("creation");
Ok(entry)
}
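/// Remove an entry and all of its associated data from the registry.
///
/// `should_decref` controls whether the entry's outgoing edges are
/// decremented too; it is `false` only when rolling back a registration
/// that failed before `incref_outgoing_edges` ran.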
fn remove_entry_impl(&mut self, entry: &RecGroupEntry, should_decref: bool) {
assert_eq!(entry.0.registrations.load(Acquire), 0);
assert_eq!(entry.0.unregistered.load(Acquire), false);
entry.0.unregistered.store(true, Release);
self.hash_consing_map.remove(&entry.0.hash_consing_key);
self.remove_entry_gc_layouts(&entry);
self.remove_entry_trampolines(&entry);
self.remove_entry_supertypes(&entry);
self.remove_entry_rec_groups(&entry);
self.remove_entry_types(&entry);
if should_decref {
self.decref_outgoing_edges(&entry);
}
}
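/// Fill in the slab slots reserved by `assign_shared_type_indices` with
/// the now-canonicalized types.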
fn insert_entry_types(
&mut self,
entry: &RecGroupEntry,
sub_types: impl ExactSizeIterator<Item = WasmSubType>,
) -> Result<(), OutOfMemory> {
debug_assert_eq!(entry.0.shared_type_indices.len(), sub_types.len());
for (ty_idx, sub_ty) in entry.0.shared_type_indices.iter().copied().zip(sub_types) {
debug_assert!(sub_ty.is_canonicalized_for_runtime_usage());
let id = shared_type_index_to_slab_id(ty_idx);
debug_assert!(self.types.contains(id));
debug_assert!(self.types[id].is_none());
self.types[id] = Some(try_new(sub_ty)?);
}
Ok(())
}
fn remove_entry_types(&mut self, entry: &RecGroupEntry) {
for &ty in &entry.0.shared_type_indices {
let id = shared_type_index_to_slab_id(ty);
debug_assert!(self.types.contains(id));
self.types.dealloc(id);
}
}
fn insert_entry_rec_groups(&mut self, entry: &RecGroupEntry) {
debug_assert!(self.type_to_rec_group.capacity() >= self.types.len());
for &ty in &entry.0.shared_type_indices {
debug_assert!(self.type_to_rec_group[ty].is_none());
debug_assert!(ty.index() < self.type_to_rec_group.capacity());
self.type_to_rec_group
.insert(ty, Some(entry.clone()))
.expect("reserved capacity");
}
}
fn remove_entry_rec_groups(&mut self, entry: &RecGroupEntry) {
for &ty in &entry.0.shared_type_indices {
debug_assert!(ty.index() < self.type_to_rec_group.capacity());
self.type_to_rec_group.remove(ty);
}
}
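/// Record each subtype's full supertype list by extending its immediate
/// supertype's own list. Supertypes are always registered before their
/// subtypes, so that list is already complete by the time we read it.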
fn insert_entry_supertypes(&mut self, entry: &RecGroupEntry) -> Result<(), OutOfMemory> {
for &ty in &entry.0.shared_type_indices {
let id = shared_type_index_to_slab_id(ty);
if let Some(supertype) = self.types[id].as_ref().unwrap().supertype {
debug_assert!(self.type_to_supertypes.capacity() <= self.types.capacity());
if self.type_to_supertypes.capacity() < self.types.capacity() {
log::trace!("type_to_supertypes.resize({})", self.types.capacity());
self.type_to_supertypes.resize(self.types.capacity())?;
}
let supertype = supertype.unwrap_engine_type_index();
let supers_supertypes = self.supertypes(supertype);
let supertypes = supers_supertypes
.iter()
.copied()
.chain(iter::once(supertype))
.try_collect()?;
self.type_to_supertypes
.insert(ty, Some(supertypes))
.expect("reserved capacity");
}
}
Ok(())
}
fn remove_entry_supertypes(&mut self, entry: &RecGroupEntry) {
if self.type_to_supertypes.capacity() == 0 {
return;
}
for &ty in &entry.0.shared_type_indices {
self.type_to_supertypes.remove(ty);
}
}
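/// Ensure that every function type in this rec group has an associated
/// trampoline type, registering new singleton rec groups for trampoline
/// types as needed. A function type that is its own trampoline type gets
/// no `type_to_trampoline` entry.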
fn insert_entry_trampolines(
&mut self,
gc_runtime: Option<&(dyn GcRuntime + 'static)>,
entry: &RecGroupEntry,
) -> Result<(), OutOfMemory> {
for &ty_idx in &entry.0.shared_type_indices {
let id = shared_type_index_to_slab_id(ty_idx);
debug_assert!(self.types.contains(id));
debug_assert!(self.types[id].is_some());
let sub_ty = self.types[id].as_ref().unwrap();
let Some(func_ty) = sub_ty.as_func() else {
continue;
};
let trampoline_ty = match func_ty.trampoline_type()? {
Cow::Owned(ty) => ty,
Cow::Borrowed(ty) if !sub_ty.is_final || sub_ty.supertype.is_some() => {
ty.try_clone()?
}
Cow::Borrowed(_) => {
debug_assert!(func_ty.is_trampoline_type());
log::trace!("trampoline_type({ty_idx:?}) = {ty_idx:?}");
continue;
}
};
debug_assert!(self.type_to_trampoline.capacity() <= self.types.capacity());
if self.type_to_trampoline.capacity() < self.types.capacity() {
log::trace!("type_to_trampoline.resize({})", self.types.capacity());
self.type_to_trampoline.resize(self.types.capacity())?;
}
let trampoline_sub_ty = WasmSubType {
is_final: true,
supertype: None,
composite_type: wasmtime_environ::WasmCompositeType {
shared: sub_ty.composite_type.shared,
inner: wasmtime_environ::WasmCompositeInnerType::Func(trampoline_ty),
},
};
let trampoline_entry =
self.register_singleton_rec_group(gc_runtime, trampoline_sub_ty)?;
assert_eq!(trampoline_entry.0.shared_type_indices.len(), 1);
let trampoline_index = trampoline_entry.0.shared_type_indices[0];
self.debug_assert_registered(trampoline_index);
debug_assert_ne!(ty_idx, trampoline_index);
self.type_to_trampoline
.insert(ty_idx, Some(trampoline_index).into())
.expect("reserved capacity");
log::trace!("trampoline_type({ty_idx:?}) = {trampoline_index:?}");
}
Ok(())
}
fn remove_entry_trampolines(&mut self, entry: &RecGroupEntry) {
if self.type_to_trampoline.capacity() == 0 {
return;
}
for &ty in &entry.0.shared_type_indices {
if let Some(tramp_ty) = self.type_to_trampoline.remove(ty).and_then(|x| x.expand()) {
self.debug_assert_registered(tramp_ty);
let tramp_entry = self.type_to_rec_group[tramp_ty].as_ref().unwrap();
if tramp_entry.decref("dropping rec group's trampoline-type references") {
self.push_to_drop_stack(tramp_entry.clone());
}
}
}
self.drain_drop_stack();
}
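/// Compute and record the GC heap layout of every array, struct, and
/// exception type in this rec group. When there is no GC runtime, the
/// group may only contain function types.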
fn insert_entry_gc_layouts(
&mut self,
gc_runtime: Option<&(dyn GcRuntime + 'static)>,
entry: &RecGroupEntry,
) -> Result<(), OutOfMemory> {
let Some(gc_runtime) = gc_runtime else {
debug_assert!(entry.0.shared_type_indices.iter().all(|ty| {
let id = shared_type_index_to_slab_id(*ty);
let sub_ty = self.types[id].as_ref().unwrap();
assert!(!sub_ty.composite_type.shared);
matches!(
&sub_ty.composite_type.inner,
wasmtime_environ::WasmCompositeInnerType::Func(_)
)
}));
return Ok(());
};
for &ty_idx in &entry.0.shared_type_indices {
let id = shared_type_index_to_slab_id(ty_idx);
let sub_ty = self.types[id].as_ref().unwrap();
assert!(!sub_ty.composite_type.shared);
let gc_layout = match &sub_ty.composite_type.inner {
wasmtime_environ::WasmCompositeInnerType::Func(_) => continue,
wasmtime_environ::WasmCompositeInnerType::Array(a) => {
gc_runtime.layouts().array_layout(a).into()
}
wasmtime_environ::WasmCompositeInnerType::Struct(s) => {
gc_runtime.layouts().struct_layout(s).into()
}
wasmtime_environ::WasmCompositeInnerType::Exn(e) => {
gc_runtime.layouts().exn_layout(e).into()
}
wasmtime_environ::WasmCompositeInnerType::Cont(_) => continue,
};
debug_assert!(self.type_to_gc_layout.capacity() <= self.types.capacity());
if self.type_to_gc_layout.capacity() < self.types.capacity() {
log::trace!("type_to_gc_layout.resize({})", self.types.capacity());
self.type_to_gc_layout.resize(self.types.capacity())?;
}
self.type_to_gc_layout
.insert(ty_idx, Some(gc_layout))
.expect("reserved capacity");
}
Ok(())
}
fn remove_entry_gc_layouts(&mut self, entry: &RecGroupEntry) {
if self.type_to_gc_layout.capacity() == 0 {
return;
}
for ty in &entry.0.shared_type_indices {
self.type_to_gc_layout.remove(*ty);
}
}
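/// Reserve a slab slot, and therefore a `VMSharedTypeIndex`, for each
/// type in the rec group. The slots start out `None` and are filled in
/// later by `insert_entry_types`.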
fn assign_shared_type_indices(
&mut self,
non_canon_types: &[(ModuleInternedTypeIndex, WasmSubType)],
mut shared_type_indices: Vec<VMSharedTypeIndex>,
) -> Box<[VMSharedTypeIndex]> {
debug_assert_eq!(non_canon_types.len(), shared_type_indices.capacity());
debug_assert!(shared_type_indices.is_empty());
debug_assert!(
self.types.capacity() - self.types.len() >= non_canon_types.len(),
"should have reserved capacity"
);
for (module_index, ty) in non_canon_types.iter() {
let engine_index =
slab_id_to_shared_type_index(self.types.alloc(None).expect("have capacity"));
log::trace!("reserved {engine_index:?} for {module_index:?} = non-canonical {ty:?}");
shared_type_indices
.push(engine_index)
.expect("reserved capacity");
}
debug_assert_eq!(shared_type_indices.len(), shared_type_indices.capacity());
shared_type_indices
.into_boxed_slice()
.expect("capacity should be exact")
}
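/// Increment the registration count of every other rec group that this
/// entry's types reference, keeping those groups alive at least as long
/// as this one.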
fn incref_outgoing_edges(&mut self, entry: &RecGroupEntry) {
let key = &entry.0.hash_consing_key;
debug_assert!(key.is_canonicalized_for_hash_consing());
key.trace_engine_indices::<_, ()>(&mut |index| {
self.debug_assert_registered(index);
let other_entry = self.type_to_rec_group[index].as_ref().unwrap();
assert_eq!(other_entry.0.unregistered.load(Acquire), false);
other_entry.incref("new rec group's type references");
Ok(())
})
.unwrap();
}
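/// Decrement the registration count of every other rec group that this
/// entry's types reference, enqueueing for removal any that reach zero.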
fn decref_outgoing_edges(&mut self, entry: &RecGroupEntry) {
let key = &entry.0.hash_consing_key;
debug_assert!(key.is_canonicalized_for_hash_consing());
key.trace_engine_indices::<_, ()>(&mut |other_index| {
self.debug_assert_registered(other_index);
let other_entry = self.type_to_rec_group[other_index].as_ref().unwrap();
assert_eq!(other_entry.0.unregistered.load(Acquire), false);
if other_entry.decref("dropping rec group's type references") {
self.push_to_drop_stack(other_entry.clone());
}
Ok(())
})
.unwrap();
self.drain_drop_stack();
}
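/// Reserve space in the hash-consing map, the types slab, the
/// type-to-rec-group map, and the drop stack before registering a rec
/// group of `num_types` types, so that the subsequent insertions into
/// them cannot fail.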
fn reserve_capacity_for_rec_group(&mut self, num_types: usize) -> Result<(), OutOfMemory> {
log::trace!("Reserving capacity for rec group of {num_types} types");
let TypeRegistryInner {
hash_consing_map,
types,
type_to_rec_group,
drop_stack,
type_to_supertypes: _,
type_to_trampoline: _,
type_to_gc_layout: _,
} = self;
log::trace!(" hash_consing_map.reserve(1)");
hash_consing_map.reserve(1)?;
log::trace!(" types.reserve({num_types})");
types.reserve(num_types)?;
let types_capacity = types.capacity();
log::trace!(" type_to_rec_group.resize({types_capacity})");
type_to_rec_group.resize(types_capacity)?;
log::trace!(" type_to_rec_group.reserve({types_capacity})");
debug_assert!(drop_stack.is_empty());
drop_stack.reserve(types_capacity)?;
Ok(())
}
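/// Rewrite each type's references into engine-level indices: cross-group
/// references are resolved through `map`, while intra-group references
/// use this entry's freshly assigned `shared_type_indices`.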
fn canonicalize_entry_types_for_runtime_usage<'a>(
&self,
map: &PrimaryMap<ModuleInternedTypeIndex, VMSharedTypeIndex>,
entry: &RecGroupEntry,
sub_tys: impl ExactSizeIterator<Item = &'a mut WasmSubType>,
range: Range<ModuleInternedTypeIndex>,
) {
debug_assert_eq!(sub_tys.len(), entry.0.shared_type_indices.len());
for (engine_index, ty) in entry.0.shared_type_indices.iter().copied().zip(sub_tys) {
self.canonicalize_type_for_runtime_usage(map, &entry, engine_index, ty, range.clone());
}
}
fn canonicalize_type_for_runtime_usage(
&self,
map: &PrimaryMap<ModuleInternedTypeIndex, VMSharedTypeIndex>,
entry: &RecGroupEntry,
engine_index: VMSharedTypeIndex,
ty: &mut WasmSubType,
range: Range<ModuleInternedTypeIndex>,
) {
log::trace!("canonicalizing {engine_index:?} for runtime usage");
ty.canonicalize_for_runtime_usage(&mut |module_index| {
if module_index < range.start {
let engine_index = map[module_index];
log::trace!(" cross-group {module_index:?} becomes {engine_index:?}");
self.debug_assert_registered(engine_index);
engine_index
} else {
assert!(module_index < range.end);
let rec_group_offset = module_index.as_u32() - range.start.as_u32();
let rec_group_offset = usize::try_from(rec_group_offset).unwrap();
let engine_index = entry.0.shared_type_indices[rec_group_offset];
log::trace!(" intra-group {module_index:?} becomes {engine_index:?}");
assert!(!engine_index.is_reserved_value());
assert!(
self.types
.contains(shared_type_index_to_slab_id(engine_index))
);
engine_index
}
});
}
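/// Assert that the given type references only engine-level indices that
/// are registered in this registry.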
#[track_caller]
fn assert_canonicalized_for_runtime_usage_in_this_registry(&self, ty: &WasmSubType) {
ty.trace::<_, ()>(&mut |index| match index {
EngineOrModuleTypeIndex::RecGroup(_) | EngineOrModuleTypeIndex::Module(_) => {
panic!("not canonicalized for runtime usage: {ty:?}")
}
EngineOrModuleTypeIndex::Engine(idx) => {
self.debug_assert_registered(idx);
Ok(())
}
})
.unwrap();
}
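/// Get `ty`'s supertypes, ordered from the root of its hierarchy down to
/// its immediate supertype. Types without supertypes yield an empty
/// slice.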
fn supertypes(&self, ty: VMSharedTypeIndex) -> &[VMSharedTypeIndex] {
self.type_to_supertypes
.get(ty)
.and_then(|s| s.as_deref())
.unwrap_or(&[])
}
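/// Register a rec group containing the single given type, which must
/// already be canonicalized for runtime usage in this registry. The
/// placeholder `range` at the very top of the module index space is
/// effectively unused, since the type contains no module-level
/// references.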
fn register_singleton_rec_group(
&mut self,
gc_runtime: Option<&dyn GcRuntime>,
ty: WasmSubType,
) -> Result<RecGroupEntry, OutOfMemory> {
self.assert_canonicalized_for_runtime_usage_in_this_registry(&ty);
let map = PrimaryMap::default();
let range = ModuleInternedTypeIndex::from_bits(u32::MAX - 1)
..ModuleInternedTypeIndex::from_bits(u32::MAX);
self.register_rec_group(gc_runtime, &map, range, iter::once(ty))
}
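/// Drop the registration count that a `TypeCollection` holds on each of
/// its rec groups, unregistering any whose count reaches zero.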
fn unregister_type_collection(&mut self, collection: &TypeCollection) {
log::trace!("Begin unregistering `TypeCollection`");
for entry in &collection.rec_groups {
self.debug_assert_all_registered(entry);
if entry.decref("TypeRegistryInner::unregister_type_collection") {
self.unregister_entry(entry.clone());
}
}
log::trace!("Finished unregistering `TypeCollection`");
}
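/// Attempt to unregister an entry whose registration count was observed
/// to hit zero.
///
/// Registration counts are manipulated without holding the registry
/// lock, so by the time we acquire the write lock here the entry may have
/// been concurrently resurrected, or even resurrected, re-dropped, and
/// unregistered by another thread. Both races are detected and bail out
/// early.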
fn unregister_entry(&mut self, entry: RecGroupEntry) {
log::trace!("Attempting to unregister {entry:?}");
debug_assert!(self.drop_stack.is_empty());
let registrations = entry.0.registrations.load(Acquire);
if registrations != 0 {
log::trace!(
" {entry:?} was concurrently resurrected and no longer has \
zero registrations (registrations -> {registrations})",
);
assert_eq!(entry.0.unregistered.load(Acquire), false);
return;
}
if entry.0.unregistered.load(Acquire) {
log::trace!(
" {entry:?} was concurrently resurrected, dropped again, \
and already unregistered"
);
return;
}
debug_assert!(self.drop_stack.capacity() >= self.types.capacity());
self.push_to_drop_stack(entry);
self.drain_drop_stack();
}
fn push_to_drop_stack(&mut self, entry: RecGroupEntry) {
log::trace!("Pushing entry to drop stack: {entry:?}");
self.drop_stack
.push(entry)
.expect("always have space in `drop_stack` for all types");
}
fn drain_drop_stack(&mut self) {
if self.drop_stack.is_empty() {
return;
}
log::trace!("Draining drop stack");
while let Some(entry) = self.drop_stack.pop() {
log::trace!("Begin unregistering {entry:?}");
self.debug_assert_all_registered(&entry);
self.remove_entry_impl(&entry, true);
log::trace!("End unregistering {entry:?}");
}
}
}
#[cfg(debug_assertions)]
impl Drop for TypeRegistryInner {
fn drop(&mut self) {
let TypeRegistryInner {
hash_consing_map,
types,
type_to_rec_group,
type_to_supertypes,
type_to_trampoline,
type_to_gc_layout,
drop_stack,
} = self;
assert!(
hash_consing_map.is_empty(),
"type registry not empty: hash consing map is not empty: {hash_consing_map:#?}"
);
assert!(
types.is_empty(),
"type registry not empty: types slab is not empty: {types:#?}"
);
assert!(
type_to_rec_group.is_empty() || type_to_rec_group.values().all(|x| x.is_none()),
"type registry not empty: type-to-rec-group map is not empty: {type_to_rec_group:#?}"
);
assert!(
type_to_supertypes.is_empty() || type_to_supertypes.values().all(|x| x.is_none()),
"type registry not empty: type-to-supertypes map is not empty: {type_to_supertypes:#?}"
);
assert!(
type_to_trampoline.is_empty() || type_to_trampoline.values().all(|x| x.is_none()),
"type registry not empty: type-to-trampoline map is not empty: {type_to_trampoline:#?}"
);
assert!(
type_to_gc_layout.is_empty() || type_to_gc_layout.values().all(|x| x.is_none()),
"type registry not empty: type-to-gc-layout map is not empty: {type_to_gc_layout:#?}"
);
assert!(
drop_stack.is_empty(),
"type registry not empty: drop stack is not empty: {drop_stack:#?}"
);
}
}
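/// The engine-wide registry of all currently registered types, shared
/// behind a reader-writer lock.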
#[derive(Debug)]
pub struct TypeRegistry(RwLock<TypeRegistryInner>);
impl TypeRegistry {
pub fn new() -> Self {
Self(RwLock::new(TypeRegistryInner::default()))
}
#[inline]
pub fn debug_assert_contains(&self, index: VMSharedTypeIndex) {
if cfg!(debug_assertions) {
self.0.read().debug_assert_registered(index);
}
}
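/// Get the type associated with the given index, if any. The returned
/// `Arc` keeps the underlying type data alive but does not by itself keep
/// the index registered.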
pub fn borrow(&self, index: VMSharedTypeIndex) -> Option<Arc<WasmSubType>> {
let id = shared_type_index_to_slab_id(index);
let inner = self.0.read();
inner.types.get(id).and_then(|ty| ty.clone())
}
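/// Get the GC layout for the given index, if it is a registered GC type
/// that has one.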
pub fn layout(&self, index: VMSharedTypeIndex) -> Option<GcLayout> {
let inner = self.0.read();
inner.type_to_gc_layout.get(index).and_then(|l| l.clone())
}
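/// Get the trampoline type to use when calling a function of the given
/// type. Function types that are their own trampoline type map to
/// themselves.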
pub fn trampoline_type(&self, index: VMSharedTypeIndex) -> VMSharedTypeIndex {
let slab_id = shared_type_index_to_slab_id(index);
let inner = self.0.read();
inner.debug_assert_registered(index);
let ty = inner.types[slab_id].as_ref().unwrap();
debug_assert!(
ty.is_func(),
"cannot get the trampoline type of a non-function type: {index:?} = {ty:?}"
);
match inner.type_to_trampoline.get(index).and_then(|x| x.expand()) {
Some(ty) => ty,
// A function type with no `type_to_trampoline` entry is its own
// trampoline type.
None => index,
}
}
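/// Is type `sub` a subtype of `sup`? Equal indices are trivially
/// subtypes, so the common reflexive case never takes the lock.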
#[inline]
pub fn is_subtype(&self, sub: VMSharedTypeIndex, sup: VMSharedTypeIndex) -> bool {
if cfg!(debug_assertions) {
self.0.read().debug_assert_registered(sub);
self.0.read().debug_assert_registered(sup);
}
if sub == sup {
return true;
}
self.is_subtype_slow(sub, sup)
}
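/// Because supertype lists are ordered root-first, `sup` sits at depth
/// `sup_supertypes.len()` in any hierarchy that contains it. For example,
/// given `A`, `B <: A`, and `C <: B`:
///
/// * `supertypes(A) == []`
/// * `supertypes(B) == [A]`
/// * `supertypes(C) == [A, B]`
///
/// so `C <: B` holds because `supertypes(C)[supertypes(B).len()] ==
/// supertypes(C)[1] == B`, making the whole check constant time.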
fn is_subtype_slow(&self, sub: VMSharedTypeIndex, sup: VMSharedTypeIndex) -> bool {
let inner = self.0.read();
let sub_supertypes = inner.supertypes(sub);
let sup_supertypes = inner.supertypes(sup);
sub_supertypes.get(sup_supertypes.len()) == Some(&sup)
}
}