//! comet/gc_info_table.rs — global table of per-type GC metadata.

1use std::{
2    any::TypeId,
3    collections::hash_map::DefaultHasher,
4    hash::Hash,
5    hash::Hasher,
6    mem::{size_of, ManuallyDrop, MaybeUninit},
7    ptr::null_mut,
8    sync::atomic::AtomicU16,
9};
10
11use crate::internal::{
12    finalize_trait::FinalizationCallback, gc_info::GCInfoIndex, trace_trait::TraceCallback,
13};
14#[cfg(not(target_arch = "wasm32"))]
15use crate::mmap::Mmap;
16
/// GCInfo contains metadata for objects that implement `GCInfoTrait`.
///
/// `#[repr(C)]` gives the struct a stable layout so entries can be written to
/// and read from the raw `GCInfoTable` storage by pointer arithmetic.
#[repr(C)]
pub struct GCInfo {
    /// Finalization callback; `None` when the type needs no finalization.
    pub finalize: Option<FinalizationCallback>,
    /// Tracing callback (always present, unlike `finalize`).
    pub trace: TraceCallback,
    /// Additional vtable pointer. This is not used in any way by Comet but might be used by 
    /// programmers to store vtable by runtimes without occupying additional 8/4 bytes of space
    /// in allocation itself.
    pub vtable: usize,
}
29
30/// Efficent storage for [GCInfo].
31pub struct GCInfoTable {
32    #[cfg(not(wasm))]
33    map: Mmap,
34    table: *mut GCInfo,
35    type_id_map: MaybeUninit<Vec<AtomicU16>>,
36    current_index: AtomicU16,
37}
38
39pub(crate) static mut GC_TABLE: GCInfoTable = GCInfoTable {
40    table: null_mut(),
41    current_index: AtomicU16::new(1),
42    type_id_map: MaybeUninit::uninit(),
43    #[cfg(not(wasm))]
44    map: Mmap::uninit(),
45};
46
47impl GCInfoTable {
48    /// At maximum [`MAX_INDEX - 1`](GCInfoTable::MAX_INDEX) indices are supported.
49    ///
50    /// We assume that 14 bits are enough to represent all possible types.
51    pub const MAX_INDEX: u16 = 1 << 14;
52    /// Minimum index returned. Values smaller [`MIN_INDEX`](GCInfoTable::MIN_INDEX) may be used as
53    /// sentinels.
54    pub const MIN_INDEX: u16 = 1;
55
56    pub const INITIAL_WANTED_LIMIT: u16 = 512;
57
58    pub(crate) unsafe fn init(mem: Option<&'static mut [u8]>) {
59        #[cfg(wasm)]
60        {
61            GC_TABLE.table = mem.unwrap().as_mut_ptr();
62        }
63        #[cfg(not(wasm))]
64        {
65            let _ = mem;
66            let map = Mmap::new(Self::MAX_INDEX as usize * size_of::<GCInfo>());
67            GC_TABLE.map = map;
68            GC_TABLE.table = GC_TABLE.map.start().cast();
69        }
70        let mut v = ManuallyDrop::new(vec![0u16; Self::MAX_INDEX as usize]);
71        *GC_TABLE.type_id_map.as_mut_ptr() =
72            Vec::from_raw_parts(v.as_mut_ptr().cast::<AtomicU16>(), v.len(), v.capacity());
73    }
74    pub(crate) fn add_gc_info_type_id(&mut self, type_id: TypeId, info: GCInfo) -> GCInfoIndex {
75        unsafe {
76            let mut hasher = DefaultHasher::default();
77            type_id.hash(&mut hasher);
78            let key = hasher.finish();
79            let table_idx = key % (*self.type_id_map.as_ptr()).len() as u64;
80            let index = &(*self.type_id_map.as_ptr())[table_idx as usize];
81            let index_ = index.load(std::sync::atomic::Ordering::Acquire);
82            if index_ != 0 {
83                return GCInfoIndex(index_);
84            }
85            let index_ = self.add_gc_info(info);
86            index.store(index_.0, std::sync::atomic::Ordering::Release);
87            index_
88        }
89    }
90
91    pub unsafe fn add_gc_info(&mut self, info: GCInfo) -> GCInfoIndex {
92        let index = self
93            .current_index
94            .fetch_add(1, std::sync::atomic::Ordering::AcqRel);
95        if index >= Self::MAX_INDEX {
96            panic!("GCInfoTable memory exhausted");
97        }
98
99        self.table.add(index as _).write(info);
100
101        GCInfoIndex(index)
102    }
103
104    pub unsafe fn get_gc_info(&self, index: GCInfoIndex) -> GCInfo {
105        self.table.add(index.0 as _).read()
106    }
107
108    pub unsafe fn get_gc_info_mut(&mut self, index: GCInfoIndex) -> &mut GCInfo {
109        &mut *self.table.add(index.0 as _)
110    }
111}