use std::{
    any::TypeId,
    collections::hash_map::DefaultHasher,
    hash::{Hash, Hasher},
    mem::{size_of, ManuallyDrop, MaybeUninit},
    ptr::null_mut,
    sync::atomic::{AtomicU16, Ordering},
};
use crate::internal::{
    finalize_trait::FinalizationCallback, gc_info::GCInfoIndex, trace_trait::TraceCallback,
};
#[cfg(not(target_arch = "wasm32"))]
use crate::mmap::Mmap;
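/// Per-type metadata consulted by the collector: an optional finalization
/// callback, the trace callback invoked during marking, and the type's
/// vtable pointer.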
#[repr(C)]
pub struct GCInfo {
    pub finalize: Option<FinalizationCallback>,
    pub trace: TraceCallback,
    pub vtable: usize,
}
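/// Global registry mapping each `GCInfoIndex` to its `GCInfo` entry. The
/// backing storage is a fixed-size mmap'd array on native targets and a
/// caller-provided buffer on wasm32.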
pub struct GCInfoTable {
    /// Keeps the backing mapping alive for the lifetime of the table.
    #[cfg(not(target_arch = "wasm32"))]
    map: Mmap,
    /// Base pointer of the `GCInfo` array.
    table: *mut GCInfo,
    /// Maps hashed `TypeId`s to assigned indices; 0 means "unassigned".
    type_id_map: MaybeUninit<Vec<AtomicU16>>,
    /// Next index to hand out; starts at `MIN_INDEX`.
    current_index: AtomicU16,
}
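/// The process-wide table. Index 0 is reserved as the "unassigned" sentinel,
/// so allocation starts at 1.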
pub(crate) static mut GC_TABLE: GCInfoTable = GCInfoTable {
    table: null_mut(),
    current_index: AtomicU16::new(1),
    type_id_map: MaybeUninit::uninit(),
    #[cfg(not(target_arch = "wasm32"))]
    map: Mmap::uninit(),
};
impl GCInfoTable {
    /// Capacity of the table; allocation panics once indices reach this bound.
    pub const MAX_INDEX: u16 = 1 << 14;
    /// Smallest valid index; 0 is reserved to mean "unassigned".
    pub const MIN_INDEX: u16 = 1;
    /// Initially wanted table limit (as in cppgc's GCInfoTable); not used here.
    pub const INITIAL_WANTED_LIMIT: u16 = 512;
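    /// Initializes the global table.
    ///
    /// # Safety
    ///
    /// Must be called exactly once, before any other table operation. On
    /// wasm32, `mem` must supply at least `MAX_INDEX * size_of::<GCInfo>()`
    /// writable bytes; on other targets `mem` is ignored.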
    pub(crate) unsafe fn init(mem: Option<&'static mut [u8]>) {
        #[cfg(target_arch = "wasm32")]
        {
            GC_TABLE.table = mem.unwrap().as_mut_ptr().cast();
        }
        #[cfg(not(target_arch = "wasm32"))]
        {
            let _ = mem;
            // Reserve space for the maximum number of entries up front.
            let map = Mmap::new(Self::MAX_INDEX as usize * size_of::<GCInfo>());
            GC_TABLE.map = map;
            GC_TABLE.table = GC_TABLE.map.start().cast();
        }
        // Build the TypeId map: allocate a zeroed `Vec<u16>` and reinterpret
        // it as `Vec<AtomicU16>` (identical layout). `ManuallyDrop` hands
        // ownership of the buffer over to the reconstructed vector.
        let mut v = ManuallyDrop::new(vec![0u16; Self::MAX_INDEX as usize]);
        // `write` (not `*ptr = ...`) avoids dropping the uninitialized `Vec`
        // that sits behind the `MaybeUninit`.
        GC_TABLE.type_id_map.write(Vec::from_raw_parts(
            v.as_mut_ptr().cast::<AtomicU16>(),
            v.len(),
            v.capacity(),
        ));
    }
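    /// Returns the index registered for `type_id`, allocating a fresh entry
    /// from `info` the first time the type is seen.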
    pub(crate) fn add_gc_info_type_id(&mut self, type_id: TypeId, info: GCInfo) -> GCInfoIndex {
        unsafe {
            // Hash the TypeId into a slot of the fixed-size map.
            let mut hasher = DefaultHasher::default();
            type_id.hash(&mut hasher);
            let key = hasher.finish();
            let map = self.type_id_map.assume_init_ref();
            let table_idx = key % map.len() as u64;
            let index = &map[table_idx as usize];
            // A non-zero slot already holds the index for this type.
            let index_ = index.load(Ordering::Acquire);
            if index_ != 0 {
                return GCInfoIndex(index_);
            }
            // Note: distinct `TypeId`s hashing to the same slot will share an
            // entry, and two racing registrations may both allocate, with the
            // later store winning.
            let index_ = self.add_gc_info(info);
            index.store(index_.0, Ordering::Release);
            index_
        }
    }
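    /// Claims the next free index and stores `info` there. Panics once
    /// `MAX_INDEX` entries have been allocated.
    ///
    /// # Safety
    ///
    /// The table must have been initialized via `init`.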
    pub unsafe fn add_gc_info(&mut self, info: GCInfo) -> GCInfoIndex {
        let index = self.current_index.fetch_add(1, Ordering::AcqRel);
        if index >= Self::MAX_INDEX {
            panic!("GCInfoTable memory exhausted");
        }
        self.table.add(index as usize).write(info);
        GCInfoIndex(index)
    }
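    /// Reads the `GCInfo` stored at `index`.
    ///
    /// # Safety
    ///
    /// `index` must be a valid index previously returned by `add_gc_info`.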
    pub unsafe fn get_gc_info(&self, index: GCInfoIndex) -> GCInfo {
        self.table.add(index.0 as usize).read()
    }
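    /// Returns a mutable reference to the `GCInfo` stored at `index`.
    ///
    /// # Safety
    ///
    /// `index` must be a valid index previously returned by `add_gc_info`.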
    pub unsafe fn get_gc_info_mut(&mut self, index: GCInfoIndex) -> &mut GCInfo {
        &mut *self.table.add(index.0 as usize)
    }
}