use atomic::Atomic;
use atomic::Ordering;
use core::fmt;
#[cfg(not(target_arch = "wasm32"))]
use memmap2::MmapMut;
use std::mem::size_of;

use crate::header::HeapObjectHeader;
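/// Rounds `x` down to the nearest multiple of `n`; `n` must be a power of two.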
pub const fn round_down(x: u64, n: u64) -> u64 {
    // `n` must be a power of two: clear all bits below that power.
    x & !(n - 1)
}
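/// Rounds `x` up to the nearest multiple of `n`; `n` must be a power of two.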
pub const fn round_up(x: u64, n: u64) -> u64 {
    round_down(x + n - 1, n)
}
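/// A mark bitmap that keeps one bit per `ALIGN` bytes of a contiguous heap
/// range `[heap_begin, heap_limit)`. Each bitmap word of `usize` therefore
/// covers `ALIGN * BITS_PER_INTPTR` bytes of heap.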
#[allow(dead_code)]
pub struct SpaceBitmap<const ALIGN: usize> {
    #[cfg(not(target_arch = "wasm32"))]
    mem_map: MmapMut,
    #[cfg(target_arch = "wasm32")]
    mem: *mut u8,
    bitmap_begin: *mut Atomic<usize>,
    bitmap_size: usize,
    heap_begin: usize,
    heap_limit: usize,
    name: &'static str,
}
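/// Number of bits in one bitmap word (one `usize`).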
const BITS_PER_INTPTR: usize = size_of::<usize>() * 8;
impl<const ALIGN: usize> SpaceBitmap<ALIGN> {
    #[inline]
    pub fn get_name(&self) -> &'static str {
        self.name
    }
    #[inline]
    pub fn set_name(&mut self, name: &'static str) {
        self.name = name;
    }
    #[inline]
    pub fn heap_limit(&self) -> usize {
        self.heap_limit
    }
    #[inline]
    pub fn heap_begin(&self) -> usize {
        self.heap_begin
    }
    #[inline]
    pub fn set_heap_size(&mut self, bytes: usize) {
        self.bitmap_size = Self::offset_to_index(bytes) * size_of::<usize>();
        assert_eq!(self.heap_size(), bytes);
    }
    #[inline]
    pub fn heap_size(&self) -> usize {
        Self::index_to_offset(self.size() as u64 / size_of::<usize>() as u64) as _
    }
    #[inline]
    pub fn has_address(&self, obj: *const u8) -> bool {
        let offset = (obj as usize).wrapping_sub(self.heap_begin);
        let index = Self::offset_to_index(offset);
        index < (self.bitmap_size / size_of::<usize>())
    }
    #[inline]
    pub fn size(&self) -> usize {
        self.bitmap_size
    }
    #[inline]
    pub fn begin(&self) -> *mut Atomic<usize> {
        self.bitmap_begin
    }
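    /// Converts a bitmap word index back to the byte offset (from `heap_begin`)
    /// of the first heap byte that word covers.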
    #[inline]
    pub fn index_to_offset(index: u64) -> u64 {
        index * ALIGN as u64 * BITS_PER_INTPTR as u64
    }
    #[inline]
    pub fn offset_to_index(offset: usize) -> usize {
        offset / ALIGN / BITS_PER_INTPTR
    }
    #[inline]
    pub fn offset_bit_index(offset: usize) -> usize {
        (offset / ALIGN) % BITS_PER_INTPTR
    }
    #[inline]
    pub fn offset_to_mask(offset: usize) -> usize {
        1 << Self::offset_bit_index(offset)
    }
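    /// Atomically sets the mark bit for `obj` with a CAS loop. Returns `true`
    /// if the bit was already set, `false` if this call set it.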
    #[inline]
    pub fn atomic_test_and_set(&self, obj: *const u8) -> bool {
        let addr = obj as usize;
        debug_assert!(addr >= self.heap_begin);
        let offset = addr.wrapping_sub(self.heap_begin);
        let index = Self::offset_to_index(offset);
        let mask = Self::offset_to_mask(offset);
        unsafe {
            let atomic_entry = &*self.bitmap_begin.add(index);
            debug_assert!(
                index < self.bitmap_size / size_of::<usize>(),
                "bitmap_size: {}",
                self.bitmap_size
            );

            let mut old_word;
            while {
                old_word = atomic_entry.load(Ordering::Relaxed);
                if (old_word & mask) != 0 {
                    return true;
                }
                atomic_entry
                    .compare_exchange_weak(
                        old_word,
                        old_word | mask,
                        Ordering::Relaxed,
                        Ordering::Relaxed,
                    )
                    .is_err()
            } {}

            false
        }
    }
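    /// Returns `true` if the bit for `obj` is currently set (relaxed load).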
    #[inline]
    pub fn test(&self, obj: *const u8) -> bool {
        let addr = obj as usize;
        debug_assert!(self.has_address(obj), "Invalid object address: {:p}", obj);
        debug_assert!(self.heap_begin <= addr);
        unsafe {
            let offset = addr.wrapping_sub(self.heap_begin);
            let index = Self::offset_to_index(offset);
            ((*self.bitmap_begin.add(index)).load(Ordering::Relaxed) & Self::offset_to_mask(offset))
                != 0
        }
    }
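    /// Sets (`SET_BIT == true`) or clears (`SET_BIT == false`) the bit for `obj`
    /// using plain relaxed stores, and returns whether the bit was set before
    /// the call.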
    #[inline]
    pub fn modify<const SET_BIT: bool>(&self, obj: *const u8) -> bool {
        let addr = obj as usize;
        debug_assert!(
            addr >= self.heap_begin,
            "invalid address: {:x} ({:x} >= {:x} is false)",
            addr,
            addr,
            self.heap_begin
        );
        debug_assert!(self.has_address(obj), "Invalid object address: {:p}", obj);
        let offset = addr.wrapping_sub(self.heap_begin);
        let index = Self::offset_to_index(offset);
        let mask = Self::offset_to_mask(offset);
        debug_assert!(
            index < self.bitmap_size / size_of::<usize>(),
            "bitmap size: {}",
            self.bitmap_size
        );
        let atomic_entry = unsafe { &*self.bitmap_begin.add(index) };
        let old_word = atomic_entry.load(Ordering::Relaxed);
        if SET_BIT {
            if (old_word & mask) == 0 {
                atomic_entry.store(old_word | mask, Ordering::Relaxed);
            }
        } else {
            atomic_entry.store(old_word & !mask, Ordering::Relaxed);
        }

        debug_assert_eq!(self.test(obj), SET_BIT);
        (old_word & mask) != 0
    }

    #[inline(always)]
    pub fn set(&self, obj: *const u8) -> bool {
        self.modify::<true>(obj)
    }

    #[inline(always)]
    pub fn clear(&self, obj: *const u8) -> bool {
        self.modify::<false>(obj)
    }
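    /// Number of bitmap bytes needed to cover `capacity` bytes of heap.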
    pub fn compute_bitmap_size(capacity: u64) -> usize {
        let bytes_covered_per_word = ALIGN * BITS_PER_INTPTR;
        ((round_up(capacity, bytes_covered_per_word as _) / bytes_covered_per_word as u64)
            * size_of::<usize>() as u64) as _
    }
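    /// Number of heap bytes covered by `bitmap_bytes` bytes of bitmap.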
    pub fn compute_heap_size(bitmap_bytes: u64) -> usize {
        (bitmap_bytes * 8 * ALIGN as u64) as _
    }
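    /// Clears every mark bit for addresses in `[begin, end)`.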
    pub fn clear_range(&self, begin: *const u8, end: *const u8) {
        let mut begin_offset = begin as usize - self.heap_begin;
        let mut end_offset = end as usize - self.heap_begin;
        // Clear unaligned bits at the front until `begin_offset` is word-aligned.
        while begin_offset < end_offset && Self::offset_bit_index(begin_offset) != 0 {
            self.clear((self.heap_begin + begin_offset) as _);
            begin_offset += ALIGN;
        }
        // Clear unaligned bits at the back until `end_offset` is word-aligned.
        while begin_offset < end_offset && Self::offset_bit_index(end_offset) != 0 {
            end_offset -= ALIGN;
            self.clear((self.heap_begin + end_offset) as _);
        }
        // Zero the fully covered bitmap words in between in one pass.
        let start_index = Self::offset_to_index(begin_offset);
        let end_index = Self::offset_to_index(end_offset);
        unsafe {
            core::ptr::write_bytes(
                self.bitmap_begin.add(start_index).cast::<u8>(),
                0,
                (end_index - start_index) * size_of::<usize>(),
            );
        }
    }
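    /// Invokes `visitor` for every marked object whose address lies in
    /// `[visit_begin, visit_end)`, scanning whole bitmap words at a time.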
    pub fn visit_marked_range(
        &self,
        visit_begin: *const u8,
        visit_end: *const u8,
        mut visitor: impl FnMut(*mut HeapObjectHeader),
    ) {
        let offset_start = visit_begin as usize - self.heap_begin;
        let offset_end = visit_end as usize - self.heap_begin;

        let index_start = Self::offset_to_index(offset_start);
        let index_end = Self::offset_to_index(offset_end);
        let bit_start = (offset_start / ALIGN) % BITS_PER_INTPTR;
        let bit_end = (offset_end / ALIGN) % BITS_PER_INTPTR;
        unsafe {
            // Left edge: drop the bits below `bit_start` in the first word.
            let mut left_edge = self.bitmap_begin.cast::<usize>().add(index_start).read();
            left_edge &= !((1 << bit_start) - 1);
            let mut right_edge;
            if index_start < index_end {
                if left_edge != 0 {
                    let ptr_base =
                        Self::index_to_offset(index_start as _) as usize + self.heap_begin;
                    while {
                        let shift = left_edge.trailing_zeros();
                        let obj = (ptr_base + shift as usize * ALIGN) as *mut HeapObjectHeader;
                        visitor(obj);
                        left_edge ^= 1 << shift as usize;
                        left_edge != 0
                    } {}
                }
                // Middle: visit every set bit in the fully covered words.
                for i in index_start + 1..index_end {
                    let mut w = (*self.bitmap_begin.add(i)).load(Ordering::Relaxed);
                    if w != 0 {
                        let ptr_base = Self::index_to_offset(i as _) as usize + self.heap_begin;
                        while {
                            let shift = w.trailing_zeros();
                            let obj = (ptr_base + shift as usize * ALIGN) as *mut HeapObjectHeader;
                            visitor(obj);
                            w ^= 1 << shift as usize;
                            w != 0
                        } {}
                    }
                }

                // Right edge: keep only the bits below `bit_end` in the last word.
                if bit_end == 0 {
                    right_edge = 0;
                } else {
                    right_edge = self.bitmap_begin.cast::<usize>().add(index_end).read();
                }
            } else {
                right_edge = left_edge;
            }

            right_edge &= (1 << bit_end) - 1;
            if right_edge != 0 {
                let ptr_base = Self::index_to_offset(index_end as _) as usize + self.heap_begin;
                while {
                    let shift = right_edge.trailing_zeros();
                    let obj = (ptr_base + shift as usize * ALIGN) as *mut HeapObjectHeader;
                    visitor(obj);
                    right_edge ^= 1 << shift as usize;
                    right_edge != 0
                } {}
            }
        }
    }
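    /// Wraps an already-allocated bitmap of `bitmap_size` bytes at
    /// `bitmap_begin`, covering `heap_capacity` bytes starting at `heap_begin`.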
    #[cfg(not(target_arch = "wasm32"))]
    pub fn new(
        name: &'static str,
        mem_map: MmapMut,
        bitmap_begin: *mut usize,
        bitmap_size: usize,
        heap_begin: *mut u8,
        heap_capacity: usize,
    ) -> Self {
        Self {
            name,
            mem_map,
            bitmap_size,
            bitmap_begin: bitmap_begin.cast(),
            heap_begin: heap_begin as _,
            heap_limit: heap_begin as usize + heap_capacity,
        }
    }
    #[cfg(not(target_arch = "wasm32"))]
    pub fn create_from_memmap(
        name: &'static str,
        mem_map: MmapMut,
        heap_begin: *mut u8,
        heap_capacity: usize,
    ) -> Self {
        let bitmap_begin = mem_map.as_ptr() as *mut u8;
        let bitmap_size = Self::compute_bitmap_size(heap_capacity as _);
        Self {
            name,
            mem_map,
            bitmap_begin: bitmap_begin.cast(),
            bitmap_size,
            heap_begin: heap_begin as _,
            heap_limit: heap_begin as usize + heap_capacity,
        }
    }
    #[cfg(not(target_arch = "wasm32"))]
    pub fn create(name: &'static str, heap_begin: *mut u8, heap_capacity: usize) -> Self {
        let bitmap_size = Self::compute_bitmap_size(heap_capacity as _);

        let mem_map = MmapMut::map_anon(bitmap_size).unwrap();
        Self::create_from_memmap(name, mem_map, heap_begin, heap_capacity)
    }
    #[cfg(target_arch = "wasm32")]
    pub fn create(name: &'static str, heap_begin: *mut u8, heap_capacity: usize) -> Self {
        let bitmap_size = Self::compute_bitmap_size(heap_capacity as _);
        // Allocate zeroed memory so no bits start out set, matching the
        // anonymous-mmap path on non-wasm targets.
        let memory = unsafe { libc::calloc(1, bitmap_size).cast::<u8>() };
        Self::create_from_raw(name, memory, heap_begin, heap_capacity)
    }
    #[cfg(target_arch = "wasm32")]
    pub fn create_from_raw(
        name: &'static str,
        mem: *mut u8,
        heap_begin: *mut u8,
        heap_capacity: usize,
    ) -> Self {
        let bitmap_begin = mem as *mut u8;
        let bitmap_size = Self::compute_bitmap_size(heap_capacity as _);
        Self {
            name,
            mem,
            bitmap_begin: bitmap_begin.cast(),
            bitmap_size,
            heap_begin: heap_begin as _,
            heap_limit: heap_begin as usize + heap_capacity,
        }
    }
}
impl<const ALIGN: usize> fmt::Debug for SpaceBitmap<ALIGN> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(
            f,
            "[begin={:p},end={:p}]",
            self.heap_begin as *const (), self.heap_limit as *const ()
        )
    }
}
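// Minimal usage sketch (illustrative only, assuming a non-wasm host where
// `create` backs the bitmap with an anonymous mmap): mark two 16-byte slots of
// a dummy heap, visit the marked range, then clear one bit. Names and constants
// below are not part of the original module.
#[cfg(all(test, not(target_arch = "wasm32")))]
mod space_bitmap_usage {
    use super::*;

    #[test]
    fn mark_visit_clear() {
        // A 4 KiB dummy "heap"; the bitmap only cares about offsets from its start.
        let heap = vec![0u8; 4096];
        let heap_begin = heap.as_ptr() as *mut u8;
        let bitmap = SpaceBitmap::<16>::create("test-bitmap", heap_begin, heap.len());

        let first = heap_begin as *const u8;
        let second = unsafe { heap_begin.add(32) } as *const u8;

        assert!(!bitmap.test(first));
        bitmap.set(first);
        bitmap.set(second);
        // The atomic path reports a bit that is already set.
        assert!(bitmap.atomic_test_and_set(second));

        // Visit everything marked within the first 64 bytes of the heap.
        let mut visited = 0;
        bitmap.visit_marked_range(first, unsafe { heap_begin.add(64) }, |_obj| visited += 1);
        assert_eq!(visited, 2);

        bitmap.clear(first);
        assert!(!bitmap.test(first));
        assert!(bitmap.test(second));
    }
}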