comet/large_space.rs

use std::{ptr::null_mut, sync::atomic::{self, AtomicBool}};

use crate::{gc_info_table::GC_TABLE, header::HeapObjectHeader};

/// Precise allocation used for large objects (>= LARGE_CUTOFF).
///
/// Starlight uses mimalloc, which already knows what to do for large allocations; the GC shouldn't
/// have to think about such things. That's where `PreciseAllocation` comes in. We allocate large
/// objects directly with malloc and put the `PreciseAllocation` header just before them. We can detect
/// when a `*mut HeapObjectHeader` is a precise allocation because it will have the `HALF_ALIGNMENT` bit set.
#[repr(C)]
pub struct PreciseAllocation {
    //pub link: LinkedListLink,
    /// Allocation request size.
    pub cell_size: usize,
    pub mark: bool,
    /// Index in `precise_allocations`.
    pub index_in_space: u32,
    //pub is_newly_allocated: bool,
    /// Is the alignment adjusted?
    pub adjusted_alignment: bool,
    /// Is this even a valid allocation?
    pub has_valid_cell: bool,
    pub is_newly_allocated: bool,
}

impl PreciseAllocation {
    /// Alignment of the allocation.
    pub const ALIGNMENT: usize = 16;
    /// Alignment of the pointer returned by `Self::cell`.
    pub const HALF_ALIGNMENT: usize = Self::ALIGNMENT / 2;
    /// Check if `raw_ptr` is precisely allocated, i.e. has the `HALF_ALIGNMENT` tag bit set.
    pub fn is_precise(raw_ptr: *mut ()) -> bool {
        (raw_ptr as usize & Self::HALF_ALIGNMENT) != 0
    }
    /// View the mark flag as an atomic boolean.
    pub fn mark_atomic(&self) -> &AtomicBool {
        as_atomic!(&self.mark;AtomicBool)
    }
    /// Create a `PreciseAllocation` pointer from a cell pointer.
    pub fn from_cell(ptr: *mut HeapObjectHeader) -> *mut Self {
        unsafe {
            ptr.cast::<u8>()
                .offset(-(Self::header_size() as isize))
                .cast()
        }
    }
    /// Return the base pointer of the underlying malloc'd region.
    #[inline]
    pub fn base_pointer(&self) -> *mut () {
        if self.adjusted_alignment {
            ((self as *const Self as isize) - (Self::HALF_ALIGNMENT as isize)) as *mut ()
        } else {
            self as *const Self as *mut ()
        }
    }

    /// Return the cell address; it is always aligned to `Self::HALF_ALIGNMENT`.
    pub fn cell(&self) -> *mut HeapObjectHeader {
        let addr = unsafe { (self as *const Self as *const u8).add(Self::header_size()) };
        addr as _
    }
    /// Return true if `raw_ptr` is above the lower bound.
    pub fn above_lower_bound(&self, raw_ptr: *mut ()) -> bool {
        let ptr = raw_ptr;
        let begin = self.cell() as *mut ();
        ptr >= begin
    }
    /// Return true if `raw_ptr` is below the upper bound.
    pub fn below_upper_bound(&self, raw_ptr: *mut ()) -> bool {
        let ptr = raw_ptr;
        let begin = self.cell() as *mut ();
        let end = (begin as usize + self.cell_size) as *mut ();
        // Accept pointers up to one word (8 bytes) past the end of the cell.
        ptr <= (end as usize + 8) as *mut ()
    }
    /// Returns the header size, rounded so that `cell()` is aligned to `HALF_ALIGNMENT`
    /// and always carries the `HALF_ALIGNMENT` tag bit.
    pub const fn header_size() -> usize {
        ((core::mem::size_of::<PreciseAllocation>() + Self::HALF_ALIGNMENT - 1)
            & !(Self::HALF_ALIGNMENT - 1))
            | Self::HALF_ALIGNMENT
    }
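    // `header_size()` worked example (typical 64-bit target, `repr(C)`): size_of::<PreciseAllocation>()
    // is 24, which is already a multiple of HALF_ALIGNMENT and already has the HALF_ALIGNMENT bit set,
    // so header_size() is 24. In general, rounding up to a multiple of HALF_ALIGNMENT and then OR-ing
    // in HALF_ALIGNMENT yields a value congruent to HALF_ALIGNMENT modulo ALIGNMENT, so an
    // ALIGNMENT-aligned base plus the header always produces a cell address that `is_precise` accepts.
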
    /// Does this allocation contain `raw_ptr`?
    pub fn contains(&self, raw_ptr: *mut ()) -> bool {
        self.above_lower_bound(raw_ptr) && self.below_upper_bound(raw_ptr)
    }
    /// At the start of a full collection, move the mark bit into the newly-allocated bit and clear the mark.
    pub fn flip(&mut self) {
        // Propagate the last cycle's mark bit into `is_newly_allocated` so that `is_live` says "yes" until this GC cycle finishes.
        // After that, `is_newly_allocated` is cleared again, so only previously marked or actually newly created objects survive.
        // We do not need to care about concurrency here since the marking thread is stopped right now. This is equivalent to the
        // logic of MarkedBlock::aboutToMarkSlow in JavaScriptCore.
        // We invoke this function only for a full collection. This ensures that at the end of the upcoming cycle we will clear
        // the NewlyAllocated bits of all objects, so this works correctly.
        //
        //                                      N: NewlyAllocated, M: Marked
        //                                                 after this         at the end        When cycle
        //                                            N M  function    N M     of cycle    N M  is finished   N M
        // The live object survives the last cycle    0 1      =>      1 0        =>       1 1       =>       0 1    => live
        // The dead object in the last cycle          0 0      =>      0 0        =>       0 0       =>       0 0    => dead
        // The live object newly created after this            =>      1 0        =>       1 1       =>       0 1    => live
        // The dead object newly created after this            =>      1 0        =>       1 0       =>       0 0    => dead
        // The live object newly created before this  1 0      =>      1 0        =>       1 1       =>       0 1    => live
        // The dead object newly created before this  1 0      =>      1 0        =>       1 0       =>       0 0    => dead
        //                                                                                                    ^
        //                                                              This is ensured since this function is used only for full GC.
        self.is_newly_allocated |= self.is_marked();
        self.mark_atomic().store(false, atomic::Ordering::Relaxed);
    }

    /// Is the mark bit set?
    pub fn is_marked(&self) -> bool {
        self.mark_atomic().load(atomic::Ordering::Relaxed)
    }

    /// Atomically set the mark bit. Returns `true` if the allocation was already marked,
    /// `false` if this call is the one that marked it.
    pub fn test_and_set_marked(&self) -> bool {
        if self.is_marked() {
            return true;
        }
        self.mark_atomic()
            .compare_exchange(
                false,
                true,
                atomic::Ordering::Relaxed,
                atomic::Ordering::Relaxed,
            )
            .is_err()
    }

    /// Clear the mark bit.
    pub fn clear_marked(&self) {
        self.mark_atomic().store(false, atomic::Ordering::Relaxed);
    }

    /// Finalize the cell if this allocation is not live.
    pub fn sweep(&mut self) -> bool {
        let cell = self.cell();
        unsafe {
            if self.has_valid_cell && !self.is_live() {
                let info = GC_TABLE.get_gc_info((*cell).get_gc_info_index());
                if let Some(cb) = info.finalize {
                    cb((*cell).payload() as _);
                }
                self.has_valid_cell = false;
            }
        }
        true
    }
    /// Try to create a precise allocation (it currently never returns null).
    pub fn try_create(size: usize, index_in_space: u32) -> *mut Self {
        let adjusted_alignment_allocation_size = Self::header_size() + size + Self::HALF_ALIGNMENT;
        unsafe {
            let mut space = libc::malloc(adjusted_alignment_allocation_size).cast::<u8>();
            let mut adjusted_alignment = false;
            if !is_aligned_for_precise_allocation(space) {
                space = space.add(Self::HALF_ALIGNMENT);
                adjusted_alignment = true;
                assert!(is_aligned_for_precise_allocation(space));
            }
            assert!(size != 0);
            space.cast::<Self>().write(Self {
                //link: LinkedListLink::new(),
                adjusted_alignment,
                mark: false,
                //is_newly_allocated: true,
                has_valid_cell: true,
                cell_size: size,
                index_in_space,
                is_newly_allocated: false,
            });

            space.cast()
        }
    }
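    // Layout produced by `try_create` (a sketch of this file's scheme):
    //
    //   malloc'd block                 header (ALIGNMENT-aligned)     cell (≡ HALF_ALIGNMENT mod ALIGNMENT)
    //   |                              |                              |
    //   v                              v                              v
    //   [ optional HALF_ALIGNMENT pad ][ PreciseAllocation + padding ][ object payload, cell_size bytes ]
    //                                    `header_size()` bytes
    //
    // The optional pad exists only when `adjusted_alignment` is true, i.e. when malloc did not hand
    // back an `ALIGNMENT`-aligned pointer.
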
    /// Is the newly-allocated flag set?
    pub fn is_newly_allocated(&self) -> bool {
        self.is_newly_allocated
    }
    /// Is this allocation live (marked or newly allocated)?
    pub fn is_live(&self) -> bool {
        self.is_marked() || self.is_newly_allocated
    }

    /// Return the cell size.
    pub fn cell_size(&self) -> usize {
        self.cell_size
    }
    /// Destroy this allocation, freeing the underlying memory.
    pub fn destroy(&mut self) {
        let base = self.base_pointer();
        unsafe {
            libc::free(base as _);
        }
    }

    /// Is this allocation dead, i.e. neither marked nor newly allocated?
    pub fn is_empty(&self) -> bool {
        !self.is_marked() && !self.is_newly_allocated()
    }
}
/// Check if `mem` is aligned for precise allocation.
pub fn is_aligned_for_precise_allocation(mem: *mut u8) -> bool {
    let allocable_ptr = mem as usize;
    (allocable_ptr & (PreciseAllocation::ALIGNMENT - 1)) == 0
}
/// This space contains objects which are larger than the size limits of other spaces.
/// Each object gets its own malloc'd region of memory.
/// Large objects are never moved by the garbage collector.
pub struct LargeObjectSpace {
    pub(crate) allocations: Vec<*mut PreciseAllocation>,
    pub(crate) precise_allocations_nursery_offset: usize,
    pub(crate) precise_allocations_offest_for_this_collection: usize,
    pub(crate) precise_allocations_offset_nursery_for_sweep: usize,
    pub(crate) precise_allocations_for_this_collection_size: usize,
    pub(crate) precise_allocations_for_this_collection_begin: *mut *mut PreciseAllocation,
    pub(crate) precise_allocations_for_this_collection_end: *mut *mut PreciseAllocation,
}

impl LargeObjectSpace {
    pub(crate) fn new() -> Self {
        Self {
            allocations: Vec::new(),

            precise_allocations_nursery_offset: 0,
            precise_allocations_offest_for_this_collection: 0,
            precise_allocations_offset_nursery_for_sweep: 0,
            precise_allocations_for_this_collection_end: null_mut(),
            precise_allocations_for_this_collection_begin: null_mut(),
            precise_allocations_for_this_collection_size: 0,
        }
    }
    /// At the start of a full collection, flip every large allocation's mark/newly-allocated state.
    pub fn begin_marking(&mut self, full: bool) {
        if full {
            for alloc in self.allocations.iter() {
                unsafe {
                    (**alloc).flip();
                }
            }
        }
    }
    /// If `pointer` is the cell address of a precise allocation in the current collection's snapshot,
    /// return that cell's header; otherwise return null.
    #[inline]
    pub fn contains(&self, pointer: *const u8) -> *mut HeapObjectHeader {
        // When conservatively scanning, check only the allocations snapshotted for this collection
        // (just the nursery for an eden collection).
        unsafe {
            if self.precise_allocations_for_this_collection_size != 0 {
                if (**self.precise_allocations_for_this_collection_begin)
                    .above_lower_bound(pointer as _)
                    && (**(self.precise_allocations_for_this_collection_end.sub(1)))
                        .below_upper_bound(pointer as _)
                {
                    let prec = PreciseAllocation::from_cell(pointer as _);
                    let slice = std::slice::from_raw_parts(
                        self.precise_allocations_for_this_collection_begin,
                        self.precise_allocations_for_this_collection_size,
                    );
                    if slice.binary_search(&prec).is_ok() {
                        return (*prec).cell();
                    }
                }
            }
            null_mut()
        }
    }

    /// Record where the next sweep should start (the nursery for an eden collection, the start of
    /// the space otherwise) and advance the nursery offset past all current allocations.
    pub fn prepare_for_allocation(&mut self, eden: bool) {
        if eden {
            self.precise_allocations_offset_nursery_for_sweep =
                self.precise_allocations_nursery_offset;
        } else {
            self.precise_allocations_offset_nursery_for_sweep = 0;
        }
        self.precise_allocations_nursery_offset = self.allocations.len();
    }

    /// Record which allocations belong to this collection (only the nursery for an eden collection,
    /// everything otherwise).
    pub fn prepare_for_marking(&mut self, eden: bool) {
        if eden {
            self.precise_allocations_offest_for_this_collection =
                self.precise_allocations_nursery_offset;
        } else {
            self.precise_allocations_offest_for_this_collection = 0;
        }
    }
    /// Sort allocations before a conservative scan.
    pub fn prepare_for_conservative_scan(&mut self) {
        unsafe {
            self.precise_allocations_for_this_collection_begin = self
                .allocations
                .as_mut_ptr()
                .add(self.precise_allocations_offest_for_this_collection);
            self.precise_allocations_for_this_collection_size =
                self.allocations.len() - self.precise_allocations_offest_for_this_collection;
            self.precise_allocations_for_this_collection_end =
                self.allocations.as_mut_ptr().add(self.allocations.len());
            let slice = std::slice::from_raw_parts_mut(
                self.precise_allocations_for_this_collection_begin,
                self.precise_allocations_for_this_collection_size,
            );

            slice.sort();

            let mut index = self.precise_allocations_offest_for_this_collection;
            let mut start = self.precise_allocations_for_this_collection_begin;
            let end = self.precise_allocations_for_this_collection_end;
            while start != end {
                (**start).index_in_space = index as _;
                index += 1;
                start = start.add(1);
            }
        }
    }
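
    // After `prepare_for_conservative_scan`, the snapshot between `..._begin` and `..._end` is sorted
    // by address, which is what lets `contains` binary-search it, and each allocation's
    // `index_in_space` is rewritten so it keeps matching that allocation's position in `allocations`.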

    /// Sweep large allocations: finalize dead cells, free empty allocations, and compact the
    /// `allocations` vector, fixing up each survivor's `index_in_space`.
    pub fn sweep(&mut self) {
        let mut src_index = self.precise_allocations_offset_nursery_for_sweep;
        let mut dst_index = src_index;
        while src_index < self.allocations.len() {
            let allocation = self.allocations[src_index];
            src_index += 1;
            unsafe {
                (*allocation).sweep();
                if (*allocation).is_empty() {
                    (*allocation).destroy();
                    continue;
                } else {
                    (*allocation).index_in_space = dst_index as u32;
                    self.allocations[dst_index] = allocation;
                    dst_index += 1;
                }
            }
        }
        self.allocations.truncate(dst_index);
        self.precise_allocations_nursery_offset = self.allocations.len();
    }

    /// Allocate a large object of `size` bytes and return a pointer to its cell header.
    pub fn allocate(&mut self, size: usize) -> *mut HeapObjectHeader {
        unsafe {
            let index = self.allocations.len();
            let memory = PreciseAllocation::try_create(size, index as _);
            if memory.is_null() {
                panic!("LargeObjectSpace: OOM");
            }

            self.allocations.push(memory);

            let cell = (*memory).cell();
            (*cell).set_size(0); // A size of 0 in the header means the object is large.
            cell
        }
    }
}
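
// A plausible call sequence from the collector (a sketch only; the real heap driver in this crate
// may order things differently):
//
//     space.begin_marking(full);              // flip mark/newly-allocated bits on a full GC
//     space.prepare_for_marking(eden);        // pick the snapshot: nursery only, or everything
//     space.prepare_for_conservative_scan();  // sort the snapshot so `contains` can binary-search
//     // ... the conservative stack scan calls space.contains(ptr) for candidate pointers ...
//     space.prepare_for_allocation(eden);     // decide where the next sweep starts
//     space.sweep();                          // finalize dead cells, free and compact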

impl Drop for LargeObjectSpace {
    fn drop(&mut self) {
        while let Some(alloc) = self.allocations.pop() {
            unsafe {
                let hdr = (*alloc).cell();
                if let Some(callback) = GC_TABLE.get_gc_info((*hdr).get_gc_info_index()).finalize {
                    callback((*hdr).payload() as _);
                }
                (*alloc).destroy();
            }
        }
    }
}
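
// Minimal sanity checks for the header-size / tag-bit arithmetic (a small sketch; they exercise only
// the constants and const fn defined above, not the GC itself).
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn header_size_keeps_the_tag_bit() {
        let header = PreciseAllocation::header_size();
        // The header is a multiple of HALF_ALIGNMENT but deliberately not a multiple of ALIGNMENT,
        // so adding it to an ALIGNMENT-aligned base sets the HALF_ALIGNMENT bit of the cell address.
        assert_eq!(header % PreciseAllocation::HALF_ALIGNMENT, 0);
        assert_eq!(
            header % PreciseAllocation::ALIGNMENT,
            PreciseAllocation::HALF_ALIGNMENT
        );

        // A hypothetical ALIGNMENT-aligned base address; any such base yields a "precise" cell.
        let base = PreciseAllocation::ALIGNMENT * 4;
        assert!(PreciseAllocation::is_precise((base + header) as *mut ()));
    }
}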