mmtk/util/linear_scan.rs

use crate::util::metadata::vo_bit;
use crate::util::Address;
use crate::util::ObjectReference;
use crate::vm::ObjectModel;
use crate::vm::VMBinding;
use std::marker::PhantomData;

// FIXME: MarkCompact uses linear scanning to discover allocated objects in the MarkCompactSpace.
// In the future, it should instead do the linear scan using metadata local to the MarkCompactSpace.

/// Iterate over an address range, and find each object by its VO bit.
/// `ATOMIC_LOAD_VO_BIT` can be set to false if it is known that loading the VO bit
/// non-atomically is correct (e.g. a single thread is scanning this address range, and
/// it is the only thread that accesses the VO bit).
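///
/// # Example
///
/// A minimal usage sketch (the `MyVM` binding type, the address range, and the choice of
/// atomic loading here are illustrative assumptions):
///
/// ```ignore
/// // The VO bit metadata must already be mapped for [start, end).
/// let iter = ObjectIterator::<MyVM, DefaultObjectSize<MyVM>, true>::new(start, end);
/// for object in iter {
///     // `object` is an ObjectReference whose VO bit is set within the range.
/// }
/// ```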
pub struct ObjectIterator<VM: VMBinding, S: LinearScanObjectSize, const ATOMIC_LOAD_VO_BIT: bool> {
    start: Address,
    end: Address,
    cursor: Address,
    _p: PhantomData<(VM, S)>,
}

impl<VM: VMBinding, S: LinearScanObjectSize, const ATOMIC_LOAD_VO_BIT: bool>
    ObjectIterator<VM, S, ATOMIC_LOAD_VO_BIT>
{
    /// Create an iterator for the address range. The caller must ensure
    /// that the VO bit metadata is mapped for the address range.
    pub fn new(start: Address, end: Address) -> Self {
        debug_assert!(start < end);
        debug_assert!(
            start.is_aligned_to(ObjectReference::ALIGNMENT),
            "start is not word-aligned: {start}"
        );
        debug_assert!(
            end.is_aligned_to(ObjectReference::ALIGNMENT),
            "end is not word-aligned: {end}"
        );
        ObjectIterator {
            start,
            end,
            cursor: start,
            _p: PhantomData,
        }
    }
}

impl<VM: VMBinding, S: LinearScanObjectSize, const ATOMIC_LOAD_VO_BIT: bool> std::iter::Iterator
    for ObjectIterator<VM, S, ATOMIC_LOAD_VO_BIT>
{
    type Item = ObjectReference;

    fn next(&mut self) -> Option<<Self as Iterator>::Item> {
        while self.cursor < self.end {
            let is_object = if ATOMIC_LOAD_VO_BIT {
                vo_bit::is_vo_bit_set_for_addr(self.cursor)
            } else {
                unsafe { vo_bit::is_vo_bit_set_unsafe(self.cursor) }
            };

            if let Some(object) = is_object {
                self.cursor += S::size(object);
                return Some(object);
            } else {
                self.cursor += VM::MIN_ALIGNMENT;
            }
        }

        None
    }
}

/// Describe the object size for linear scanning. Different policies may have
/// different object sizes (e.g. when they include extra per-object metadata).
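///
/// A hypothetical policy that accounts for one extra header word might look like this
/// (a sketch only; `WithHeaderSize` is not part of this module):
///
/// ```ignore
/// struct WithHeaderSize<VM: VMBinding>(PhantomData<VM>);
/// impl<VM: VMBinding> LinearScanObjectSize for WithHeaderSize<VM> {
///     fn size(object: ObjectReference) -> usize {
///         // One extra word of per-object metadata on top of the VM-reported size.
///         VM::VMObjectModel::get_current_size(object) + crate::util::constants::BYTES_IN_WORD
///     }
/// }
/// ```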
pub trait LinearScanObjectSize {
    /// The object size in bytes for the given object.
    fn size(object: ObjectReference) -> usize;
}

/// The default object size, as returned by `ObjectModel::get_current_size()`.
pub struct DefaultObjectSize<VM: VMBinding>(PhantomData<VM>);
impl<VM: VMBinding> LinearScanObjectSize for DefaultObjectSize<VM> {
    fn size(object: ObjectReference) -> usize {
        VM::VMObjectModel::get_current_size(object)
    }
}

/// A `Region` represents a memory region whose start address is aligned to the region size and whose size is fixed.
/// `Region` provides a set of utility methods, along with a `RegionIterator` that linearly scans one region at a time.
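///
/// For example, given a 4 KiB `Page` region type like the one defined in the tests below
/// (a usage sketch, not an API provided by this module):
///
/// ```ignore
/// let page = Page::from_unaligned_address(addr); // rounds `addr` down to the page start
/// assert!(page.includes_address(addr));
/// assert_eq!(page.end(), page.start() + Page::BYTES); // `end()` is exclusive
/// ```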
pub trait Region: Copy + PartialEq + PartialOrd {
    /// log2 of the size in bytes for the region.
    const LOG_BYTES: usize;
    /// The size in bytes for the region.
    const BYTES: usize = 1 << Self::LOG_BYTES;

    /// Create a region from an address that is aligned to the region boundary. The method should panic if the address
    /// is not properly aligned to the region. For performance, this method should always be inlined.
    fn from_aligned_address(address: Address) -> Self;
    /// Return the start address of the region. For performance, this method should always be inlined.
    fn start(&self) -> Address;

    /// Create a region from an arbitrary address.
    fn from_unaligned_address(address: Address) -> Self {
        Self::from_aligned_address(Self::align(address))
    }

    /// Align the address to the region.
    fn align(address: Address) -> Address {
        address.align_down(Self::BYTES)
    }
    /// Check if an address is aligned to the region.
    fn is_aligned(address: Address) -> bool {
        address.is_aligned_to(Self::BYTES)
    }

    /// Return the end address of the region. Note that the end address is not in the region.
    fn end(&self) -> Address {
        self.start() + Self::BYTES
    }
    /// Return the next region after this one.
    fn next(&self) -> Self {
        self.next_nth(1)
    }
    /// Return the next nth region after this one.
    fn next_nth(&self, n: usize) -> Self {
        debug_assert!(self.start().as_usize() < usize::MAX - (n << Self::LOG_BYTES));
        Self::from_aligned_address(self.start() + (n << Self::LOG_BYTES))
    }
    /// Return the region that contains the object.
    fn containing(object: ObjectReference) -> Self {
        Self::from_unaligned_address(object.to_raw_address())
    }
    /// Check if the given address is in the region.
    fn includes_address(&self, addr: Address) -> bool {
        Self::align(addr) == self.start()
    }
}

/// An iterator for contiguous regions.
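///
/// A minimal usage sketch, again assuming a `Page` region type like the one in the tests below:
///
/// ```ignore
/// let start = Page::from_aligned_address(page_aligned_addr);
/// let end = start.next_nth(4);
/// for page in RegionIterator::new(start, end) {
///     // Visits `start`, `start.next()`, ..., up to but not including `end`.
/// }
/// ```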
pub struct RegionIterator<R: Region> {
    current: R,
    end: R,
}

impl<R: Region> RegionIterator<R> {
    /// Create an iterator from the start region (inclusive) to the end region (exclusive).
    pub fn new(start: R, end: R) -> Self {
        Self { current: start, end }
    }
}

impl<R: Region> Iterator for RegionIterator<R> {
    type Item = R;

    fn next(&mut self) -> Option<R> {
        if self.current < self.end {
            let ret = self.current;
            self.current = self.current.next();
            Some(ret)
        } else {
            None
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::util::constants::LOG_BYTES_IN_PAGE;

    const PAGE_SIZE: usize = 1 << LOG_BYTES_IN_PAGE;

    #[derive(Copy, Clone, Debug, PartialEq, PartialOrd)]
    struct Page(Address);

    impl Region for Page {
        const LOG_BYTES: usize = LOG_BYTES_IN_PAGE as usize;

        fn from_aligned_address(address: Address) -> Self {
            debug_assert!(address.is_aligned_to(Self::BYTES));
            Self(address)
        }

        fn start(&self) -> Address {
            self.0
        }
    }

    #[test]
    fn test_region_methods() {
        let addr4k = unsafe { Address::from_usize(PAGE_SIZE) };
        let addr4k1 = unsafe { Address::from_usize(PAGE_SIZE + 1) };

        // align
        debug_assert_eq!(Page::align(addr4k), addr4k);
        debug_assert_eq!(Page::align(addr4k1), addr4k);
        debug_assert!(Page::is_aligned(addr4k));
        debug_assert!(!Page::is_aligned(addr4k1));

        let page = Page::from_aligned_address(addr4k);
        // start/end
        debug_assert_eq!(page.start(), addr4k);
        debug_assert_eq!(page.end(), addr4k + PAGE_SIZE);
        // next
        debug_assert_eq!(page.next().start(), addr4k + PAGE_SIZE);
        debug_assert_eq!(page.next_nth(1).start(), addr4k + PAGE_SIZE);
        debug_assert_eq!(page.next_nth(2).start(), addr4k + 2 * PAGE_SIZE);
    }

    #[test]
    fn test_region_iterator_normal() {
        let addr4k = unsafe { Address::from_usize(PAGE_SIZE) };
        let page = Page::from_aligned_address(addr4k);
        let end_page = page.next_nth(5);

        let mut results = vec![];
        let iter = RegionIterator::new(page, end_page);
        for p in iter {
            results.push(p);
        }
        debug_assert_eq!(
            results,
            vec![
                page,
                page.next_nth(1),
                page.next_nth(2),
                page.next_nth(3),
                page.next_nth(4)
            ]
        );
    }

    #[test]
    fn test_region_iterator_same_start_end() {
        let addr4k = unsafe { Address::from_usize(PAGE_SIZE) };
        let page = Page::from_aligned_address(addr4k);

        let mut results = vec![];
        let iter = RegionIterator::new(page, page);
        for p in iter {
            results.push(p);
        }
        debug_assert_eq!(results, vec![]);
    }

    #[test]
    fn test_region_iterator_smaller_end() {
        let addr4k = unsafe { Address::from_usize(PAGE_SIZE) };
        let page = Page::from_aligned_address(addr4k);
        let end = Page::from_aligned_address(Address::ZERO);

        let mut results = vec![];
        let iter = RegionIterator::new(page, end);
        for p in iter {
            results.push(p);
        }
        debug_assert_eq!(results, vec![]);
    }
}