mmtk/util/heap/layout/fragmented_mapper.rs

use super::mmapper::MapState;
use super::Mmapper;
use crate::util::constants::BYTES_IN_PAGE;
use crate::util::conversions;
use crate::util::heap::layout::vm_layout::*;
use crate::util::memory::{MmapAnnotation, MmapStrategy};
use crate::util::Address;
use atomic::{Atomic, Ordering};
use std::cell::UnsafeCell;
use std::fmt;
use std::io::Result;
use std::sync::Mutex;

const MMAP_NUM_CHUNKS: usize = 1 << (33 - LOG_MMAP_CHUNK_BYTES);

// 36 = 64GB - physical memory larger than this is uncommon
// 40 = 1TB. Increased to 1TB because, although we probably won't use this much memory, we allow
// quarantining an address range, and that is usually used to quarantine a large amount of memory.
const LOG_MAPPABLE_BYTES: usize = 40;

// Size of a slab.  A slab covers 2^LOG_MMAP_CHUNKS_PER_SLAB mmap chunks.  With the current value
// of 8 and, for example, 4MB mmap chunks, this gives a 1GB slab with a 256-entry chunk map.
const LOG_MMAP_CHUNKS_PER_SLAB: usize = 8;
const LOG_MMAP_SLAB_BYTES: usize = LOG_MMAP_CHUNKS_PER_SLAB + LOG_MMAP_CHUNK_BYTES;
const MMAP_SLAB_EXTENT: usize = 1 << LOG_MMAP_SLAB_BYTES;
const MMAP_SLAB_MASK: usize = (1 << LOG_MMAP_SLAB_BYTES) - 1;
/// Maximum number of slabs, which determines the maximum mappable address space.
const LOG_MAX_SLABS: usize = LOG_MAPPABLE_BYTES - LOG_MMAP_CHUNK_BYTES - LOG_MMAP_CHUNKS_PER_SLAB;
const MAX_SLABS: usize = 1 << LOG_MAX_SLABS;
/// Parameters for the slab table.  The hash function requires it to be a power of 2.  Must be
/// larger than MAX_SLABS for hashing to work, and should be much larger for it to be efficient.
const LOG_SLAB_TABLE_SIZE: usize = 1 + LOG_MAX_SLABS;
const HASH_MASK: usize = (1 << LOG_SLAB_TABLE_SIZE) - 1;
const SLAB_TABLE_SIZE: usize = 1 << LOG_SLAB_TABLE_SIZE;
const SENTINEL: Address = Address::MAX;
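// Worked example of the derived sizes, assuming 4MB mmap chunks (LOG_MMAP_CHUNK_BYTES = 22);
// the exact figures depend on the vm_layout configuration:
//   LOG_MMAP_SLAB_BYTES = 8 + 22 = 30       -> 1GB slabs
//   LOG_MAX_SLABS       = 40 - 22 - 8 = 10  -> up to 1024 slabs, i.e. 1TB of mappable space
//   LOG_SLAB_TABLE_SIZE = 1 + 10 = 11       -> 2048 slab table entries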

type Slab = [Atomic<MapState>; MMAP_NUM_CHUNKS];

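/// A two-level structure that lazily mmaps the heap at mmap-chunk granularity.  Slabs are looked
/// up through a fixed-size hash table (`slab_map`/`slab_table`) keyed by the slab base address,
/// and each `Slab` records one `MapState` per mmap chunk within that slab.
///
/// A minimal usage sketch (illustrative only: `start`, `pages`, `strategy` and `anno` must be
/// supplied by the caller, e.g. by a space that knows its own `MmapStrategy` and annotation):
///
/// ```ignore
/// let mapper = FragmentedMapper::new();
/// // Lazily mmap the chunks covering `pages` pages starting at `start`.
/// mapper.ensure_mapped(start, pages, strategy, anno)?;
/// assert!(mapper.is_mapped_address(start));
/// ```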
pub struct FragmentedMapper {
    lock: Mutex<()>,
    inner: UnsafeCell<InnerFragmentedMapper>,
}

unsafe impl Send for FragmentedMapper {}
unsafe impl Sync for FragmentedMapper {}

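/// Mutable state behind `FragmentedMapper`, only updated while holding `FragmentedMapper::lock`.
/// `free_slabs` is a pool of pre-allocated slabs consumed by `commit_free_slab`; `slab_table`
/// holds the slabs currently in use, and `slab_map[i]` is the base address of the slab stored at
/// `slab_table[i]` (or `SENTINEL` if that entry is free).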
struct InnerFragmentedMapper {
    free_slab_index: usize,
    free_slabs: Vec<Option<Box<Slab>>>,
    slab_table: Vec<Option<Box<Slab>>>,
    slab_map: Vec<Address>,
}

impl fmt::Debug for FragmentedMapper {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "FragmentedMapper({})", MMAP_NUM_CHUNKS)
    }
}

impl Mmapper for FragmentedMapper {
    fn eagerly_mmap_all_spaces(&self, _space_map: &[Address]) {}

    fn mark_as_mapped(&self, mut start: Address, bytes: usize) {
        let end = start + bytes;
        // Iterate over the slabs covered
        while start < end {
            let high = if end > Self::slab_limit(start) && !Self::slab_limit(start).is_zero() {
                Self::slab_limit(start)
            } else {
                end
            };
            let slab = Self::slab_align_down(start);
            let start_chunk = Self::chunk_index(slab, start);
            let end_chunk = Self::chunk_index(slab, conversions::mmap_chunk_align_up(high));

            let mapped = self.get_or_allocate_slab_table(start);
            for entry in mapped.iter().take(end_chunk).skip(start_chunk) {
                entry.store(MapState::Mapped, Ordering::Relaxed);
            }
            start = high;
        }
    }

    fn quarantine_address_range(
        &self,
        mut start: Address,
        pages: usize,
        strategy: MmapStrategy,
        anno: &MmapAnnotation,
    ) -> Result<()> {
        debug_assert!(start.is_aligned_to(BYTES_IN_PAGE));

        let end = start + conversions::pages_to_bytes(pages);

        // Each `MapState` entry governs a chunk.
        // Align down to the chunk start because we only mmap multiples of whole chunks.
        let mmap_start = conversions::mmap_chunk_align_down(start);

        // We collect the chunk states from slabs to process them in bulk.
        let mut state_slices = vec![];

        // Iterate over the slabs covered
        while start < end {
            let high = if end > Self::slab_limit(start) && !Self::slab_limit(start).is_zero() {
                Self::slab_limit(start)
            } else {
                end
            };

            let slab = Self::slab_align_down(start);
            let start_chunk = Self::chunk_index(slab, start);
            let end_chunk = Self::chunk_index(slab, conversions::mmap_chunk_align_up(high));

            let mapped = self.get_or_allocate_slab_table(start);
            state_slices.push(&mapped[start_chunk..end_chunk]);

            start = high;
        }

        #[cfg(debug_assertions)]
        {
            // Sanity-check that the collected slices exactly cover the chunk-aligned range.
            let mmap_end = conversions::mmap_chunk_align_up(end);
            let num_slices = state_slices.iter().map(|s| s.len()).sum::<usize>();

            debug_assert_eq!(mmap_start + BYTES_IN_CHUNK * num_slices, mmap_end);
        }

        // Transition the chunks in bulk.
        {
            let _guard = self.lock.lock().unwrap();
            MapState::bulk_transition_to_quarantined(
                state_slices.as_slice(),
                mmap_start,
                strategy,
                anno,
            )?;
        }

        Ok(())
    }

    fn ensure_mapped(
        &self,
        mut start: Address,
        pages: usize,
        strategy: MmapStrategy,
        anno: &MmapAnnotation,
    ) -> Result<()> {
        let end = start + conversions::pages_to_bytes(pages);
        // Iterate over the slabs covered
        while start < end {
            let base = Self::slab_align_down(start);
            let high = if end > Self::slab_limit(start) && !Self::slab_limit(start).is_zero() {
                Self::slab_limit(start)
            } else {
                end
            };

            let slab = Self::slab_align_down(start);
            let start_chunk = Self::chunk_index(slab, start);
            let end_chunk = Self::chunk_index(slab, conversions::mmap_chunk_align_up(high));

            let mapped = self.get_or_allocate_slab_table(start);

            // Iterate over the chunks within the slab
            for (chunk, entry) in mapped.iter().enumerate().take(end_chunk).skip(start_chunk) {
                if matches!(entry.load(Ordering::Relaxed), MapState::Mapped) {
                    continue;
                }

                let mmap_start = Self::chunk_index_to_address(base, chunk);
                let _guard = self.lock.lock().unwrap();
                MapState::transition_to_mapped(entry, mmap_start, strategy, anno)?;
            }
            start = high;
        }
        Ok(())
    }

    /// Return `true` if the given address `addr` has been mmapped.
    fn is_mapped_address(&self, addr: Address) -> bool {
        let mapped = self.slab_table(addr);
        match mapped {
            Some(mapped) => {
                mapped[Self::chunk_index(Self::slab_align_down(addr), addr)].load(Ordering::Relaxed)
                    == MapState::Mapped
            }
            _ => false,
        }
    }

    fn protect(&self, mut start: Address, pages: usize) {
        let end = start + conversions::pages_to_bytes(pages);
        let _guard = self.lock.lock().unwrap();
        // Iterate over the slabs covered
        while start < end {
            let base = Self::slab_align_down(start);
            let high = if end > Self::slab_limit(start) && !Self::slab_limit(start).is_zero() {
                Self::slab_limit(start)
            } else {
                end
            };

            let slab = Self::slab_align_down(start);
            let start_chunk = Self::chunk_index(slab, start);
            let end_chunk = Self::chunk_index(slab, conversions::mmap_chunk_align_up(high));

            let mapped = self.get_or_allocate_slab_table(start);

            for (chunk, entry) in mapped.iter().enumerate().take(end_chunk).skip(start_chunk) {
                let mmap_start = Self::chunk_index_to_address(base, chunk);
                MapState::transition_to_protected(entry, mmap_start).unwrap();
            }
            start = high;
        }
    }
}

impl FragmentedMapper {
    pub fn new() -> Self {
        Self {
            lock: Mutex::new(()),
            inner: UnsafeCell::new(InnerFragmentedMapper {
                free_slab_index: 0,
                free_slabs: (0..MAX_SLABS).map(|_| Some(Self::new_slab())).collect(),
                slab_table: (0..SLAB_TABLE_SIZE).map(|_| None).collect(),
                slab_map: vec![SENTINEL; SLAB_TABLE_SIZE],
            }),
        }
    }

    fn new_slab() -> Box<Slab> {
        // Because AtomicU8 does not implement Copy, it is a compilation error to use the
        // expression `[Atomic::new(MapState::Unmapped); MMAP_NUM_CHUNKS]`, because that involves
        // copying.  We must define a constant for it.
        //
        // If we declare a const Atomic, Clippy will warn about const items being interior
        // mutable.  The inline const expression `const { Atomic::new(MapState::Unmapped) }` would
        // eliminate that warning, but it is experimental until 1.79.
        // TODO: Use an inline const expression after we bump MSRV to 1.79.
        #[allow(clippy::declare_interior_mutable_const)]
        const INITIAL_ENTRY: Atomic<MapState> = Atomic::new(MapState::Unmapped);

        let mapped: Box<Slab> = Box::new([INITIAL_ENTRY; MMAP_NUM_CHUNKS]);
        mapped
    }

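    /// Hash a slab base address into a slab table index by XOR-folding the slab number,
    /// `LOG_SLAB_TABLE_SIZE` bits at a time, so that every bit of the slab number affects the
    /// result.  For example, with `LOG_SLAB_TABLE_SIZE` = 11, the slab number `(1 << 11) | 2`
    /// hashes to `2 ^ 1 = 3`.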
    fn hash(addr: Address) -> usize {
        let mut initial = (addr & !MMAP_SLAB_MASK) >> LOG_MMAP_SLAB_BYTES;
        let mut hash = 0;
        while initial != 0 {
            hash ^= initial & HASH_MASK;
            initial >>= LOG_SLAB_TABLE_SIZE;
        }
        hash
    }

    fn slab_table(&self, addr: Address) -> Option<&Slab> {
        self.get_or_optionally_allocate_slab_table(addr, false)
    }

    fn get_or_allocate_slab_table(&self, addr: Address) -> &Slab {
        self.get_or_optionally_allocate_slab_table(addr, true)
            .unwrap()
    }

    fn inner(&self) -> &InnerFragmentedMapper {
        unsafe { &*self.inner.get() }
    }
    #[allow(clippy::mut_from_ref)]
    fn inner_mut(&self) -> &mut InnerFragmentedMapper {
        unsafe { &mut *self.inner.get() }
    }

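    /// Find the slab table entry for the slab covering `addr`, optionally allocating a new slab
    /// if none exists yet.  Collisions are resolved by open addressing with linear probing: we
    /// start at the hashed index and scan forward (wrapping around) until we find the matching
    /// slab base address or a free (`SENTINEL`) slot.  A hit on the first check returns without
    /// taking the lock; otherwise the lock is taken before the slot is re-checked or filled.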
    fn get_or_optionally_allocate_slab_table(
        &self,
        addr: Address,
        allocate: bool,
    ) -> Option<&Slab> {
        debug_assert!(addr != SENTINEL);
        let base = unsafe { Address::from_usize(addr & !MMAP_SLAB_MASK) };
        let hash = Self::hash(base);
        let mut index = hash; // Use 'index' to iterate over the hash table so that we remember where we started
        loop {
            // Check for a hash-table hit.  Should be the frequent case.
            if base == self.inner().slab_map[index] {
                return self.slab_table_for(addr, index);
            }
            let _guard = self.lock.lock().unwrap();

            // Check whether another thread has allocated a slab while we were acquiring the lock.
            if base == self.inner().slab_map[index] {
                return self.slab_table_for(addr, index);
            }

            // Check for a free slot.
            if self.inner().slab_map[index] == SENTINEL {
                if !allocate {
                    return None;
                }
                unsafe {
                    self.commit_free_slab(index);
                }
                self.inner_mut().slab_map[index] = base;
                return self.slab_table_for(addr, index);
            }
            // The slot is occupied by a different slab: probe the next slot.
            index += 1;
            index %= SLAB_TABLE_SIZE;
            assert!(index != hash, "MMAP slab table is full!");
        }
    }

    fn slab_table_for(&self, _addr: Address, index: usize) -> Option<&Slab> {
        debug_assert!(self.inner().slab_table[index].is_some());
        self.inner().slab_table[index].as_ref().map(|x| x as &Slab)
    }

    /// Take a free slab of chunks from the `free_slabs` array, and insert it at the given
    /// `index` in the `slab_table`.
    ///
    /// # Safety
    ///
    /// Caller must ensure that only one thread is calling this function at a time.
    unsafe fn commit_free_slab(&self, index: usize) {
        assert!(
            self.inner().free_slab_index < MAX_SLABS,
            "All free slabs used: virtual address space is exhausted."
        );
        debug_assert!(self.inner().slab_table[index].is_none());
        debug_assert!(self.inner().free_slabs[self.inner().free_slab_index].is_some());
        ::std::mem::swap(
            &mut self.inner_mut().slab_table[index],
            &mut self.inner_mut().free_slabs[self.inner().free_slab_index],
        );
        self.inner_mut().free_slab_index += 1;
    }

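    /// Return the start address of the `chunk`-th mmap chunk within the slab starting at `base`.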
    fn chunk_index_to_address(base: Address, chunk: usize) -> Address {
        base + (chunk << LOG_MMAP_CHUNK_BYTES)
    }

    /// Return the base address of the slab enclosing `addr`.
    fn slab_align_down(addr: Address) -> Address {
        unsafe { Address::from_usize(addr & !MMAP_SLAB_MASK) }
    }

    /// Return the base address of the slab after the one enclosing `addr`.
    fn slab_limit(addr: Address) -> Address {
        Self::slab_align_down(addr) + MMAP_SLAB_EXTENT
    }

    /// Return the index within the slab starting at `slab` of the chunk containing `addr`.
    /// `addr` may be in the next slab, in which case the index is beyond the end of the slab.
    fn chunk_index(slab: Address, addr: Address) -> usize {
        let delta = addr - slab;
        delta >> LOG_MMAP_CHUNK_BYTES
    }
}

impl Default for FragmentedMapper {
    fn default() -> Self {
        Self::new()
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::mmap_anno_test;
    use crate::util::constants::LOG_BYTES_IN_PAGE;
    use crate::util::heap::layout::vm_layout::MMAP_CHUNK_BYTES;
    use crate::util::memory;
    use crate::util::test_util::FRAGMENTED_MMAPPER_TEST_REGION;
    use crate::util::test_util::{serial_test, with_cleanup};
    use crate::util::{conversions, Address};

    const FIXED_ADDRESS: Address = FRAGMENTED_MMAPPER_TEST_REGION.start;
    const MAX_BYTES: usize = FRAGMENTED_MMAPPER_TEST_REGION.size;
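    // The tests below mmap a fixed, dedicated region (FRAGMENTED_MMAPPER_TEST_REGION), so they
    // run under `serial_test` and use `with_cleanup` to munmap the region after each test.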

    fn pages_to_chunks_up(pages: usize) -> usize {
        // Convert pages to bytes, then round up to a whole number of mmap chunks.
        let bytes = conversions::pages_to_bytes(pages);
        conversions::raw_align_up(bytes, MMAP_CHUNK_BYTES) / MMAP_CHUNK_BYTES
    }

    fn get_chunk_map_state(mmapper: &FragmentedMapper, chunk: Address) -> Option<MapState> {
        assert_eq!(conversions::mmap_chunk_align_up(chunk), chunk);
        let mapped = mmapper.slab_table(chunk);
        mapped.map(|m| {
            m[FragmentedMapper::chunk_index(FragmentedMapper::slab_align_down(chunk), chunk)]
                .load(Ordering::Relaxed)
        })
    }

    #[test]
    fn address_hashing() {
        for i in 0..10 {
            unsafe {
                let a = i << LOG_MMAP_SLAB_BYTES;
                assert_eq!(FragmentedMapper::hash(Address::from_usize(a)), i);

                let b = a + ((i + 1) << (LOG_MMAP_SLAB_BYTES + LOG_SLAB_TABLE_SIZE + 1));
                assert_eq!(
                    FragmentedMapper::hash(Address::from_usize(b)),
                    i ^ ((i + 1) << 1)
                );

                let c = b + ((i + 2) << (LOG_MMAP_SLAB_BYTES + LOG_SLAB_TABLE_SIZE * 2 + 2));
                assert_eq!(
                    FragmentedMapper::hash(Address::from_usize(c)),
                    i ^ ((i + 1) << 1) ^ ((i + 2) << 2)
                );
            }
        }
    }

    #[test]
    fn ensure_mapped_1page() {
        serial_test(|| {
            let pages = 1;
            with_cleanup(
                || {
                    let mmapper = FragmentedMapper::new();
                    mmapper
                        .ensure_mapped(FIXED_ADDRESS, pages, MmapStrategy::TEST, mmap_anno_test!())
                        .unwrap();

                    let chunks = pages_to_chunks_up(pages);
                    for i in 0..chunks {
                        assert_eq!(
                            get_chunk_map_state(
                                &mmapper,
                                FIXED_ADDRESS + (i << LOG_BYTES_IN_CHUNK)
                            ),
                            Some(MapState::Mapped)
                        );
                    }
                },
                || {
                    memory::munmap(FIXED_ADDRESS, MAX_BYTES).unwrap();
                },
            )
        })
    }

    #[test]
    fn ensure_mapped_1chunk() {
        serial_test(|| {
            let pages = MMAP_CHUNK_BYTES >> LOG_BYTES_IN_PAGE as usize;
            with_cleanup(
                || {
                    let mmapper = FragmentedMapper::new();
                    mmapper
                        .ensure_mapped(FIXED_ADDRESS, pages, MmapStrategy::TEST, mmap_anno_test!())
                        .unwrap();

                    let chunks = pages_to_chunks_up(pages);
                    for i in 0..chunks {
                        assert_eq!(
                            get_chunk_map_state(
                                &mmapper,
                                FIXED_ADDRESS + (i << LOG_BYTES_IN_CHUNK)
                            ),
                            Some(MapState::Mapped)
                        );
                    }
                },
                || {
                    memory::munmap(FIXED_ADDRESS, MAX_BYTES).unwrap();
                },
            )
        })
    }

    #[test]
    fn ensure_mapped_more_than_1chunk() {
        serial_test(|| {
            let pages = (MMAP_CHUNK_BYTES + MMAP_CHUNK_BYTES / 2) >> LOG_BYTES_IN_PAGE as usize;
            with_cleanup(
                || {
                    let mmapper = FragmentedMapper::new();
                    mmapper
                        .ensure_mapped(FIXED_ADDRESS, pages, MmapStrategy::TEST, mmap_anno_test!())
                        .unwrap();

                    let chunks = pages_to_chunks_up(pages);
                    for i in 0..chunks {
                        assert_eq!(
                            get_chunk_map_state(
                                &mmapper,
                                FIXED_ADDRESS + (i << LOG_BYTES_IN_CHUNK)
                            ),
                            Some(MapState::Mapped)
                        );
                    }
                },
                || {
                    memory::munmap(FIXED_ADDRESS, MAX_BYTES).unwrap();
                },
            )
        })
    }

    #[test]
    fn protect() {
        serial_test(|| {
            with_cleanup(
                || {
                    // map 2 chunks
                    let mmapper = FragmentedMapper::new();
                    let pages_per_chunk = MMAP_CHUNK_BYTES >> LOG_BYTES_IN_PAGE as usize;
                    mmapper
                        .ensure_mapped(
                            FIXED_ADDRESS,
                            pages_per_chunk * 2,
                            MmapStrategy::TEST,
                            mmap_anno_test!(),
                        )
                        .unwrap();

                    // protect 1 chunk
                    mmapper.protect(FIXED_ADDRESS, pages_per_chunk);

                    assert_eq!(
                        get_chunk_map_state(&mmapper, FIXED_ADDRESS),
                        Some(MapState::Protected)
                    );
                    assert_eq!(
                        get_chunk_map_state(&mmapper, FIXED_ADDRESS + MMAP_CHUNK_BYTES),
                        Some(MapState::Mapped)
                    );
                },
                || {
                    memory::munmap(FIXED_ADDRESS, MAX_BYTES).unwrap();
                },
            )
        })
    }

    #[test]
    fn ensure_mapped_on_protected_chunks() {
        serial_test(|| {
            with_cleanup(
                || {
                    // map 2 chunks
                    let mmapper = FragmentedMapper::new();
                    let pages_per_chunk = MMAP_CHUNK_BYTES >> LOG_BYTES_IN_PAGE as usize;
                    mmapper
                        .ensure_mapped(
                            FIXED_ADDRESS,
                            pages_per_chunk * 2,
                            MmapStrategy::TEST,
                            mmap_anno_test!(),
                        )
                        .unwrap();

                    // protect 1 chunk
                    mmapper.protect(FIXED_ADDRESS, pages_per_chunk);

                    assert_eq!(
                        get_chunk_map_state(&mmapper, FIXED_ADDRESS),
                        Some(MapState::Protected)
                    );
                    assert_eq!(
                        get_chunk_map_state(&mmapper, FIXED_ADDRESS + MMAP_CHUNK_BYTES),
                        Some(MapState::Mapped)
                    );

                    // ensure mapped - this will unprotect the previously protected chunk
                    mmapper
                        .ensure_mapped(
                            FIXED_ADDRESS,
                            pages_per_chunk * 2,
                            MmapStrategy::TEST,
                            mmap_anno_test!(),
                        )
                        .unwrap();
                    assert_eq!(
                        get_chunk_map_state(&mmapper, FIXED_ADDRESS),
                        Some(MapState::Mapped)
                    );
                    assert_eq!(
                        get_chunk_map_state(&mmapper, FIXED_ADDRESS + MMAP_CHUNK_BYTES),
                        Some(MapState::Mapped)
                    );
                },
                || {
                    memory::munmap(FIXED_ADDRESS, MAX_BYTES).unwrap();
                },
            )
        })
    }
}