//! comet/internal/card_table.rs

1use std::{
2    mem::size_of,
3    sync::atomic::{AtomicU8, AtomicUsize, Ordering},
4};
5
6use memmap2::MmapMut;
/// Atomically compare-and-swap a single byte at `address` from `old_value` to
/// `new_value`, returning `true` on success.
///
/// Uses a *weak* CAS, so it may fail spuriously even when the byte currently
/// equals `old_value`; callers are expected to retry in a loop.
///
/// # Safety
/// `address` must point to a valid, writable byte. On non-x86 targets the
/// whole machine word containing that byte must also be readable and writable,
/// because the byte CAS is emulated with a word-wide CAS.
#[inline(always)]
fn byte_cas(old_value: u8, new_value: u8, address: *mut u8) -> bool {
    #[cfg(any(target_arch = "x86_64", target_arch = "x86"))]
    unsafe {
        let address = address.cast::<AtomicU8>();
        (*address)
            .compare_exchange_weak(old_value, new_value, Ordering::Relaxed, Ordering::Relaxed)
            .is_ok()
    }

    #[cfg(not(any(target_arch = "x86_64", target_arch = "x86")))]
    unsafe {
        use std::mem::size_of;
        use std::sync::atomic::AtomicUsize;
        // Emulate a byte-wide CAS with a word-wide one: splice the old/new byte
        // into a snapshot of the containing word and CAS the whole word. If any
        // neighboring byte changes concurrently the CAS fails and the caller
        // retries, which is correct (just conservative).
        let shift_in_bytes = address as usize % size_of::<usize>();
        let address = address.sub(shift_in_bytes);
        let shift_in_bits = shift_in_bytes * 8;
        let word_atomic = address.cast::<AtomicUsize>();
        // `0xff_usize`: the mask must be word-sized, a plain `0xff` would be an
        // i32 shift that overflows for shift counts >= 32.
        let cur_word = (*word_atomic).load(Ordering::Relaxed) & !(0xff_usize << shift_in_bits);
        let old_word = cur_word | ((old_value as usize) << shift_in_bits);
        let new_word = cur_word | ((new_value as usize) << shift_in_bits);
        (*word_atomic)
            .compare_exchange_weak(old_word, new_word, Ordering::Relaxed, Ordering::Relaxed)
            .is_ok()
    }
}
33
/// Maintain a card table from the write barrier. All writes of
/// non-null values to heap addresses should go through an entry in
/// WriteBarrier, and from there to here. Each byte ("card") covers
/// [CARD_SIZE](CardTable::CARD_SIZE) bytes of heap.
#[allow(dead_code)]
pub struct CardTable {
    /// Mmapped pages for the card table
    mem_map: MmapMut,
    /// Value used to compute card table addresses from object addresses, see
    /// [get_biased_begin](CardTable::get_biased_begin)
    biased_begin: *const u8,
    /// Card table doesn't begin at the beginning of the mem_map, instead it is displaced by offset
    /// to allow the byte value of `biased_begin` to equal [CARD_DIRTY](CardTable::CARD_DIRTY).
    offset: usize,
}
47impl CardTable {
48    pub const CARD_SHIFT: usize = 10;
49    pub const CARD_SIZE: usize = 1 << Self::CARD_SHIFT;
50    pub const CARD_CLEAN: u8 = 0x0;
51    pub const CARD_DIRTY: u8 = 0x70;
52    pub const CARD_AGED: u8 = Self::CARD_DIRTY - 1;
53    /// Returns a value that when added to a heap address >> [CARD_SHIFT](CardTable::CARD_SHIFT) will address the appropriate
54    /// card table byte. For convenience this value is cached in every local heap.
55    pub fn get_biased_begin(&self) -> *mut u8 {
56        self.biased_begin as _
57    }
58
59    pub fn mem_map_begin(&self) -> *mut u8 {
60        self.mem_map.as_ptr() as _
61    }
62    pub fn mem_map_size(&self) -> usize {
63        self.mem_map.len()
64    }
65    #[inline]
66    pub fn card_from_addr(&self, addr: *const u8) -> *mut u8 {
67        let card_addr = self.biased_begin as usize + (addr as usize >> Self::CARD_SHIFT);
68        card_addr as _
69    }
70
71    #[inline]
72    pub fn addr_from_card(&self, card_addr: *mut u8) -> *mut u8 {
73        let offset = card_addr as usize - self.biased_begin as usize;
74        (offset << Self::CARD_SHIFT) as _
75    }
76
77    #[inline]
78    pub fn modify_cards_atomic(
79        &self,
80        scan_begin: *mut u8,
81        scan_end: *mut u8,
82        mut visitor: impl FnMut(u8) -> u8,
83        mut modified: impl FnMut(*mut u8, u8, u8),
84    ) {
85        unsafe {
86            let mut card_cur = self.card_from_addr(scan_begin);
87            let mut card_end = self.card_from_addr(round_up(scan_end as _, Self::CARD_SIZE) as _);
88
89            while !is_aligned(card_cur as _, size_of::<usize>()) && card_cur < card_end {
90                let mut expected;
91                let mut new_value;
92                while {
93                    expected = *card_cur;
94                    new_value = visitor(expected);
95                    expected != new_value && !byte_cas(expected, new_value, card_cur)
96                } {}
97                if expected != new_value {
98                    modified(card_cur, expected, new_value);
99                }
100                card_cur = card_cur.add(1);
101            }
102            while !is_aligned(card_end as _, size_of::<usize>()) && card_end > card_cur {
103                card_end = card_end.sub(1);
104                let mut expected;
105                let mut new_value;
106                while {
107                    expected = *card_end;
108                    new_value = visitor(expected);
109                    expected != new_value && !byte_cas(expected, new_value, card_cur)
110                } {}
111                if expected != new_value {
112                    modified(card_end, expected, new_value);
113                }
114            }
115
116            let mut word_cur = card_cur.cast::<usize>();
117            let word_end = card_end.cast::<usize>();
118
119            union U1 {
120                expected_word: usize,
121                expected_bytes: [u8; size_of::<usize>()],
122            }
123
124            union U2 {
125                new_word: usize,
126                new_bytes: [u8; size_of::<usize>()],
127            }
128
129            let mut u1 = U1 { expected_word: 0 };
130            let mut u2 = U2 { new_word: 0 };
131            while word_cur < word_end {
132                loop {
133                    u1.expected_word = *word_cur;
134                    if u1.expected_word == 0 {
135                        break; // clean card
136                    }
137                    for i in 0..size_of::<usize>() {
138                        u2.new_bytes[i] = visitor(u1.expected_bytes[i]);
139                    }
140                    let atomic_word = word_cur.cast::<AtomicUsize>();
141                    if (*atomic_word)
142                        .compare_exchange_weak(
143                            u1.expected_word,
144                            u2.new_word,
145                            Ordering::Relaxed,
146                            Ordering::Relaxed,
147                        )
148                        .is_ok()
149                    {
150                        for i in 0..size_of::<usize>() {
151                            let expected_byte = u1.expected_bytes[i];
152                            let new_byte = u2.new_bytes[i];
153                            if new_byte != expected_byte {
154                                modified(word_cur.cast::<u8>().add(i), expected_byte, new_byte);
155                            }
156                        }
157                        break;
158                    }
159                }
160                word_cur = word_cur.add(1);
161            }
162        }
163    }
164}
165
/// Whether `value` is a multiple of `alignment` (`alignment` must be a power of two).
fn is_aligned(value: usize, alignment: usize) -> bool {
    let mask = alignment - 1;
    value & mask == 0
}
169
/// Rounds `x` up to the nearest multiple of `n` (`n` must be a power of two).
/// `round_up(x, n)` returns `x` unchanged when it is already a multiple of `n`.
fn round_up(x: usize, n: usize) -> usize {
    // Add n - 1 then clear the low log2(n) bits. Computed directly with the
    // !(n - 1) mask so the rounding is correct on its own.
    (x + n - 1) & !(n - 1)
}
173
/// Rounds `x` down to the nearest multiple of `n` (`n` must be a power of two).
fn round_down(x: usize, n: usize) -> usize {
    // Clear the low log2(n) bits. The mask must be !(n - 1): masking with !n
    // would only clear the single bit at position log2(n), giving wrong results
    // (e.g. 1050 & !1024 == 26 instead of 1024).
    x & !(n - 1)
}