//! cuckoocache/bitpacked.rs — bit-packed flag storage (plain and atomic) for the cuckoo cache.
1use std::sync::atomic::AtomicUsize;
2use std::sync::atomic::Ordering;
3
pub trait FlagsT {
    /// Value `bit_is_set` reports for every index after `new`/`setup`
    /// (false for the `non_atomic` impl, true for the `atomic` impl in this file).
    const DEFAULT: bool;
    /// constructor creates memory to sufficiently
    /// keep track of garbage collection information for size entries.
    ///
    /// # Arguments
    /// * `size` the number of elements to allocate space for
    ///
    /// # Postconditions
    /// * bit_set, bit_unset, and bit_is_set function properly forall x. x <
    ///   size.
    /// * All calls to bit_is_set (without subsequent bit_unset) will return
    ///   `Self::DEFAULT`.
    fn new(size: usize) -> Self;

    /// setup marks all entries and ensures that bit_packed_atomic_flags can store
    /// at least size entries
    ///
    /// # Arguments
    /// * `b` the number of elements to allocate space for
    /// # Postconditions
    /// * bit_set, bit_unset, and bit_is_set function properly forall x. x <
    ///   b
    /// * All calls to bit_is_set (without subsequent bit_unset) will return
    ///   `Self::DEFAULT`.
    fn setup(&mut self, b: usize);

    /// bit_set sets an entry as discardable.
    ///
    /// # Arguments
    /// * `s` the index of the entry to bit_set
    /// # Postconditions
    /// * immediately subsequent call (assuming proper external memory
    ///   ordering) to bit_is_set(s) == true.
    fn bit_set(&mut self, s: usize);

    /// bit_unset marks an entry as something that should not be overwritten
    ///
    /// # Arguments
    /// * `s` the index of the entry to bit_unset
    /// # Postconditions
    /// * immediately subsequent call (assuming proper external memory
    ///   ordering) to bit_is_set(s) == false.
    fn bit_unset(&mut self, s: usize);

    /// bit_set_to sets an entry as specified.
    /// # Arguments
    /// * `s` the index of the entry to modify
    /// * `discardable` true sets the bit (marks the entry discardable),
    ///   false clears it. NOTE(review): the original comment claimed the
    ///   opposite ("true does unset"); both implementations in this file set
    ///   the bit when `discardable` is true, so the comment was inverted.
    /// # Postconditions
    /// * immediately subsequent call (assuming proper external memory
    ///   ordering) to bit_is_set(s) == discardable.
    fn bit_set_to(&mut self, s: usize, discardable: bool);

    /// bit_is_set reports whether entry `s` is currently marked discardable.
    fn bit_is_set(&self, s: usize) -> bool;
}
60
/// Extension of `FlagsT` whose set/unset operations take `&self` instead of
/// `&mut self`, so they can be invoked concurrently through a shared
/// reference (the `atomic` implementation below backs them with atomic RMWs).
pub trait AtomicFlagsT: FlagsT {
    /// bit_set sets an entry as discardable.
    ///
    /// Takes `&self` (shared reference), unlike `FlagsT::bit_set`.
    ///
    /// # Arguments
    /// * `s` the index of the entry to bit_set
    /// # Postconditions
    /// * immediately subsequent call (assuming proper external memory
    ///   ordering) to bit_is_set(s) == true.
    fn bit_set(&self, s: usize);

    /// bit_unset marks an entry as something that should not be overwritten
    ///
    /// Takes `&self` (shared reference), unlike `FlagsT::bit_unset`.
    ///
    /// # Arguments
    /// * `s` the index of the entry to bit_unset
    /// # Postconditions
    /// * immediately subsequent call (assuming proper external memory
    ///   ordering) to bit_is_set(s) == false.
    fn bit_unset(&self, s: usize);
}
80
81pub mod non_atomic {
82 use super::*;
83 type FlagType = u64;
84 const WIDTH: usize = 8 * std::mem::size_of::<FlagType>();
85 const NBITS: usize = 6;
86 const MASK: usize = WIDTH - 1;
87 pub struct Flags {
88 mem: Vec<FlagType>,
89 }
90 impl FlagsT for Flags {
91 const DEFAULT: bool = false;
92 fn new(size: usize) -> Self {
93 // pad out the size if needed
94 let size = (size + MASK) / WIDTH;
95 let mut s = Flags {
96 mem: Vec::with_capacity(size),
97 };
98 s.mem.resize(size, 0);
99 s
100 }
101
102 fn setup(&mut self, b: usize) {
103 std::mem::swap(&mut self.mem, &mut Flags::new(b).mem);
104 }
105
106 fn bit_set(&mut self, s: usize) {
107 self.mem[s >> NBITS] |= 1 << (s & MASK);
108 }
109
110 fn bit_unset(&mut self, s: usize) {
111 self.mem[s >> NBITS] &= !(1 << (s & MASK));
112 }
113
114 fn bit_set_to(&mut self, s: usize, discardable: bool) {
115 // clear the bit
116 self.mem[s >> NBITS] &= !(1 << (s & MASK));
117 // then set iff discardable
118 self.mem[s >> NBITS] |= (1 << (s & MASK)) * (discardable as u64);
119 }
120 fn bit_is_set(&self, s: usize) -> bool {
121 return ((1 << (s & MASK)) & self.mem[s >> NBITS]) != 0;
122 }
123 }
124}
125
126pub mod atomic {
127 use super::*;
128 type FlagType = AtomicUsize;
129 const WIDTH: usize = 8 * std::mem::size_of::<FlagType>();
130 const NBITS: usize = 5 + ((WIDTH == 64) as usize);
131 const MASK: usize = WIDTH - 1;
132 pub struct Flags {
133 mem: Vec<FlagType>,
134 }
135 impl FlagsT for Flags {
136 const DEFAULT: bool = true;
137 fn new(size: usize) -> Self {
138 // pad out the size if needed
139 let size = (size + MASK) / WIDTH;
140 let mut s = Flags {
141 mem: Vec::with_capacity(size),
142 };
143 for _ in 0..size {
144 s.mem.push(std::usize::MAX.into());
145 }
146 s
147 }
148
149 fn setup(&mut self, b: usize) {
150 std::mem::swap(&mut self.mem, &mut Flags::new(b).mem);
151 }
152
153 fn bit_set(&mut self, s: usize) {
154 self.mem[s >> NBITS].fetch_or(1 << (s & MASK), Ordering::Relaxed);
155 }
156 fn bit_unset(&mut self, s: usize) {
157 self.mem[s >> NBITS].fetch_and(!(1 << (s & MASK)), Ordering::Relaxed);
158 }
159 fn bit_set_to(&mut self, s: usize, discardable: bool) {
160 // clear the bit
161 self.mem[s >> NBITS].fetch_and(!(1 << (s & MASK)), Ordering::Relaxed);
162 // then set iff discardable
163 self.mem[s >> NBITS].fetch_or(
164 (1 << (s & MASK)) * (discardable as usize),
165 Ordering::Relaxed,
166 );
167 }
168 fn bit_is_set(&self, s: usize) -> bool {
169 return ((1 << (s & MASK)) & self.mem[s >> NBITS].load(Ordering::Relaxed)) != 0;
170 }
171 }
172 impl AtomicFlagsT for Flags {
173 /// bit_set sets an entry as discardable.
174 ///
175 /// # Arguments
176 /// * `s` the index of the entry to bit_set
177 /// @post immediately subsequent call (assuming proper external memory
178 /// ordering) to bit_is_set(s) == true.
179 fn bit_set(&self, s: usize) {
180 self.mem[s >> NBITS].fetch_or(1 << (s & MASK), Ordering::Relaxed);
181 }
182
183 /// bit_unset marks an entry as something that should not be overwritten
184 ///
185 /// # Arguments
186 /// * `s` the index of the entry to bit_unset
187 /// @post immediately subsequent call (assuming proper external memory
188 /// ordering) to bit_is_set(s) == false.
189 fn bit_unset(&self, s: usize) {
190 self.mem[s >> NBITS].fetch_and(!(1 << (s & MASK)), Ordering::Relaxed);
191 }
192 }
193}