1use core::{fmt, marker::PhantomData, mem, ops, slice};
2use endian_num::Le;
3
4use crate::BLOCK_SIZE;
5
// Number of `BlockPtr`s that fit in exactly one block
// (`BlockPtr<BlockRaw>` is two packed `Le<u64>` fields = 16 bytes).
const BLOCK_LIST_ENTRIES: usize = BLOCK_SIZE as usize / mem::size_of::<BlockPtr<BlockRaw>>();
7
/// A block address packed into a single `u64`: block index in the upper
/// 56 bits, decompressed level in bits 4-7, block level in the low nibble
/// (see `BlockAddr::new` for the exact layout).
#[derive(Clone, Copy, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)]
pub struct BlockAddr(u64);
16
17impl BlockAddr {
18 const INDEX_SHIFT: u64 = 8;
19 const DECOMP_LEVEL_MASK: u64 = 0xF0;
20 const DECOMP_LEVEL_SHIFT: u64 = 4;
21 const LEVEL_MASK: u64 = 0xF;
22
23 pub unsafe fn new(index: u64, meta: BlockMeta) -> Self {
25 if meta.level.0 > Self::LEVEL_MASK as usize {
27 panic!("block level too large");
28 }
29
30 let decomp_level = meta.decomp_level.unwrap_or_default();
32 if (decomp_level.0 << Self::DECOMP_LEVEL_SHIFT) > Self::DECOMP_LEVEL_MASK as usize {
33 panic!("decompressed block level too large");
34 }
35
36 let inner = index
38 .checked_shl(Self::INDEX_SHIFT as u32)
39 .expect("block index too large")
40 | ((decomp_level.0 as u64) << Self::DECOMP_LEVEL_SHIFT)
41 | (meta.level.0 as u64);
42 Self(inner)
43 }
44
45 pub fn null(meta: BlockMeta) -> Self {
46 unsafe { Self::new(0, meta) }
47 }
48
49 pub fn index(&self) -> u64 {
50 self.0 >> Self::INDEX_SHIFT
52 }
53
54 pub fn level(&self) -> BlockLevel {
55 BlockLevel((self.0 & Self::LEVEL_MASK) as usize)
57 }
58
59 pub fn decomp_level(&self) -> Option<BlockLevel> {
60 let value = (self.0 & Self::DECOMP_LEVEL_MASK) >> Self::DECOMP_LEVEL_SHIFT;
61 if value != 0 {
62 Some(BlockLevel(value as usize))
63 } else {
64 None
65 }
66 }
67
68 pub fn meta(&self) -> BlockMeta {
69 BlockMeta {
70 level: self.level(),
71 decomp_level: self.decomp_level(),
72 }
73 }
74
75 pub fn is_null(&self) -> bool {
76 self.index() == 0
77 }
78}
79
/// Size metadata carried alongside a block address.
#[derive(Clone, Copy, Debug, Default, Eq, Hash, PartialEq)]
pub struct BlockMeta {
    // Level of the stored block itself.
    pub(crate) level: BlockLevel,
    // Level of the data after decompression; `None` for uncompressed blocks.
    pub(crate) decomp_level: Option<BlockLevel>,
}
85
86impl BlockMeta {
87 pub fn new(level: BlockLevel) -> Self {
88 Self {
89 level,
90 decomp_level: None,
91 }
92 }
93
94 pub fn new_compressed(level: BlockLevel, decomp_level: BlockLevel) -> Self {
95 Self {
96 level,
97 decomp_level: Some(decomp_level),
98 }
99 }
100}
101
/// The log2 of a block's size in `BLOCK_SIZE` units: a level-n block spans
/// 2^n base blocks (see `BlockLevel::blocks` and `BlockLevel::bytes`).
#[derive(Clone, Copy, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)]
pub struct BlockLevel(pub(crate) usize);
111
112impl BlockLevel {
113 pub(crate) fn for_bytes(bytes: u64) -> Self {
116 if bytes == 0 {
117 return BlockLevel(0);
118 }
119 let level = bytes
120 .div_ceil(BLOCK_SIZE)
121 .next_power_of_two()
122 .trailing_zeros() as usize;
123 BlockLevel(level)
124 }
125
126 pub fn blocks<T: From<u32>>(self) -> T {
129 T::from(1u32 << self.0)
130 }
131
132 pub fn bytes(self) -> u64 {
134 BLOCK_SIZE << self.0
135 }
136}
137
/// Types whose values can back a block of data.
///
/// # Safety
///
/// NOTE(review): marked `unsafe` presumably because implementors are
/// reinterpreted as raw byte slices elsewhere in this module (see the
/// `Deref<Target = [u8]>` impls) — implementors must be fully initialized,
/// plain data for that to be sound. Confirm against callers.
pub unsafe trait BlockTrait {
    // An empty value sized for the given level, or `None` if this type only
    // supports certain levels.
    fn empty(level: BlockLevel) -> Option<Self>
    where
        Self: Sized;
}
144
/// A block's in-memory contents paired with the address it belongs to.
#[derive(Clone, Copy, Debug, Default)]
pub struct BlockData<T> {
    addr: BlockAddr,
    data: T,
}
151
152impl<T> BlockData<T> {
153 pub fn new(addr: BlockAddr, data: T) -> Self {
154 Self { addr, data }
155 }
156
157 pub fn addr(&self) -> BlockAddr {
158 self.addr
159 }
160
161 pub fn data(&self) -> &T {
162 &self.data
163 }
164
165 pub fn data_mut(&mut self) -> &mut T {
166 &mut self.data
167 }
168
169 pub(crate) unsafe fn into_parts(self) -> (BlockAddr, T) {
170 (self.addr, self.data)
171 }
172
173 #[must_use = "don't forget to de-allocate old block address"]
178 pub fn swap_addr(&mut self, addr: BlockAddr) -> BlockAddr {
179 assert_eq!(self.addr.level(), addr.level());
181 let old = self.addr;
182 self.addr = addr;
183 old
184 }
185}
186
187impl<T: BlockTrait> BlockData<T> {
188 pub fn empty(addr: BlockAddr) -> Option<Self> {
189 let empty = T::empty(addr.level())?;
190 Some(Self::new(addr, empty))
191 }
192}
193
194impl<T: ops::Deref<Target = [u8]>> BlockData<T> {
195 pub fn create_ptr(&self) -> BlockPtr<T> {
196 BlockPtr {
197 addr: self.addr.0.into(),
198 hash: seahash::hash(self.data.deref()).into(),
199 phantom: PhantomData,
200 }
201 }
202}
203
/// An array of `BlockPtr`s sized to fill exactly one block
/// (`BLOCK_LIST_ENTRIES` entries).
#[repr(C, packed)]
pub struct BlockList<T> {
    pub ptrs: [BlockPtr<T>; BLOCK_LIST_ENTRIES],
}
208
209unsafe impl<T> BlockTrait for BlockList<T> {
210 fn empty(level: BlockLevel) -> Option<Self> {
211 if level.0 == 0 {
212 Some(Self {
213 ptrs: [BlockPtr::default(); BLOCK_LIST_ENTRIES],
214 })
215 } else {
216 None
217 }
218 }
219}
220
221impl<T> BlockList<T> {
222 pub fn is_empty(&self) -> bool {
223 self.ptrs.iter().all(|ptr| ptr.is_null())
224 }
225}
226
// View the pointer array as raw bytes (e.g. for hashing or block I/O).
impl<T> ops::Deref for BlockList<T> {
    type Target = [u8];
    fn deref(&self) -> &[u8] {
        unsafe {
            // SAFETY: `BlockList` is `#[repr(C, packed)]` and contains only
            // plain integer data (`BlockPtr` is two packed `Le<u64>`s), so
            // every byte of the struct is initialized and may be read as a
            // `u8` slice covering `size_of::<BlockList<T>>()` bytes.
            slice::from_raw_parts(
                self as *const BlockList<T> as *const u8,
                mem::size_of::<BlockList<T>>(),
            ) as &[u8]
        }
    }
}
238
// Mutable byte view of the pointer array (e.g. for reading a block from
// storage directly into the list).
impl<T> ops::DerefMut for BlockList<T> {
    fn deref_mut(&mut self) -> &mut [u8] {
        unsafe {
            // SAFETY: same layout argument as `Deref`; additionally, any byte
            // pattern is a valid `BlockPtr` (all fields are plain `u64`s), so
            // arbitrary writes through this slice cannot create an invalid
            // value.
            slice::from_raw_parts_mut(
                self as *mut BlockList<T> as *mut u8,
                mem::size_of::<BlockList<T>>(),
            ) as &mut [u8]
        }
    }
}
249
/// A reference to a block: its packed address plus a checksum of the
/// block's contents, both stored little-endian.
#[repr(C, packed)]
pub struct BlockPtr<T> {
    // Packed `BlockAddr` value.
    addr: Le<u64>,
    // seahash of the referenced block's data (see `BlockData::create_ptr`).
    hash: Le<u64>,
    // Records the referenced block type without storing a value of it.
    phantom: PhantomData<T>,
}
263
264impl<T> BlockPtr<T> {
265 pub fn null(meta: BlockMeta) -> Self {
266 Self {
267 addr: BlockAddr::null(meta).0.into(),
268 hash: 0.into(),
269 phantom: PhantomData,
270 }
271 }
272
273 pub fn addr(&self) -> BlockAddr {
274 BlockAddr(self.addr.to_ne())
275 }
276
277 pub fn hash(&self) -> u64 {
278 self.hash.to_ne()
279 }
280
281 pub fn is_null(&self) -> bool {
282 self.addr().is_null()
283 }
284
285 pub fn marker(level: u8) -> Self {
286 assert!(level <= 0xF);
287 Self {
288 addr: (0xFFFF_FFFF_FFFF_FFF0 | (level as u64)).into(),
289 hash: u64::MAX.into(),
290 phantom: PhantomData,
291 }
292 }
293
294 pub fn is_marker(&self) -> bool {
295 (self.addr.to_ne() | 0xF) == u64::MAX && self.hash.to_ne() == u64::MAX
296 }
297
298 pub unsafe fn cast<U>(self) -> BlockPtr<U> {
303 BlockPtr {
304 addr: self.addr,
305 hash: self.hash,
306 phantom: PhantomData,
307 }
308 }
309
310 #[must_use = "the returned pointer should usually be deallocated"]
311 pub fn clear(&mut self) -> BlockPtr<T> {
312 let mut ptr = Self::default();
313 mem::swap(self, &mut ptr);
314 ptr
315 }
316}
317
// Manual `Clone`/`Copy` impls: deriving them would add `T: Clone`/`T: Copy`
// bounds, which are unnecessary here since only `PhantomData<T>` mentions `T`.
impl<T> Clone for BlockPtr<T> {
    fn clone(&self) -> Self {
        *self
    }
}

impl<T> Copy for BlockPtr<T> {}
325
326impl<T> Default for BlockPtr<T> {
327 fn default() -> Self {
328 Self {
329 addr: 0.into(),
330 hash: 0.into(),
331 phantom: PhantomData,
332 }
333 }
334}
335
336impl<T> fmt::Debug for BlockPtr<T> {
337 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
338 let addr = self.addr();
339 let hash = self.hash();
340 f.debug_struct("BlockPtr")
341 .field("addr", &addr)
342 .field("hash", &hash)
343 .finish()
344 }
345}
346
/// One block's worth of raw bytes.
#[repr(C, packed)]
#[derive(Clone)]
pub struct BlockRaw([u8; BLOCK_SIZE as usize]);
350
351unsafe impl BlockTrait for BlockRaw {
352 fn empty(level: BlockLevel) -> Option<Self> {
353 if level.0 == 0 {
354 Some(Self([0; BLOCK_SIZE as usize]))
355 } else {
356 None
357 }
358 }
359}
360
361impl ops::Deref for BlockRaw {
362 type Target = [u8];
363 fn deref(&self) -> &[u8] {
364 &self.0
365 }
366}
367
368impl ops::DerefMut for BlockRaw {
369 fn deref_mut(&mut self) -> &mut [u8] {
370 &mut self.0
371 }
372}
373
#[test]
fn block_list_size_test() {
    // A BlockList must occupy exactly one block, or on-disk layout breaks.
    assert_eq!(mem::size_of::<BlockList<BlockRaw>>(), BLOCK_SIZE as usize);
}
378
#[test]
fn block_raw_size_test() {
    // A BlockRaw must occupy exactly one block.
    assert_eq!(mem::size_of::<BlockRaw>(), BLOCK_SIZE as usize);
}
383
#[test]
fn block_ptr_marker_test() {
    // A marker's low nibble doubles as the level field of the packed
    // address, so `addr().level()` must echo the argument back.
    let ptr = BlockPtr::<BlockRaw>::marker(0);
    assert_eq!(ptr.addr().level().0, 0);
    assert!(ptr.is_marker());

    let ptr = BlockPtr::<BlockRaw>::marker(2);
    assert_eq!(ptr.addr().level().0, 2);
    assert!(ptr.is_marker());
}