// oxifaster/record.rs
//! Record header for the hybrid log.
//!
//! The hybrid log stores variable-sized records. The common header is `RecordInfo`,
//! which links records into hash chains and stores per-record flags.

use std::mem;
use std::sync::atomic::{AtomicU64, Ordering};

use crate::address::Address;

#[cfg(feature = "f2")]
use bytemuck::Pod;
/// Record header, internal to FASTER.
///
/// The header is exactly 8 bytes (see the compile-time assertion at the bottom
/// of this file) and packs the following fields into one `u64`:
/// - Previous address (bits 0..=47): points to the previous record in the hash chain
///   (bit 47 doubles as the read-cache marker — see `READ_CACHE_BIT`)
/// - Checkpoint version (bits 48..=60): version when the record was created
/// - Invalid bit (bit 61): whether the record has been invalidated
/// - Tombstone bit (bit 62): whether this is a delete marker
/// - Final bit (bit 63): whether this is the final record in a chain
///
/// All accesses go through the single atomic word, so concurrent readers and
/// writers see each field update atomically.
#[repr(C)]
pub struct RecordInfo {
    // The packed 48/13/1/1/1-bit layout described above.
    control: AtomicU64,
}

27impl RecordInfo {
28    /// Mask for the previous address (48 bits)
29    const PREV_ADDR_MASK: u64 = (1 << 48) - 1;
30
31    /// Shift for checkpoint version
32    const VERSION_SHIFT: u32 = 48;
33    /// Mask for checkpoint version (13 bits)
34    const VERSION_MASK: u64 = (1 << 13) - 1;
35
36    /// Bit position for invalid flag
37    const INVALID_BIT: u64 = 1 << 61;
38    /// Bit position for tombstone flag
39    const TOMBSTONE_BIT: u64 = 1 << 62;
40    /// Bit position for final flag
41    const FINAL_BIT: u64 = 1 << 63;
42
43    /// Read cache bit in the address portion
44    const READ_CACHE_BIT: u64 = 1 << 47;
45
46    /// Create a new record info.
47    pub fn new(
48        previous_address: Address,
49        checkpoint_version: u16,
50        invalid: bool,
51        tombstone: bool,
52        final_bit: bool,
53    ) -> Self {
54        let mut control = previous_address.control() & Self::PREV_ADDR_MASK;
55        control |= ((checkpoint_version as u64) & Self::VERSION_MASK) << Self::VERSION_SHIFT;
56        if invalid {
57            control |= Self::INVALID_BIT;
58        }
59        if tombstone {
60            control |= Self::TOMBSTONE_BIT;
61        }
62        if final_bit {
63            control |= Self::FINAL_BIT;
64        }
65        Self {
66            control: AtomicU64::new(control),
67        }
68    }
69
70    /// Create a record info from raw control value.
71    pub fn from_control(control: u64) -> Self {
72        Self {
73            control: AtomicU64::new(control),
74        }
75    }
76
77    /// Check if the record info is null (all zeros).
78    #[inline]
79    pub fn is_null(&self) -> bool {
80        self.control.load(Ordering::Acquire) == 0
81    }
82
83    /// Get the previous address in the hash chain.
84    #[inline]
85    pub fn previous_address(&self) -> Address {
86        Address::from_control(self.control.load(Ordering::Acquire) & Self::PREV_ADDR_MASK)
87    }
88
89    /// Set the previous address.
90    #[inline]
91    pub fn set_previous_address(&self, addr: Address) {
92        let mut current = self.control.load(Ordering::Acquire);
93        loop {
94            let new_val =
95                (current & !Self::PREV_ADDR_MASK) | (addr.control() & Self::PREV_ADDR_MASK);
96            match self.control.compare_exchange_weak(
97                current,
98                new_val,
99                Ordering::AcqRel,
100                Ordering::Acquire,
101            ) {
102                Ok(_) => break,
103                Err(actual) => current = actual,
104            }
105        }
106    }
107
108    /// Get the checkpoint version.
109    #[inline]
110    pub fn checkpoint_version(&self) -> u16 {
111        ((self.control.load(Ordering::Acquire) >> Self::VERSION_SHIFT) & Self::VERSION_MASK) as u16
112    }
113
114    /// Check if the record is invalid.
115    #[inline]
116    pub fn is_invalid(&self) -> bool {
117        (self.control.load(Ordering::Acquire) & Self::INVALID_BIT) != 0
118    }
119
120    /// Set the invalid flag.
121    #[inline]
122    pub fn set_invalid(&self, invalid: bool) {
123        if invalid {
124            self.control.fetch_or(Self::INVALID_BIT, Ordering::AcqRel);
125        } else {
126            self.control.fetch_and(!Self::INVALID_BIT, Ordering::AcqRel);
127        }
128    }
129
130    /// Check if this is a tombstone (delete marker).
131    #[inline]
132    pub fn is_tombstone(&self) -> bool {
133        (self.control.load(Ordering::Acquire) & Self::TOMBSTONE_BIT) != 0
134    }
135
136    /// Set the tombstone flag.
137    #[inline]
138    pub fn set_tombstone(&self, tombstone: bool) {
139        if tombstone {
140            self.control.fetch_or(Self::TOMBSTONE_BIT, Ordering::AcqRel);
141        } else {
142            self.control
143                .fetch_and(!Self::TOMBSTONE_BIT, Ordering::AcqRel);
144        }
145    }
146
147    /// Check if this is the final record in a chain.
148    #[inline]
149    pub fn is_final(&self) -> bool {
150        (self.control.load(Ordering::Acquire) & Self::FINAL_BIT) != 0
151    }
152
153    /// Check if the previous address points to read cache.
154    #[inline]
155    pub fn in_read_cache(&self) -> bool {
156        (self.control.load(Ordering::Acquire) & Self::READ_CACHE_BIT) != 0
157    }
158
159    /// Get the raw control value.
160    #[inline]
161    pub fn control(&self) -> u64 {
162        self.control.load(Ordering::Acquire)
163    }
164
165    /// Load the control value with specified ordering.
166    #[inline]
167    pub fn load(&self, ordering: Ordering) -> u64 {
168        self.control.load(ordering)
169    }
170
171    /// Store the control value with specified ordering.
172    #[inline]
173    pub fn store(&self, value: u64, ordering: Ordering) {
174        self.control.store(value, ordering);
175    }
176}
177
178impl Clone for RecordInfo {
179    fn clone(&self) -> Self {
180        Self {
181            control: AtomicU64::new(self.control.load(Ordering::Acquire)),
182        }
183    }
184}
185
186impl Default for RecordInfo {
187    fn default() -> Self {
188        Self {
189            control: AtomicU64::new(0),
190        }
191    }
192}
193
194impl std::fmt::Debug for RecordInfo {
195    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
196        f.debug_struct("RecordInfo")
197            .field("previous_address", &self.previous_address())
198            .field("checkpoint_version", &self.checkpoint_version())
199            .field("invalid", &self.is_invalid())
200            .field("tombstone", &self.is_tombstone())
201            .field("final", &self.is_final())
202            .finish()
203    }
204}
205
/// On-log record layout for the F2 variant: a `RecordInfo` header followed by
/// key and value bytes located via the offset helpers on this type.
#[cfg(feature = "f2")]
#[repr(C)]
pub(crate) struct Record<K: Pod, V: Pod> {
    // Common 8-byte packed record header.
    pub(crate) header: RecordInfo,
    // Zero-sized marker: key/value bytes live past the header in the log
    // buffer, not as struct fields.
    _marker: std::marker::PhantomData<(K, V)>,
}

#[cfg(feature = "f2")]
impl<K: Pod, V: Pod> Record<K, V> {
    /// Records are padded so that consecutive records stay 8-byte aligned.
    const ALIGN: usize = 8;

    /// Total on-log footprint: header + key + value, rounded up to `ALIGN`.
    #[inline]
    pub(crate) const fn size() -> usize {
        let unpadded = mem::size_of::<RecordInfo>() + mem::size_of::<K>() + mem::size_of::<V>();
        Self::align_up(unpadded, Self::ALIGN)
    }

    /// Byte offset of the key: immediately after the header.
    #[inline]
    pub(crate) const fn key_offset() -> usize {
        mem::size_of::<RecordInfo>()
    }

    /// Byte offset of the value: immediately after the key.
    #[inline]
    pub(crate) const fn value_offset() -> usize {
        Self::key_offset() + mem::size_of::<K>()
    }

    /// Round `n` up to the next multiple of `align` (must be a power of two).
    #[inline]
    const fn align_up(n: usize, align: usize) -> usize {
        debug_assert!(align.is_power_of_two());
        let mask = align - 1;
        (n + mask) & !mask
    }

    /// Read the key stored at `base` (unaligned read).
    ///
    /// # Safety
    /// `base` must point to a readable buffer of at least `Self::size()`
    /// bytes holding a record with this layout.
    #[inline]
    pub(crate) unsafe fn read_key(base: *const u8) -> K {
        std::ptr::read_unaligned(base.add(Self::key_offset()).cast::<K>())
    }

    /// Read the value stored at `base` (unaligned read).
    ///
    /// # Safety
    /// Same contract as [`Self::read_key`].
    #[inline]
    pub(crate) unsafe fn read_value(base: *const u8) -> V {
        std::ptr::read_unaligned(base.add(Self::value_offset()).cast::<V>())
    }

    /// Write `key` into the record at `base` (unaligned write).
    ///
    /// # Safety
    /// `base` must point to a writable buffer of at least `Self::size()`
    /// bytes reserved for a record with this layout.
    #[inline]
    pub(crate) unsafe fn write_key(base: *mut u8, key: K) {
        std::ptr::write_unaligned(base.add(Self::key_offset()).cast::<K>(), key);
    }

    /// Write `value` into the record at `base` (unaligned write).
    ///
    /// # Safety
    /// Same contract as [`Self::write_key`].
    #[inline]
    pub(crate) unsafe fn write_value(base: *mut u8, value: V) {
        std::ptr::write_unaligned(base.add(Self::value_offset()).cast::<V>(), value);
    }
}

// Compile-time check: `RecordInfo` must stay exactly 8 bytes, since the bit
// layout packs everything into a single u64 and the header is written
// verbatim into the log.
const _: () = assert!(mem::size_of::<RecordInfo>() == 8);