spacetimedb_table/var_len.rs

//! Provides the definitions of [`VarLenRef`], [`VarLenGranule`], and [`VarLenMembers`].
//!
//! We allocate variable-length objects within rows, e.g. strings and arrays,
//! separately from the fixed-length parts of rows.
//! The fixed-length part of the page starts at the top (offset 0) and grows downward,
//! while the var-length part of the page starts at the bottom (largest offset) and grows upward.
//!
//! Within the fixed-length part of the row, each var-len object is given a [`VarLenRef`],
//! which allows a mutator to locate the var-len object.
//!
//! The var-length objects are BSATN-encoded to produce a bytestring
//! (except strings, which are stored directly as UTF-8 bytestrings),
//! and stored in a linked list of 64-byte "granules,"
//! each of which has a 2-byte header and up to 62 bytes of data.
//! This means that var-length objects never store padding bytes;
//! every byte in a var-len object at an index less than the object's length
//! will be initialized.
//!
//! At various points in the row's lifecycle,
//! we must visit all of the `VarLenRef`s within the row,
//! e.g. to fix up pointers when copying a row into a new page.
//! This process is driven by a `VarLenMembers` visitor.
//!
//! This file defines the representation of the linked list of granules [`VarLenGranule`],
//! the [`VarLenRef`] pointers to variable-length objects,
//! and the trait [`VarLenMembers`] which visits `VarLenRef`s within a fixed-length row.
//!
//! The broad strokes of var-len allocation are described in the Mem Arch Redesign proposal,
// Intentionally not a link, in case we ever want to publish this crate.
//! `../../../../proposals/0001-mem-arch-redesign/mem-arch-redesign.md`.
//! Note that the proposal uses the words "blocks" or "chunks" where we use "granules."
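//!
//! For a concrete sense of the granule math, here is an illustrative sketch
//! (marked `ignore`; the exact module path and item visibility are assumptions):
//!
//! ```ignore
//! use spacetimedb_table::var_len::VarLenGranule;
//!
//! // Each granule holds at most 62 bytes of data,
//! // so a 150-byte object is split into chunks of 62, 62, and 26 bytes,
//! // chained together via the granule headers.
//! let obj = [0xAB_u8; 150];
//! let chunk_lens: Vec<usize> = VarLenGranule::chunks(&obj).map(|c| c.len()).collect();
//! assert_eq!(chunk_lens, [62, 62, 26]);
//! ```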

use super::{
    blob_store::BlobHash,
    indexes::{Byte, Bytes, PageOffset},
};
use crate::{static_assert_align, static_assert_size};
use core::iter;
use core::marker::PhantomData;
use core::mem;
use spacetimedb_sats::layout::{Size, VAR_LEN_REF_LAYOUT};

/// Reference to a var-len object within a page.
// TODO: make this larger and do short-string optimization?
// - Or store a few elts inline and then a `VarLenRef`?
// - Or first store `VarLenRef` that records num inline elements
//   (remaining inline "uninit," actually valid-unconstrained)
//   (bitfield; only need 10 bits for `len_in_bytes`)?
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
#[repr(C)]
pub struct VarLenRef {
    /// The length of the var-len object in bytes.
    /// When `self.is_large_blob()` returns true,
    /// this is not the proper length of the object.
    /// Rather, the blob store must be consulted for the true length.
    pub length_in_bytes: u16,
    /// The offset to the first granule containing some of the object's data
    /// as well as a pointer to the next granule.
    pub first_granule: PageOffset,
}

// Implementations of `VarLenMembers` visitors depend on
// `size = 4` and `align = 2` of `VarLenRef`.
static_assert_size!(VarLenRef, 4);
static_assert_align!(VarLenRef, 2);

const _: () = assert!(VAR_LEN_REF_LAYOUT.size as usize == mem::size_of::<VarLenRef>());
const _: () = assert!(VAR_LEN_REF_LAYOUT.align as usize == mem::align_of::<VarLenRef>());

impl VarLenRef {
    /// Does this refer to a large blob object,
    /// i.e. one where the granule at `self.first_granule` stores a blob hash?
    #[inline]
    pub const fn is_large_blob(self) -> bool {
        self.length_in_bytes == Self::LARGE_BLOB_SENTINEL
    }

    /// The sentinel for a var-len ref to a large blob.
    pub const LARGE_BLOB_SENTINEL: u16 = u16::MAX;

    /// Returns a var-len ref for a large blob object.
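    ///
    /// An illustrative sketch (not a compiled doctest; a real large blob's
    /// `first_granule` would point at the granule holding its [`BlobHash`]):
    ///
    /// ```ignore
    /// let vlr = VarLenRef::large_blob(PageOffset::VAR_LEN_NULL);
    /// assert!(vlr.is_large_blob());
    /// // For a large blob, `length_in_bytes` holds the sentinel, not a real length;
    /// // the blob store must be consulted for the object's true size.
    /// assert_eq!(vlr.length_in_bytes, VarLenRef::LARGE_BLOB_SENTINEL);
    /// ```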
    #[inline]
    pub const fn large_blob(first_granule: PageOffset) -> Self {
        Self {
            length_in_bytes: Self::LARGE_BLOB_SENTINEL,
            first_granule,
        }
    }

    /// Returns the number of granules this var-len ref uses in its page.
    #[inline]
    pub const fn granules_used(&self) -> usize {
        VarLenGranule::bytes_to_granules(self.length_in_bytes as usize).0
    }

    /// Is this reference NULL, i.e. empty?
    #[inline]
    pub const fn is_null(self) -> bool {
        self.first_granule.is_var_len_null()
    }

    /// The NULL var-len reference for empty variable components.
    ///
    /// A null `VarLenRef` can occur when a row has no var-len component
    /// or needs to point to one that is empty.
    pub const NULL: Self = Self {
        length_in_bytes: 0,
        first_granule: PageOffset::VAR_LEN_NULL,
    };
}

const _BLOB_SENTINEL_MORE_THAN_MAX_OBJ_SIZE: () =
    assert!(VarLenGranule::OBJECT_SIZE_BLOB_THRESHOLD < VarLenRef::LARGE_BLOB_SENTINEL as usize);

const _GRANULES_USED_FOR_BLOB_IS_CONSISTENT: () = {
    let vlr = VarLenRef::large_blob(PageOffset::VAR_LEN_NULL);
    assert!(vlr.is_large_blob() == (vlr.granules_used() == 1));
};

/// Returns whether `offset` is properly aligned for storing a [`VarLenGranule`].
pub fn is_granule_offset_aligned(offset: PageOffset) -> bool {
    offset.0 == offset.0 & VarLenGranuleHeader::NEXT_BITMASK
}

/// The header of a [`VarLenGranule`] storing:
/// - (low 6 bits) the number of bytes the granule contains
/// - (high 10 bits) the offset of the next granule in the linked list
///   used to store an object in variable storage.
///
/// For efficiency, this data is packed as a bitfield
/// in a `u16`, with the bits assigned as above.
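///
/// A worked example of the packing (an illustrative sketch, not a compiled doctest;
/// granule offsets are 64-byte aligned, so their low 6 bits are always zero):
///
/// ```ignore
/// let header = VarLenGranuleHeader::new(10, PageOffset(128));
/// // 128 (0b10_000000) and 10 (0b001010) occupy disjoint bits,
/// // so the packed `u16` is 128 | 10 = 138.
/// assert_eq!(header.len(), 10);
/// assert_eq!(header.next(), PageOffset(128));
/// ```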
#[derive(Copy, Clone)]
pub struct VarLenGranuleHeader(u16);

impl VarLenGranuleHeader {
    /// The total size of a [`VarLenGranule`]'s header, in bytes.
    const SIZE: usize = mem::size_of::<Self>();

    /// The number of bits used to store the `len` of a [`VarLenGranule`] is 6.
    const LEN_BITS: u16 = 6;

    /// The `len` of a [`VarLenGranule`] is stored in the low 6 bits.
    ///
    /// Six bits can represent values up to `2^6 - 1` (`63`).
    /// However, a granule can never store more than [`VarLenGranule::DATA_SIZE`] (`62`) bytes,
    /// which is `2` less than `2^6`.
    ///
    /// We will also never allocate a `VarLenGranule` with `len` 0.
    ///
    /// This means that the `len` field of a `VarLenGranule` has two dead states,
    /// `0` and `63`. We could use these as sentinels,
    /// but currently have no use for them.
    const LEN_BITMASK: u16 = (1 << Self::LEN_BITS) - 1;

    /// [`Self::LEN_BITMASK`] can represent every possible granule length.
    #[allow(clippy::assertions_on_constants)]
    const _ASSERT_LEN_BITMASK_FITS_ALL_POSSIBLE_GRANULE_LENGTHS: () =
        assert!(VarLenGranule::DATA_SIZE <= Self::LEN_BITMASK as usize);

    // The `next` of a `VarLenGranule` is stored in the high 10 bits.
    // It is not shifted; the low 6 bits will always be 0 due to alignment.
    const NEXT_BITMASK: u16 = !Self::LEN_BITMASK;
    /// Returns a new header with the length component changed to `len`.
    fn with_len(self, len: u8) -> Self {
        // Zero any previous `len` field.
        let mut new = self;
        new.0 &= !Self::LEN_BITMASK;

        // Ensure that the `len` doesn't overflow into the `next`.
        let capped_len = (len as u16) & Self::LEN_BITMASK;
        debug_assert_eq!(
            capped_len, len as u16,
            "Len {len} overflows the length of a `VarLenGranule`",
        );

        // Insert the truncated `len`.
        new.0 |= capped_len;

        debug_assert_eq!(self.next(), new.next(), "`with_len` has modified `next`");
        debug_assert_eq!(
            new.len() as u16,
            capped_len,
            "`with_len` has not inserted the correct `len`: expected {:x}, found {:x}",
            capped_len,
            new.len()
        );

        new
    }

    /// Returns a new header with the next-granule component changed to `next`.
    fn with_next(self, PageOffset(next): PageOffset) -> Self {
        let mut new = self;

        // Zero any previous `next` field.
        new.0 &= !Self::NEXT_BITMASK;

        // Ensure that the `next` is aligned,
        // and therefore doesn't overwrite any of the `len`.
        let aligned_next = next & Self::NEXT_BITMASK;
        debug_assert_eq!(aligned_next, next, "Next {next:x} is unaligned");

        // Insert the aligned `next`.
        new.0 |= aligned_next;

        debug_assert_eq!(self.len(), new.len(), "`with_next` has modified `len`");
        debug_assert_eq!(
            new.next().0,
            aligned_next,
            "`with_next` has not inserted the correct `next`"
        );

        new
    }

    /// Returns a new header for a granule storing `len` bytes
    /// and with the next granule in the list located at `next`.
    pub fn new(len: u8, next: PageOffset) -> Self {
        Self(0).with_len(len).with_next(next)
    }

    /// Returns the number of bytes the granule contains.
    const fn len(&self) -> u8 {
        (self.0 & Self::LEN_BITMASK) as u8
    }

    /// Returns the offset (address) of the next granule in the linked list.
    pub const fn next(&self) -> PageOffset {
        PageOffset(self.0 & Self::NEXT_BITMASK)
    }
}

/// Each variable-length object in a page is stored as a linked list of chunks.
/// These chunks are called *granules* and they can store up to 62 bytes of `data`.
/// Additionally, 2 bytes are used for the [`header: VarLenGranuleHeader`](VarLenGranuleHeader).
#[repr(C)] // Required for a stable ABI.
#[repr(align(64))] // Alignment must be the same as `VarLenGranule::SIZE`.
pub struct VarLenGranule {
    /// The header of the granule, containing the length and the next-cell offset.
    pub header: VarLenGranuleHeader,
    /// The data storing some part, or the whole, of the var-len object.
    pub data: [Byte; Self::DATA_SIZE],
}

impl VarLenGranule {
    /// The total size of a variable-length granule, in bytes.
    pub const SIZE: Size = Size(64);

    /// The size, in bytes, of the data section of a variable-length granule.
    pub const DATA_SIZE: usize = Self::SIZE.len() - VarLenGranuleHeader::SIZE;

    /// The max number of granules an object can use
    /// before being put into large blob storage.
    pub const OBJECT_MAX_GRANULES_BEFORE_BLOB: usize = 16;

    /// The max size of an object before being put into large blob storage.
    pub const OBJECT_SIZE_BLOB_THRESHOLD: usize = Self::DATA_SIZE * Self::OBJECT_MAX_GRANULES_BEFORE_BLOB;

    /// Returns the number of granules that would fit into `available_len`.
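    ///
    /// For example (illustrative sketch, not a compiled doctest):
    ///
    /// ```ignore
    /// // Floor division: 130 / 64 = 2, so only 2 whole granules fit in 130 bytes.
    /// assert_eq!(VarLenGranule::space_to_granules(Size(130)), 2);
    /// ```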
    pub const fn space_to_granules(available_len: Size) -> usize {
        // Floor division (the default div operator) here
        // to ensure we don't allocate e.g. a 64-byte granule in a 63-byte space.
        available_len.len() / Self::SIZE.len()
    }

    /// Returns the number of granules needed to store an object of `len_in_bytes` bytes.
    /// Also returns whether the object needs to go into the blob store.
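    ///
    /// Illustrative sketch of both cases (not a compiled doctest; it relies on the
    /// constants above: 62 data bytes per granule, blob threshold of 16 * 62 = 992 bytes):
    ///
    /// ```ignore
    /// // Small object: ceil(100 / 62) = 2 granules, not a blob.
    /// assert_eq!(VarLenGranule::bytes_to_granules(100), (2, false));
    /// // Large object: exceeds 992 bytes, so it goes to the blob store,
    /// // and only one granule (holding the blob hash) is needed in the page.
    /// assert_eq!(VarLenGranule::bytes_to_granules(1000), (1, true));
    /// ```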
    pub const fn bytes_to_granules(len_in_bytes: usize) -> (usize, bool) {
        if len_in_bytes > VarLenGranule::OBJECT_SIZE_BLOB_THRESHOLD {
            // If the object is large enough to go in the blob store,
            // we only need space for a blob hash,
            // rather than the whole object.
            // A blob hash fits in a single granule, as a BLAKE3 hash is 32 bytes < 62 bytes.
            (1, true)
        } else {
            // Using `div_ceil` here to ensure over- rather than under-allocation.
            (len_in_bytes.div_ceil(Self::DATA_SIZE), false)
        }
    }

    /// Chunks `bytes` into an iterator where each element fits into a granule.
    pub fn chunks(bytes: &[u8]) -> impl DoubleEndedIterator<Item = &[u8]> {
        bytes.chunks(Self::DATA_SIZE)
    }

    /// Returns the data from the var-len object in this granule.
    pub fn data(&self) -> &[u8] {
        let len = self.header.len() as usize;
        &self.data[0..len]
    }

    /// Assumes that the granule stores a [`BlobHash`] and returns it.
    ///
    /// Panics if the assumption is wrong.
    pub fn blob_hash(&self) -> BlobHash {
        self.data().try_into().unwrap()
    }
}

/// A single [`VarLenGranule`] is enough to store a [`BlobHash`].
#[allow(clippy::assertions_on_constants)]
const _VLG_CAN_STORE_BLOB_HASH: () = assert!(VarLenGranule::DATA_SIZE >= BlobHash::SIZE);

/// A visitor object which can iterate over the var-len slots in a row.
///
/// Each var-len visitor is specialized to a particular row type,
/// though implementors of `VarLenMembers` decide whether this specialization
/// is per instance or per type.
///
/// The trivial implementor of `VarLenMembers` is [`AlignedVarLenOffsets`],
/// which stores the offsets of var-len members in a particular row type in a slice,
/// and uses pointer arithmetic to return references to them.
///
/// # Safety
///
/// - `Self::visit_var_len` and `Self::visit_var_len_mut`
///   must visit the same set of `VarLenRef`s in the same order.
///   Various consumers in `Page` and friends depend on this requirement.
pub unsafe trait VarLenMembers {
    /// The iterator type returned by [`VarLenMembers::visit_var_len`].
    type Iter<'this, 'row>: Iterator<Item = &'row VarLenRef>
    where
        Self: 'this;

    /// The iterator type returned by [`VarLenMembers::visit_var_len_mut`].
    type IterMut<'this, 'row>: Iterator<Item = &'row mut VarLenRef>
    where
        Self: 'this;

    /// Treats `row` as storage for a row of the particular type handled by `self`,
    /// and iterates over the (possibly stale) `VarLenRef`s within it.
    ///
    /// Visited `VarLenRef`s are valid-unconstrained
    /// and will always be valid from Rust/LLVM's perspective,
    /// i.e. will never be uninit,
    /// but will not necessarily point to properly-allocated `VarLenGranule`s.
    ///
    /// Callers are responsible for maintaining whether var-len members have been initialized.
    ///
    /// # Safety
    ///
    /// - `row` must be properly aligned for the row type.
    ///   This alignment constraint should be defined (and documented!)
    ///   by the implementor of `VarLenMembers`.
    ///
    /// - `row` must further be a slice of exactly the number of bytes of the row type.
    ///   Implementors may or may not check this property via `debug_assert!`,
    ///   but callers *must always* ensure it for safety.
    ///   These invariants allow us to construct references to [`VarLenRef`]s inside the slice.
    ///
    ///   Note that `Iterator::next` is a safe function,
    ///   so it must always be valid to advance an iterator to its end.
    ///
    /// - All callers of `visit_var_len` on a particular `row`
    ///   must visit the same set of `VarLenRef`s in the same order,
    ///   though they may do so through different implementors of `VarLenMembers`.
    ///   E.g. it would be valid to use an `AlignedVarLenOffsets` to initialize a row,
    ///   then later read from it using a hypothetical optimized JITted visitor,
    ///   provided the JITted visitor visited the same set of offsets.
    unsafe fn visit_var_len_mut<'this, 'row>(&'this self, row: &'row mut Bytes) -> Self::IterMut<'this, 'row>;

    /// Treats `row` as storage for a row of the particular type handled by `self`,
    /// and iterates over the (possibly stale) `VarLenRef`s within it.
    ///
    /// Visited `VarLenRef`s are valid-unconstrained
    /// and will always be valid from Rust/LLVM's perspective,
    /// i.e. will never be uninit,
    /// but will not necessarily point to properly-allocated `VarLenGranule`s.
    ///
    /// Callers are responsible for maintaining whether var-len members have been initialized.
    ///
    /// # Safety
    ///
    /// - `row` must be properly aligned for the row type.
    ///   This alignment constraint should be defined (and documented!)
    ///   by the implementor of `VarLenMembers`.
    ///
    /// - `row` must further be a slice of exactly the number of bytes of the row type.
    ///   Implementors may or may not check this property via `debug_assert!`,
    ///   but callers *must always* ensure it for safety.
    ///   These invariants allow us to construct references to [`VarLenRef`]s inside the slice.
    ///
    ///   Note that `Iterator::next` is a safe function,
    ///   so it must always be valid to advance an iterator to its end.
    ///
    /// - All callers of `visit_var_len` on a particular `row`
    ///   must visit the same set of `VarLenRef`s in the same order,
    ///   though they may do so through different implementors of `VarLenMembers`.
    ///   E.g. it would be valid to use an `AlignedVarLenOffsets` to initialize a row,
    ///   then later read from it using a hypothetical optimized JITted visitor,
    ///   provided the JITted visitor visited the same set of offsets.
    unsafe fn visit_var_len<'this, 'row>(&'this self, row: &'row Bytes) -> Self::Iter<'this, 'row>;
}

/// Slice of offsets to var-len members, in units of 2-byte words.
///
/// This type is intended as a demonstration of the `VarLenMembers` interface,
/// and is used in testing and benchmarking.
///
/// Note that this visitor is not suitable for sum types, or for types which contain sums.
///
/// Units of 2-byte words because `VarLenRef` is 2-byte aligned.
/// Note that `VarLenRef` is 4 bytes wide, but only 2-byte aligned.
///
/// The listed offsets must not overlap, i.e. there must be a gap of at least 2 between each offset.
///
/// For `AlignedVarLenOffsets([n])`, a 4-byte `VarLenRef` exists in each row at +2n bytes.
///
/// E.g. `AlignedVarLenOffsets([0, 4])` describes a row of at least 12 bytes with:
/// - a var-len ref at +0..4 bytes (i.e. +0..2 `u16`s),
/// - fixed-len field(s) at +4..8 bytes (i.e. +2..4 `u16`s),
/// - a var-len ref at +8..12 bytes (i.e. +4..6 `u16`s),
/// - fixed-len field(s) at +12.. bytes (i.e. +6.. `u16`s), if the row size exceeds 12.
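///
/// A usage sketch (marked `ignore`; it assumes `Bytes` is a plain `u8` slice
/// and that these items are reachable from outside the crate):
///
/// ```ignore
/// let visitor = AlignedVarLenOffsets::from_offsets(&[0, 4]);
/// // A 12-byte buffer; using `u16`s guarantees the required 2-byte alignment.
/// let row = [0u16; 6];
/// // SAFETY: `row` is 2-byte aligned and exactly 12 bytes,
/// // matching the layout described above.
/// let row_bytes = unsafe { core::slice::from_raw_parts(row.as_ptr().cast::<u8>(), 12) };
/// let count = unsafe { visitor.visit_var_len(row_bytes) }.count();
/// assert_eq!(count, 2); // one `VarLenRef` per listed offset
/// ```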
#[derive(Copy, Clone)]
pub struct AlignedVarLenOffsets<'a>(&'a [u16]);

impl<'a> AlignedVarLenOffsets<'a> {
    /// Returns an [`AlignedVarLenOffsets`] using `offsets`.
    pub const fn from_offsets(offsets: &'a [u16]) -> Self {
        Self(offsets)
    }
}

// SAFETY: `visit_var_len` and `visit_var_len_mut` are only different
// in that they yield `&` vs. `&mut` and are otherwise identical.
unsafe impl VarLenMembers for AlignedVarLenOffsets<'_> {
    type Iter<'this, 'row>
        = AlignedVarLenOffsetsIter<'this, 'row>
    where
        Self: 'this;

    type IterMut<'this, 'row>
        = AlignedVarLenOffsetsIterMut<'this, 'row>
    where
        Self: 'this;

    /// # Safety
    ///
    /// `row` must be 2-byte aligned.
    ///
    /// `row` must be an allocation of at least `2n + 4` bytes,
    /// where `n` is the largest offset in `self`.
    ///
    /// All callers of `visit_var_len` on a particular `row`
    /// must visit the same set of `VarLenRef`s,
    /// though they may do so through different implementors of `VarLenMembers`.
    /// E.g. it would be valid to use an `AlignedVarLenOffsets` to initialize a row,
    /// then later read from it using a hypothetical optimized JITted visitor,
    /// provided the JITted visitor visited the same set of offsets.
    unsafe fn visit_var_len<'this, 'row>(&'this self, row: &'row Bytes) -> Self::Iter<'this, 'row> {
        AlignedVarLenOffsetsIter {
            offsets: self,
            _row_lifetime: PhantomData,
            row: row.as_ptr(),
            next_offset_idx: 0,
        }
    }

    /// # Safety
    ///
    /// `row` must be 2-byte aligned.
    ///
    /// `row` must be an allocation of at least `2n + 4` bytes,
    /// where `n` is the largest offset in `self`.
    ///
    /// All callers of `visit_var_len` on a particular `row`
    /// must visit the same set of `VarLenRef`s,
    /// though they may do so through different implementors of `VarLenMembers`.
    /// E.g. it would be valid to use an `AlignedVarLenOffsets` to initialize a row,
    /// then later read from it using a hypothetical optimized JITted visitor,
    /// provided the JITted visitor visited the same set of offsets.
    unsafe fn visit_var_len_mut<'this, 'row>(&'this self, row: &'row mut Bytes) -> Self::IterMut<'this, 'row> {
        AlignedVarLenOffsetsIterMut {
            offsets: self,
            _row_lifetime: PhantomData,
            row: row.as_mut_ptr(),
            next_offset_idx: 0,
        }
    }
}

pub struct AlignedVarLenOffsetsIter<'offsets, 'row> {
    offsets: &'offsets AlignedVarLenOffsets<'offsets>,
    _row_lifetime: PhantomData<&'row Bytes>,
    row: *const Byte,
    next_offset_idx: usize,
}

impl<'row> Iterator for AlignedVarLenOffsetsIter<'_, 'row> {
    type Item = &'row VarLenRef;

    fn next(&mut self) -> Option<Self::Item> {
        if self.next_offset_idx >= self.offsets.0.len() {
            None
        } else {
            // I sure would like to be able to write `self.next_offset_idx.post_increment(1)`...
            // - pgoldman(2023-11-16).
            let curr_offset_idx = self.next_offset_idx;
            self.next_offset_idx += 1;

            // The stored offsets are in units of 2-byte words,
            // so convert to a byte offset before adding to the row pointer.
            let byte_offset = self.offsets.0[curr_offset_idx] as usize * mem::align_of::<VarLenRef>();

            // SAFETY: `AlignedVarLenOffsets::visit_var_len`'s safety requirements
            //         mean that `row` is always 2-byte aligned, so this will be too,
            //         and that `row` is large enough for all the `offsets`,
            //         so this `add` is always in-bounds.
            let elt_ptr: *const VarLenRef = unsafe { self.row.add(byte_offset).cast() };

            // SAFETY: `elt_ptr` is aligned and inbounds.
            //         Any pattern of init bytes is valid at `VarLenRef`,
            //         and the `row_data` in a `Page` is never uninit,
            //         so it's safe to create a `&` to any value in the page,
            //         though the resulting `VarLenRef` may be garbage.
            Some(unsafe { &*elt_ptr })
        }
    }
}

pub struct AlignedVarLenOffsetsIterMut<'offsets, 'row> {
    offsets: &'offsets AlignedVarLenOffsets<'offsets>,
    _row_lifetime: PhantomData<&'row mut Bytes>,
    row: *mut Byte,
    next_offset_idx: usize,
}

impl<'row> Iterator for AlignedVarLenOffsetsIterMut<'_, 'row> {
    type Item = &'row mut VarLenRef;

    fn next(&mut self) -> Option<Self::Item> {
        if self.next_offset_idx >= self.offsets.0.len() {
            None
        } else {
            // I sure would like to be able to write `self.next_offset_idx.post_increment(1)`...
            // - pgoldman(2023-11-16).
            let curr_offset_idx = self.next_offset_idx;
            self.next_offset_idx += 1;

            // The stored offsets are in units of 2-byte words,
            // so convert to a byte offset before adding to the row pointer.
            let byte_offset = self.offsets.0[curr_offset_idx] as usize * mem::align_of::<VarLenRef>();

            // SAFETY: `AlignedVarLenOffsets::visit_var_len_mut`'s safety requirements
            //         mean that `row` is always 2-byte aligned, so this will be too,
            //         and that `row` is large enough for all the `offsets`,
            //         so this `add` is always in-bounds.
            let elt_ptr: *mut VarLenRef = unsafe { self.row.add(byte_offset).cast() };

            // SAFETY: `elt_ptr` is aligned and inbounds.
            //         Any pattern of init bytes is valid at `VarLenRef`,
            //         and the `row_data` in a `Page` is never uninit,
            //         so it's safe to create an `&mut` to any value in the page,
            //         though the resulting `VarLenRef` may be garbage.
            Some(unsafe { &mut *elt_ptr })
        }
    }
}

/// A `VarLenMembers` visitor for row types with no var-len components,
/// which never visits anything.
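///
/// Sketch of its (trivial) behavior (marked `ignore`; assumes `Bytes` is a `u8` slice):
///
/// ```ignore
/// let visitor = NullVarLenVisitor;
/// // A row with no var-len members yields no `VarLenRef`s at all.
/// assert_eq!(unsafe { visitor.visit_var_len(&[0u8; 8]) }.count(), 0);
/// ```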
#[derive(Copy, Clone)]
pub struct NullVarLenVisitor;

// SAFETY: Both `visit_var_len` and `visit_var_len_mut` visit the empty set.
unsafe impl VarLenMembers for NullVarLenVisitor {
    type Iter<'this, 'row> = iter::Empty<&'row VarLenRef>;
    type IterMut<'this, 'row> = iter::Empty<&'row mut VarLenRef>;

    unsafe fn visit_var_len<'this, 'row>(&'this self, _row: &'row Bytes) -> Self::Iter<'this, 'row> {
        iter::empty()
    }

    unsafe fn visit_var_len_mut<'this, 'row>(&'this self, _row: &'row mut Bytes) -> Self::IterMut<'this, 'row> {
        iter::empty()
    }
}

#[cfg(test)]
mod test {
    use super::*;
    use proptest::prelude::*;

    fn generate_var_len_offset() -> impl Strategy<Value = PageOffset> {
        (0u16..(1 << 10)).prop_map(|unaligned| PageOffset(unaligned * VarLenGranule::SIZE.0))
    }

    fn generate_len() -> impl Strategy<Value = u8> {
        0..(VarLenGranule::DATA_SIZE as u8)
    }

    proptest! {
        #[test]
        fn granule_header_bitbashing(len in generate_len(), next in generate_var_len_offset(), len2 in generate_len(), next2 in generate_var_len_offset()) {
            let header = VarLenGranuleHeader::new(len, next);
            prop_assert_eq!(len, header.len());
            prop_assert_eq!(next, header.next());

            let header_new_len = header.with_len(len2);
            prop_assert_eq!(len2, header_new_len.len());
            prop_assert_eq!(next, header_new_len.next());

            let header_new_next = header.with_next(next2);
            prop_assert_eq!(len, header_new_next.len());
            prop_assert_eq!(next2, header_new_next.next());

            prop_assert_eq!(header_new_len.with_next(next2).0, header_new_next.with_len(len2).0);
        }
    }
}