spacetimedb_table/
bflatn_to.rs

//! Provides the functions [`write_row_to_pages(pages, blob_store, ty, val)`]
//! and [`write_row_to_page(page, blob_store, visitor, ty, val)`]
//! which write `val: ProductValue` typed at `ty` to `pages` and `page` respectively.
4
5use super::{
6    blob_store::BlobStore,
7    indexes::{Bytes, PageOffset, RowPointer, SquashedOffset},
8    page::{GranuleOffsetIter, Page, VarView},
9    page_pool::PagePool,
10    pages::Pages,
11    table::BlobNumBytes,
12    util::range_move,
13    var_len::{VarLenGranule, VarLenMembers, VarLenRef},
14};
15use spacetimedb_sats::{
16    bsatn::{self, to_writer, DecodeError},
17    buffer::BufWriter,
18    de::DeserializeSeed as _,
19    i256,
20    layout::{
21        align_to, AlgebraicTypeLayout, HasLayout, ProductTypeLayoutView, RowTypeLayout, SumTypeLayout, VarLenType,
22    },
23    u256, AlgebraicType, AlgebraicValue, ProductValue, SumValue,
24};
25use thiserror::Error;
26
/// Errors that can occur when writing a row to a page or a set of pages.
#[derive(Error, Debug, PartialEq, Eq)]
pub enum Error {
    /// The BSATN input could not be decoded to a `ProductValue` of the row type.
    #[error(transparent)]
    Decode(#[from] DecodeError),
    /// The value did not conform to the expected type.
    #[error("Expected a value of type {0:?}, but found {1:?}")]
    WrongType(AlgebraicType, AlgebraicValue),
    /// An allocation error from a single page.
    #[error(transparent)]
    PageError(#[from] super::page::Error),
    /// An allocation error from the set of pages.
    #[error(transparent)]
    PagesError(#[from] super::pages::Error),
}
38
39/// Writes `row` typed at `ty` to `pages`
40/// using `blob_store` as needed to write large blobs.
41///
42/// Panics if `val` is not of type `ty`.
43///
44/// # Safety
45///
46/// `pages` must be specialized to store rows of `ty`.
47/// This includes that its `visitor` must be prepared to visit var-len members within `ty`,
48/// and must do so in the same order as a `VarLenVisitorProgram` for `ty` would,
49/// i.e. by monotonically increasing offsets.
50pub unsafe fn write_row_to_pages_bsatn(
51    pool: &PagePool,
52    pages: &mut Pages,
53    visitor: &impl VarLenMembers,
54    blob_store: &mut dyn BlobStore,
55    ty: &RowTypeLayout,
56    mut bytes: &[u8],
57    squashed_offset: SquashedOffset,
58) -> Result<(RowPointer, BlobNumBytes), Error> {
59    let val = ty.product().deserialize(bsatn::Deserializer::new(&mut bytes))?;
60    unsafe { write_row_to_pages(pool, pages, visitor, blob_store, ty, &val, squashed_offset) }
61}
62
63/// Writes `row` typed at `ty` to `pages`
64/// using `blob_store` as needed to write large blobs.
65///
66/// Panics if `val` is not of type `ty`.
67///
68/// # Safety
69///
70/// `pages` must be specialized to store rows of `ty`.
71/// This includes that its `visitor` must be prepared to visit var-len members within `ty`,
72/// and must do so in the same order as a `VarLenVisitorProgram` for `ty` would,
73/// i.e. by monotonically increasing offsets.
74pub unsafe fn write_row_to_pages(
75    pool: &PagePool,
76    pages: &mut Pages,
77    visitor: &impl VarLenMembers,
78    blob_store: &mut dyn BlobStore,
79    ty: &RowTypeLayout,
80    val: &ProductValue,
81    squashed_offset: SquashedOffset,
82) -> Result<(RowPointer, BlobNumBytes), Error> {
83    let num_granules = required_var_len_granules_for_row(val);
84
85    match pages.with_page_to_insert_row(pool, ty.size(), num_granules, |page| {
86        // SAFETY:
87        // - Caller promised that `pages` is suitable for storing instances of `ty`
88        //   so `page` is also suitable.
89        // - Caller promised that `visitor` is prepared to visit for `ty`
90        //   and in the same order as a `VarLenVisitorProgram` for `ty` would.
91        // - `visitor` came from `pages` which we can trust to visit in the right order.
92        unsafe { write_row_to_page(page, blob_store, visitor, ty, val) }
93    })? {
94        (page, Ok((offset, blob_inserted))) => {
95            Ok((RowPointer::new(false, page, offset, squashed_offset), blob_inserted))
96        }
97        (_, Err(e)) => Err(e),
98    }
99}
100
101/// Writes `row` typed at `ty` to `page`
102/// using `blob_store` as needed to write large blobs
103/// and `visitor` to fixup var-len pointers in the fixed-len row part.
104///
105/// Panics if `val` is not of type `ty`.
106///
107/// # Safety
108///
109/// - `page` must be prepared to store instances of `ty`.
110///
111/// - `visitor` must be prepared to visit var-len members within `ty`,
112///   and must do so in the same order as a `VarLenVisitorProgram` for `ty` would,
113///   i.e. by monotonically increasing offsets.
114///
115/// - `page` must use a var-len visitor which visits the same var-len members in the same order.
116pub unsafe fn write_row_to_page(
117    page: &mut Page,
118    blob_store: &mut dyn BlobStore,
119    visitor: &impl VarLenMembers,
120    ty: &RowTypeLayout,
121    val: &ProductValue,
122) -> Result<(PageOffset, BlobNumBytes), Error> {
123    let fixed_row_size = ty.size();
124    // SAFETY: We've used the right `row_size` and we trust that others have too.
125    // `RowTypeLayout` also ensures that we satisfy the minimum row size.
126    let fixed_offset = unsafe { page.alloc_fixed_len(fixed_row_size)? };
127
128    // Create the context for writing to `page`.
129    let (mut fixed, var_view) = page.split_fixed_var_mut();
130    let mut serialized = BflatnSerializedRowBuffer {
131        fixed_buf: fixed.get_row_mut(fixed_offset, fixed_row_size),
132        curr_offset: 0,
133        var_view,
134        last_allocated_var_len_index: 0,
135        large_blob_insertions: Vec::new(),
136    };
137
138    // Write the row to the page. Roll back on any failure.
139    if let Err(e) = serialized.write_product(ty.product(), val) {
140        // SAFETY: The `visitor` is proper for the row type per caller requirements.
141        unsafe { serialized.roll_back_var_len_allocations(visitor) };
142        // SAFETY:
143        // - `fixed_offset` came from `alloc_fixed_len` so it is in bounds of `page`.
144        // - `RowTypeLayout::size()` ensures `fixed_offset` is properly aligned for `FreeCellRef`.
145        unsafe { fixed.free(fixed_offset, fixed_row_size) };
146        return Err(e);
147    }
148
149    // Haven't stored large blobs or init those granules with blob hashes yet, so do it now.
150    let blob_store_inserted_bytes = serialized.write_large_blobs(blob_store);
151
152    Ok((fixed_offset, blob_store_inserted_bytes))
153}
154
/// The writing / serialization context used by the function [`write_row_to_page`].
struct BflatnSerializedRowBuffer<'page> {
    /// The work-in-progress fixed part of the row,
    /// allocated inside the page.
    fixed_buf: &'page mut Bytes,

    /// The current offset into `fixed_buf` at which we are writing.
    ///
    /// The various writing methods will advance `curr_offset`.
    curr_offset: usize,

    /// The number of var-len objects inserted into the page so far.
    ///
    /// Used on failure to know how many var-len allocations to roll back.
    last_allocated_var_len_index: usize,

    /// The deferred large-blob insertions
    /// with `Vec<u8>` being the blob bytes to insert to the blob store
    /// and the `VarLenRef` being the destination to write the blob hash.
    ///
    /// Deferred so that a failed row write can be rolled back
    /// without having touched the blob store at all.
    large_blob_insertions: Vec<(VarLenRef, Vec<u8>)>,

    /// The mutable view of the variable section of the page.
    var_view: VarView<'page>,
}
177
impl BflatnSerializedRowBuffer<'_> {
    /// Rolls back all the var-len allocations made when writing the row.
    ///
    /// Called when writing the row failed partway through,
    /// to restore the var-len section of the page to its pre-write state.
    ///
    /// # Safety
    ///
    /// The `visitor` must be proper for the row type.
    unsafe fn roll_back_var_len_allocations(&mut self, visitor: &impl VarLenMembers) {
        // SAFETY:
        // - `fixed_buf` is properly aligned for the row type
        //    and `fixed_buf.len()` matches exactly the size of the row type.
        // - `fixed_buf`'s `VarLenRef`s are initialized up to `last_allocated_var_len_index`.
        // - `visitor` is proper for the row type.
        let visitor_iter = unsafe { visitor.visit_var_len(self.fixed_buf) };
        // Only free the members actually allocated so far;
        // later `VarLenRef` slots in `fixed_buf` were never written and must not be touched.
        for vlr in visitor_iter.take(self.last_allocated_var_len_index) {
            // SAFETY: The `vlr` came from the allocation in `write_var_len_obj`
            // which wrote it to the fixed part using `write_var_len_ref`.
            // Thus, it points to a valid `VarLenGranule`.
            unsafe { self.var_view.free_object_ignore_blob(*vlr) };
        }
    }

    /// Insert all large blobs into `blob_store` and their hashes to their granules.
    ///
    /// Returns the total number of bytes inserted into `blob_store`.
    fn write_large_blobs(mut self, blob_store: &mut dyn BlobStore) -> BlobNumBytes {
        let mut blob_store_inserted_bytes = BlobNumBytes::default();
        for (vlr, value) in self.large_blob_insertions {
            // SAFETY: `vlr` was given to us by `alloc_for_slice`
            // so it is properly aligned for a `VarLenGranule` and in bounds of the page.
            // However, as it was added to `self.large_blob_insertions`,
            // we have not yet written the hash to that granule.
            unsafe {
                blob_store_inserted_bytes += self.var_view.write_large_blob_hash_to_granule(blob_store, &value, vlr);
            }
        }
        blob_store_inserted_bytes
    }

    /// Write `val`, an [`AlgebraicValue`], typed at `ty`, to the buffer.
    ///
    /// Errors if `val` does not structurally match `ty`.
    fn write_value(&mut self, ty: &AlgebraicTypeLayout, val: &AlgebraicValue) -> Result<(), Error> {
        // The caller (e.g., `write_product`, `write_sum`) is responsible for
        // positioning `curr_offset` at an offset aligned for `ty`; verify that here.
        debug_assert_eq!(
            self.curr_offset,
            align_to(self.curr_offset, ty.align()),
            "curr_offset {} insufficiently aligned for type {:#?}",
            self.curr_offset,
            val,
        );

        match (ty, val) {
            // For sums, select the type based on the sum tag,
            // write the variant data given the variant type,
            // and finally write the tag.
            (AlgebraicTypeLayout::Sum(ty), AlgebraicValue::Sum(val)) => self.write_sum(ty, val)?,
            // For products, write every element in order.
            (AlgebraicTypeLayout::Product(ty), AlgebraicValue::Product(val)) => self.write_product(ty.view(), val)?,

            // For primitive types, write their contents by LE-encoding.
            (&AlgebraicTypeLayout::Bool, AlgebraicValue::Bool(val)) => self.write_bool(*val),
            // Integer types:
            (&AlgebraicTypeLayout::I8, AlgebraicValue::I8(val)) => self.write_i8(*val),
            (&AlgebraicTypeLayout::U8, AlgebraicValue::U8(val)) => self.write_u8(*val),
            (&AlgebraicTypeLayout::I16, AlgebraicValue::I16(val)) => self.write_i16(*val),
            (&AlgebraicTypeLayout::U16, AlgebraicValue::U16(val)) => self.write_u16(*val),
            (&AlgebraicTypeLayout::I32, AlgebraicValue::I32(val)) => self.write_i32(*val),
            (&AlgebraicTypeLayout::U32, AlgebraicValue::U32(val)) => self.write_u32(*val),
            (&AlgebraicTypeLayout::I64, AlgebraicValue::I64(val)) => self.write_i64(*val),
            (&AlgebraicTypeLayout::U64, AlgebraicValue::U64(val)) => self.write_u64(*val),
            (&AlgebraicTypeLayout::I128, AlgebraicValue::I128(val)) => self.write_i128(val.0),
            (&AlgebraicTypeLayout::U128, AlgebraicValue::U128(val)) => self.write_u128(val.0),
            (&AlgebraicTypeLayout::I256, AlgebraicValue::I256(val)) => self.write_i256(**val),
            (&AlgebraicTypeLayout::U256, AlgebraicValue::U256(val)) => self.write_u256(**val),
            // Float types:
            (&AlgebraicTypeLayout::F32, AlgebraicValue::F32(val)) => self.write_f32((*val).into()),
            (&AlgebraicTypeLayout::F64, AlgebraicValue::F64(val)) => self.write_f64((*val).into()),

            // For strings, we reserve space for a `VarLenRef`
            // and push the bytes as a var-len object.
            (&AlgebraicTypeLayout::String, AlgebraicValue::String(val)) => self.write_string(val)?,

            // For arrays, we reserve space for a `VarLenRef`
            // and push the bytes, after BSATN encoding, as a var-len object.
            (AlgebraicTypeLayout::VarLen(VarLenType::Array(_)), val @ AlgebraicValue::Array(_)) => {
                self.write_av_bsatn(val)?
            }

            // If the type doesn't match the value, return an error.
            (ty, val) => Err(Error::WrongType(ty.algebraic_type(), val.clone()))?,
        }

        Ok(())
    }

    /// Write `val`, a [`SumValue`], typed at `ty`, to the buffer.
    fn write_sum(&mut self, ty: &SumTypeLayout, val: &SumValue) -> Result<(), Error> {
        // Extract sum value components and variant type, and offsets.
        let SumValue { tag, ref value } = *val;
        let variant_ty = &ty.variants[tag as usize];
        let variant_offset = self.curr_offset + ty.offset_of_variant_data(tag);
        let tag_offset = self.curr_offset + ty.offset_of_tag();

        // Write the variant value at `variant_offset`.
        self.curr_offset = variant_offset;
        self.write_value(&variant_ty.ty, value)?;

        // Write the tag at `tag_offset`.
        self.curr_offset = tag_offset;
        self.write_u8(tag);

        Ok(())
    }

    /// Write `val`, a [`ProductValue`], typed at `ty`, to the buffer.
    fn write_product(&mut self, ty: ProductTypeLayoutView<'_>, val: &ProductValue) -> Result<(), Error> {
        // `Iterator::zip` silently drops elements if the two iterators have different lengths,
        // so we need to check that our `ProductValue` has the same number of elements
        // as our `ProductTypeLayout` to be sure it's typed correctly.
        // Otherwise, if the value is too long, we'll discard its fields (whatever),
        // or if it's too short, we'll leave some fields in the page "uninit"
        // (actually valid-unconstrained) (very bad).
        if ty.elements.len() != val.elements.len() {
            return Err(Error::WrongType(
                ty.algebraic_type(),
                AlgebraicValue::Product(val.clone()),
            ));
        }

        let base_offset = self.curr_offset;

        // Each element is written at its layout-specified offset relative to
        // the start of the product, not merely appended after the previous one,
        // so `curr_offset` is recomputed per element to honor padding.
        for (elt_ty, elt) in ty.elements.iter().zip(val.elements.iter()) {
            self.curr_offset = base_offset + elt_ty.offset as usize;
            self.write_value(&elt_ty.ty, elt)?;
        }
        Ok(())
    }

    /// Write the string `val` to the var-len section
    /// and a `VarLenRef` to the fixed buffer and advance the `curr_offset`.
    fn write_string(&mut self, val: &str) -> Result<(), Error> {
        let val = val.as_bytes();

        // Write `val` to the page. The handle is `vlr`.
        let (vlr, in_blob) = self.var_view.alloc_for_slice(val)?;
        if in_blob {
            // Too large for inline granule storage;
            // defer the blob-store insertion until the whole row has been written.
            self.defer_insert_large_blob(vlr, val.to_vec());
        }

        // Write `vlr` to the fixed part.
        self.write_var_len_ref(vlr);
        Ok(())
    }

    /// Write `val` BSATN-encoded to var-len section
    /// and a `VarLenRef` to the fixed buffer and advance the `curr_offset`.
    fn write_av_bsatn(&mut self, val: &AlgebraicValue) -> Result<(), Error> {
        // Allocate space. The length is computed without materializing the encoding.
        let len_in_bytes = bsatn_len(val);
        let (vlr, in_blob) = self.var_view.alloc_for_len(len_in_bytes)?;

        // Write `vlr` to the fixed part.
        self.write_var_len_ref(vlr);

        if in_blob {
            // We won't be storing the large blob in the page,
            // so no point in writing the blob directly to the page.
            let mut bytes = Vec::with_capacity(len_in_bytes);
            val.encode(&mut bytes);
            self.defer_insert_large_blob(vlr, bytes);
        } else {
            // Write directly to the page.
            // SAFETY: `vlr.first_granule` points to a granule
            // even though the granule's data is not initialized as of yet.
            // Note that the granule stores valid-unconstrained bytes (i.e. they are not uninit),
            // but they may be leftovers from a previous allocation.
            let iter = unsafe { self.var_view.granule_offset_iter(vlr.first_granule) };
            let mut writer = GranuleBufWriter { buf: None, iter };
            to_writer(&mut writer, val).unwrap();
        }

        /// A `BufWriter` that writes directly to a page,
        /// spreading the written bytes across the granule chain
        /// allocated by `alloc_for_len` above.
        struct GranuleBufWriter<'vv, 'page> {
            /// The offset to the granule being written to
            /// and how much has been written to it already.
            buf: Option<(PageOffset, usize)>,
            /// The iterator for the offsets to all the granule we'll write to.
            iter: GranuleOffsetIter<'page, 'vv>,
        }
        impl BufWriter for GranuleBufWriter<'_, '_> {
            fn put_slice(&mut self, mut slice: &[u8]) {
                // Consume `slice` granule by granule until all of it has been written.
                while !slice.is_empty() {
                    let (offset, start) = match self.buf.take() {
                        // Still have some to write to this granule.
                        Some(buf @ (_, start)) if start < VarLenGranule::DATA_SIZE => buf,
                        // First granule or the current one is full.
                        _ => {
                            let next = self.iter.next();
                            debug_assert!(next.is_some());
                            // SAFETY: The iterator length is exactly such that
                            // `next.is_none() == slice.is_empty()`.
                            let next = unsafe { next.unwrap_unchecked() };
                            (next, 0)
                        }
                    };

                    // Derive how much we can add to this granule
                    // and only take that much from `slice`.
                    let capacity_remains = VarLenGranule::DATA_SIZE - start;
                    debug_assert!(capacity_remains > 0);
                    let extend_len = capacity_remains.min(slice.len());
                    let (extend_with, rest) = slice.split_at(extend_len);
                    // The section of the granule data to write to.
                    // SAFETY:
                    // - `offset` came from `self.iter`, which only yields valid offsets.
                    // - `start < VarLenGranule::DATA_SIZE` was ensured above.
                    let write_to = unsafe { self.iter.get_mut_data(offset, start) };

                    // Write to the granule.
                    for (to, byte) in write_to.iter_mut().zip(extend_with) {
                        *to = *byte;
                    }

                    // Record progress within the current granule for the next call.
                    slice = rest;
                    self.buf = Some((offset, start + extend_len));
                }
            }
        }

        Ok(())
    }

    /// Write a `VarLenRef` to the fixed buffer and advance the `curr_offset`.
    fn write_var_len_ref(&mut self, val: VarLenRef) {
        // A `VarLenRef` is laid out as two little-endian `u16`s:
        // the length in bytes, then the first granule's page offset.
        self.write_u16(val.length_in_bytes);
        self.write_u16(val.first_granule.0);

        // Keep track of how many var len objects we've added so far
        // so that we can free them on failure.
        self.last_allocated_var_len_index += 1;
    }

    /// Defers the insertion of a large blob to the blob store as well as writing the hash to the granule.
    fn defer_insert_large_blob(&mut self, vlr: VarLenRef, obj_bytes: Vec<u8>) {
        self.large_blob_insertions.push((vlr, obj_bytes));
    }

    /// Write `bytes: &[u8; N]` starting at the current offset
    /// and advance the offset by `N`.
    fn write_bytes<const N: usize>(&mut self, bytes: &[u8; N]) {
        self.fixed_buf[range_move(0..N, self.curr_offset)].copy_from_slice(bytes);
        self.curr_offset += N;
    }

    /// Write a `u8` to the fixed buffer and advance the `curr_offset`.
    fn write_u8(&mut self, val: u8) {
        self.write_bytes(&[val]);
    }

    /// Write an `i8` to the fixed buffer and advance the `curr_offset`.
    fn write_i8(&mut self, val: i8) {
        self.write_u8(val as u8);
    }

    /// Write a `bool` to the fixed buffer and advance the `curr_offset`.
    fn write_bool(&mut self, val: bool) {
        self.write_u8(val as u8);
    }

    /// Write a `u16` to the fixed buffer and advance the `curr_offset`.
    fn write_u16(&mut self, val: u16) {
        self.write_bytes(&val.to_le_bytes());
    }

    /// Write an `i16` to the fixed buffer and advance the `curr_offset`.
    fn write_i16(&mut self, val: i16) {
        self.write_bytes(&val.to_le_bytes());
    }

    /// Write a `u32` to the fixed buffer and advance the `curr_offset`.
    fn write_u32(&mut self, val: u32) {
        self.write_bytes(&val.to_le_bytes());
    }

    /// Write an `i32` to the fixed buffer and advance the `curr_offset`.
    fn write_i32(&mut self, val: i32) {
        self.write_bytes(&val.to_le_bytes());
    }

    /// Write a `u64` to the fixed buffer and advance the `curr_offset`.
    fn write_u64(&mut self, val: u64) {
        self.write_bytes(&val.to_le_bytes());
    }

    /// Write an `i64` to the fixed buffer and advance the `curr_offset`.
    fn write_i64(&mut self, val: i64) {
        self.write_bytes(&val.to_le_bytes());
    }

    /// Write a `u128` to the fixed buffer and advance the `curr_offset`.
    fn write_u128(&mut self, val: u128) {
        self.write_bytes(&val.to_le_bytes());
    }

    /// Write an `i128` to the fixed buffer and advance the `curr_offset`.
    fn write_i128(&mut self, val: i128) {
        self.write_bytes(&val.to_le_bytes());
    }

    /// Write a `u256` to the fixed buffer and advance the `curr_offset`.
    fn write_u256(&mut self, val: u256) {
        self.write_bytes(&val.to_le_bytes());
    }

    /// Write an `i256` to the fixed buffer and advance the `curr_offset`.
    fn write_i256(&mut self, val: i256) {
        self.write_bytes(&val.to_le_bytes());
    }

    /// Write a `f32` to the fixed buffer and advance the `curr_offset`.
    fn write_f32(&mut self, val: f32) {
        self.write_bytes(&val.to_le_bytes());
    }

    /// Write a `f64` to the fixed buffer and advance the `curr_offset`.
    fn write_f64(&mut self, val: f64) {
        self.write_bytes(&val.to_le_bytes());
    }
}
502
503/// Counts the number of [`VarLenGranule`] allocations required to store `val` in a page.
504fn required_var_len_granules_for_row(val: &ProductValue) -> usize {
505    fn traverse_av(val: &AlgebraicValue, count: &mut usize) {
506        match val {
507            AlgebraicValue::Product(val) => traverse_product(val, count),
508            AlgebraicValue::Sum(val) => traverse_av(&val.value, count),
509            AlgebraicValue::Array(_) => add_for_bytestring(bsatn_len(val), count),
510            AlgebraicValue::String(val) => add_for_bytestring(val.len(), count),
511            _ => (),
512        }
513    }
514
515    fn traverse_product(val: &ProductValue, count: &mut usize) {
516        for elt in val {
517            traverse_av(elt, count);
518        }
519    }
520
521    fn add_for_bytestring(len_in_bytes: usize, count: &mut usize) {
522        *count += VarLenGranule::bytes_to_granules(len_in_bytes).0;
523    }
524
525    let mut required_granules: usize = 0;
526    traverse_product(val, &mut required_granules);
527    required_granules
528}
529
530/// Computes the size of `val` when BSATN encoding without actually encoding.
531fn bsatn_len(val: &AlgebraicValue) -> usize {
532    // We store arrays and maps BSATN-encoded,
533    // so we need to go through BSATN encoding to determine the size of the resulting byte blob,
534    // but we don't actually need that byte blob in this calculation,
535    // instead, we can just count them as a serialization format.
536    bsatn::to_len(val).unwrap()
537}
538
#[cfg(test)]
pub mod test {
    use super::*;
    use crate::{
        bflatn_from::serialize_row_from_page, blob_store::HashMapBlobStore, page::tests::hash_unmodified_save_get,
        row_type_visitor::row_type_visitor,
    };
    use proptest::{prelude::*, prop_assert_eq, proptest};
    use spacetimedb_sats::algebraic_value::ser::ValueSerializer;
    use spacetimedb_sats::proptest::generate_typed_row;

    proptest! {
        // Run far fewer cases under miri, which executes tests much more slowly.
        #![proptest_config(ProptestConfig::with_cases(if cfg!(miri) { 8 } else { 2048 }))]
        #[test]
        // Property: writing any well-typed row to a page and reading it back
        // yields the original value, and reading does not modify the page.
        fn av_serde_round_trip_through_page((ty, val) in generate_typed_row()) {
            let ty: RowTypeLayout = ty.into();
            let mut page = Page::new(ty.size());
            let visitor = row_type_visitor(&ty);
            let blob_store = &mut HashMapBlobStore::default();

            // Snapshot the page hash before insertion; the insert must change it.
            let hash_pre_ins = hash_unmodified_save_get(&mut page);

            let (offset, _) = unsafe { write_row_to_page(&mut page, blob_store, &visitor, &ty, &val).unwrap() };

            let hash_pre_ser = hash_unmodified_save_get(&mut page);
            assert_ne!(hash_pre_ins, hash_pre_ser);

            // Read the row back out of the page; it must round-trip exactly.
            let read_val = unsafe { serialize_row_from_page(ValueSerializer, &page, blob_store, offset, &ty) }
                .unwrap().into_product().unwrap();

            prop_assert_eq!(val, read_val);
            // Reading back must not have modified the page.
            assert_eq!(hash_pre_ser, *page.unmodified_hash().unwrap());
        }
    }
}