commonware_storage/qmdb/current/unordered/variable.rs

//! An _unordered_ variant of a [crate::qmdb::current] authenticated database for variable-size
//! values.
//!
//! This variant does not maintain key ordering, so it cannot generate exclusion proofs. Use
//! [crate::qmdb::current::ordered::variable] if exclusion proofs are required.
//!
//! See [Db] for the main database type.
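//!
//! A minimal usage sketch (marked `ignore`; it assumes a runtime `context`, a populated [Config],
//! a [StandardHasher] named `hasher`, and `key`/`value` digests are already in scope):
//!
//! ```ignore
//! // Open (or recover) the database, then move to the mutable state to apply operations.
//! let db = Db::init(context, config).await?;
//! let mut db = db.into_mutable();
//! db.update(key, value).await?;
//!
//! // Commit the operations, re-merkleize, and read the authenticated root.
//! let (db, _) = db.commit(None).await?;
//! let db = db.into_merkleized().await?;
//! let root = db.root();
//!
//! // Prove the current value of `key` and verify the proof against `root`.
//! let proof = db.key_value_proof(hasher.inner(), key).await?;
//! assert!(Db::verify_key_value_proof(hasher.inner(), key, value, &proof, &root));
//! ```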

pub use super::db::KeyValueProof;
use crate::{
    bitmap::MerkleizedBitMap,
    journal::contiguous::variable::Journal,
    mmr::{Location, StandardHasher},
    qmdb::{
        any::{
            unordered::variable::{Db as AnyDb, Operation},
            value::VariableEncoding,
            VariableValue,
        },
        current::{
            db::{merkleize_grafted_bitmap, root, Merkleized},
            VariableConfig as Config,
        },
        Durable, Error,
    },
    translator::Translator,
};
use commonware_codec::{FixedSize, Read};
use commonware_cryptography::{DigestOf, Hasher};
use commonware_runtime::{Clock, Metrics, Storage as RStorage};
use commonware_utils::Array;

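/// The unordered, variable-value database type, where `N` is the size in bytes of the status
/// bitmap chunks, `S` is the merkleization state, and `D` is the durability state.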
pub type Db<E, K, V, H, T, const N: usize, S = Merkleized<DigestOf<H>>, D = Durable> =
    super::db::Db<E, Journal<E, Operation<K, V>>, K, VariableEncoding<V>, H, T, N, S, D>;

// Functionality for the Merkleized state - init only.
impl<
        E: RStorage + Clock + Metrics,
        K: Array,
        V: VariableValue,
        H: Hasher,
        T: Translator,
        const N: usize,
    > Db<E, K, V, H, T, N, Merkleized<DigestOf<H>>, Durable>
where
    Operation<K, V>: Read,
{
    /// Initializes a [Db] from the given `config`. Leverages parallel Merkleization to initialize
    /// the bitmap MMR if a thread pool is provided.
    pub async fn init(
        context: E,
        config: Config<T, <Operation<K, V> as Read>::Cfg>,
    ) -> Result<Self, Error> {
        // TODO: Re-evaluate assertion placement after `generic_const_exprs` is stable.
        const {
            // A compile-time assertion that the chunk size is a multiple of the digest size. A
            // multiple of 1 is optimal with respect to proof size, but a higher multiple allows for
            // a smaller (RAM-resident) Merkle tree over the structure.
            assert!(
                N.is_multiple_of(H::Digest::SIZE),
                "chunk size must be a multiple of the digest size",
            );
            // A compile-time assertion that chunk size is a power of 2, which is necessary to allow
            // the status bitmap tree to be aligned with the underlying operations MMR.
            assert!(N.is_power_of_two(), "chunk size must be a power of 2");
        }
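            // For example, with a 32-byte digest such as SHA-256, both checks above hold for
            // N = 32, 64, 128, and so on.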

        let thread_pool = config.thread_pool.clone();
        let bitmap_metadata_partition = config.bitmap_metadata_partition.clone();

        let mut hasher = StandardHasher::<H>::new();
        let mut status = MerkleizedBitMap::init(
            context.with_label("bitmap"),
            &bitmap_metadata_partition,
            thread_pool,
            &mut hasher,
        )
        .await?
        .into_dirty();

        // Initialize the anydb with a callback that initializes the status bitmap.
        let last_known_inactivity_floor = Location::new_unchecked(status.len());
        let any = AnyDb::init_with_callback(
            context.with_label("any"),
            config.into(),
            Some(last_known_inactivity_floor),
            |append: bool, loc: Option<Location>| {
                status.push(append);
                if let Some(loc) = loc {
                    status.set_bit(*loc, false);
                }
            },
        )
        .await?;

        let status = merkleize_grafted_bitmap(&mut hasher, status, &any.log.mmr).await?;

        // Compute and cache the root
        let root = root(&mut hasher, &status, &any.log.mmr).await?;

        Ok(Self {
            any,
            status,
            state: Merkleized { root },
        })
    }
}

#[cfg(test)]
mod test {
    use crate::{
        bitmap::MerkleizedBitMap,
        kv::tests::{assert_batchable, assert_deletable, assert_gettable, assert_send},
        mmr::{hasher::Hasher as _, Location, Proof, StandardHasher},
        qmdb::{
            any::unordered::variable::Operation,
            current::{
                db::{Merkleized, Unmerkleized},
                proof::RangeProof,
                tests::{self, apply_random_ops},
                unordered::{db::KeyValueProof, variable::Db},
                VariableConfig as Config,
            },
            store::{
                batch_tests,
                tests::{assert_log_store, assert_merkleized_store, assert_prunable_store},
                LogStore as _,
            },
            Durable, Error, NonDurable,
        },
        translator::TwoCap,
    };
    use commonware_cryptography::{sha256::Digest, Hasher as _, Sha256};
    use commonware_macros::test_traced;
    use commonware_runtime::{buffer::paged::CacheRef, deterministic, Metrics as _, Runner as _};
    use commonware_utils::{NZUsize, NZU16, NZU64};
    use rand::RngCore;
    use std::num::{NonZeroU16, NonZeroUsize};

    const PAGE_SIZE: NonZeroU16 = NZU16!(88);
    const PAGE_CACHE_SIZE: NonZeroUsize = NZUsize!(8);

    fn current_db_config(partition_prefix: &str) -> Config<TwoCap, ()> {
        Config {
            mmr_journal_partition: format!("{partition_prefix}_journal_partition"),
            mmr_metadata_partition: format!("{partition_prefix}_metadata_partition"),
            mmr_items_per_blob: NZU64!(11),
            mmr_write_buffer: NZUsize!(1024),
            log_partition: format!("{partition_prefix}_log_partition"),
            log_items_per_blob: NZU64!(7),
            log_write_buffer: NZUsize!(1024),
            log_compression: None,
            log_codec_config: (),
            bitmap_metadata_partition: format!("{partition_prefix}_bitmap_metadata_partition"),
            translator: TwoCap,
            thread_pool: None,
            page_cache: CacheRef::new(PAGE_SIZE, PAGE_CACHE_SIZE),
        }
    }

    /// A type alias for the concrete [Db] type used in these unit tests (Merkleized, Durable).
    type CleanCurrentTest =
        Db<deterministic::Context, Digest, Digest, Sha256, TwoCap, 32, Merkleized<Digest>, Durable>;

    /// A type alias for the Mutable (Unmerkleized, NonDurable) variant of [CleanCurrentTest].
    type MutableCurrentTest =
        Db<deterministic::Context, Digest, Digest, Sha256, TwoCap, 32, Unmerkleized, NonDurable>;

    /// Return a [Db] database initialized with a fixed config.
    async fn open_db(
        context: deterministic::Context,
        partition_prefix: String,
    ) -> CleanCurrentTest {
        CleanCurrentTest::init(context, current_db_config(&partition_prefix))
            .await
            .unwrap()
    }

    #[test_traced("DEBUG")]
    pub fn test_current_db_build_small_close_reopen() {
        super::super::tests::test_build_small_close_reopen::<CleanCurrentTest, _, _>(open_db);
    }

    #[test_traced("WARN")]
    fn test_current_db_build_big() {
        // Expected values after commit + merkleize + prune for unordered variant.
        tests::test_current_db_build_big::<CleanCurrentTest, _, _>(open_db, 1957, 838);
    }

    // Test that merkleization state changes don't reset `steps`.
    #[test_traced("DEBUG")]
    fn test_current_unordered_variable_db_steps_not_reset() {
        let executor = deterministic::Runner::default();
        executor.start(|context| async move {
            let db = open_db(context, "steps_test".to_string()).await;
            crate::qmdb::any::test::test_any_db_steps_not_reset(db).await;
        });
    }

    /// Build a tiny database and make sure we can't convince the verifier that some old value of a
    /// key is active. We specifically test over the partial chunk case, since these bits are yet to
    /// be committed to the underlying MMR.
    #[test_traced("DEBUG")]
    pub fn test_current_db_verify_proof_over_bits_in_uncommitted_chunk() {
        let executor = deterministic::Runner::default();
        executor.start(|context| async move {
            let mut hasher = StandardHasher::<Sha256>::new();
            let partition = "build_small".to_string();
            let mut db = open_db(context.with_label("uncommitted_chunk"), partition.clone())
                .await
                .into_mutable();

            // Add one key.
            let k = Sha256::fill(0x01);
            let v1 = Sha256::fill(0xA1);
            db.update(k, v1).await.unwrap();
            let (db, _) = db.commit(None).await.unwrap();
            let db = db.into_merkleized().await.unwrap();

            let (_, op_loc) = db.any.get_with_loc(&k).await.unwrap().unwrap();
            let proof = db.key_value_proof(hasher.inner(), k).await.unwrap();

            // Proof should be verifiable against current root.
            let root = db.root();
            assert!(CleanCurrentTest::verify_key_value_proof(
                hasher.inner(),
                k,
                v1,
                &proof,
                &root
            ));

            let v2 = Sha256::fill(0xA2);
            // Proof should not verify against a different value.
            assert!(!CleanCurrentTest::verify_key_value_proof(
                hasher.inner(),
                k,
                v2,
                &proof,
                &root,
            ));

            // Update the key to a new value (v2), which inactivates the previous operation.
            let mut db = db.into_mutable();
            db.update(k, v2).await.unwrap();
            let (db, _) = db.commit(None).await.unwrap();
            let db = db.into_merkleized().await.unwrap();
            let root = db.root();

            // New value should not be verifiable against the old proof.
            assert!(!CleanCurrentTest::verify_key_value_proof(
                hasher.inner(),
                k,
                v2,
                &proof,
                &root,
            ));

            // But the new value should verify against a new proof.
            let proof = db.key_value_proof(hasher.inner(), k).await.unwrap();
            assert!(CleanCurrentTest::verify_key_value_proof(
                hasher.inner(),
                k,
                v2,
                &proof,
                &root,
            ));

            // Old value will not verify against new proof.
            assert!(!CleanCurrentTest::verify_key_value_proof(
                hasher.inner(),
                k,
                v1,
                &proof,
                &root,
            ));

            // Create a proof of the now-inactive update operation assigning v1 to k against the
            // current root.
            let (range_proof, _, chunks) = db
                .range_proof(hasher.inner(), op_loc, NZU64!(1))
                .await
                .unwrap();
            let proof_inactive = KeyValueProof {
                loc: op_loc,
                chunk: chunks[0],
                range_proof,
            };
            // This proof should verify using verify_range_proof which does not check activity
            // status.
            let op = Operation::Update(crate::qmdb::any::operation::update::Unordered(k, v1));
            assert!(CleanCurrentTest::verify_range_proof(
                hasher.inner(),
                &proof_inactive.range_proof,
                proof_inactive.loc,
                &[op],
                &[proof_inactive.chunk],
                &root,
            ));

            // But this proof should *not* verify as a key value proof, since verification will see
            // that the operation is inactive.
            assert!(!CleanCurrentTest::verify_key_value_proof(
                hasher.inner(),
                k,
                v1,
                &proof_inactive,
                &root,
            ));

            // Attempt #1 to "fool" the verifier: change the location to that of an active
            // operation. This should not fool the verifier if we're properly validating the
            // inclusion of the operation itself, and not just the chunk.
            let (_, active_loc) = db.any.get_with_loc(&k).await.unwrap().unwrap();
            // The new location should differ but still be in the same chunk.
            assert_ne!(active_loc, proof_inactive.loc);
            assert_eq!(
                MerkleizedBitMap::<deterministic::Context, Digest, 32>::leaf_pos(*active_loc),
                MerkleizedBitMap::<deterministic::Context, Digest, 32>::leaf_pos(
                    *proof_inactive.loc
                )
            );
            let mut fake_proof = proof_inactive.clone();
            fake_proof.loc = active_loc;
            assert!(!CleanCurrentTest::verify_key_value_proof(
                hasher.inner(),
                k,
                v1,
                &fake_proof,
                &root,
            ));

            // Attempt #2 to "fool" the verifier: Modify the chunk in the proof info to make it look
            // like the operation is active by flipping its corresponding bit to 1. This should not
            // fool the verifier if we are correctly incorporating the partial chunk information
            // into the root computation.
            let mut modified_chunk = proof_inactive.chunk;
            let bit_pos = *proof_inactive.loc;
            let byte_idx = bit_pos / 8;
            let bit_idx = bit_pos % 8;
            modified_chunk[byte_idx as usize] |= 1 << bit_idx;

            let mut fake_proof = proof_inactive.clone();
            fake_proof.chunk = modified_chunk;
            assert!(!CleanCurrentTest::verify_key_value_proof(
                hasher.inner(),
                k,
                v1,
                &fake_proof,
                &root,
            ));

            db.destroy().await.unwrap();
        });
    }

    #[test_traced("DEBUG")]
    pub fn test_current_db_range_proofs() {
        let executor = deterministic::Runner::default();
        executor.start(|mut context| async move {
            let partition = "range_proofs".to_string();
            let mut hasher = StandardHasher::<Sha256>::new();
            let db = open_db(context.with_label("first"), partition.clone()).await;
            let root = db.root();

            // Empty range proof should not crash or verify, since even an empty db has a single
            // commit op.
            let proof = RangeProof {
                proof: Proof::default(),
                partial_chunk_digest: None,
            };
            assert!(!CleanCurrentTest::verify_range_proof(
                hasher.inner(),
                &proof,
                Location::new_unchecked(0),
                &[],
                &[],
                &root,
            ));

            let db = apply_random_ops::<CleanCurrentTest>(
                200,
                true,
                context.next_u64(),
                db.into_mutable(),
            )
            .await
            .unwrap();
            let (db, _) = db.commit(None).await.unwrap();
            let db = db.into_merkleized().await.unwrap();
            let root = db.root();

            // Make sure size-constrained batches of operations are provable from the oldest
            // retained op to tip.
            let max_ops = 4;
            let end_loc = db.size();
            let start_loc = db.any.inactivity_floor_loc();

            for loc in *start_loc..*end_loc {
                let loc = Location::new_unchecked(loc);
                let (proof, ops, chunks) = db
                    .range_proof(hasher.inner(), loc, NZU64!(max_ops))
                    .await
                    .unwrap();
                assert!(
                    CleanCurrentTest::verify_range_proof(
                        hasher.inner(),
                        &proof,
                        loc,
                        &ops,
                        &chunks,
                        &root
                    ),
                    "failed to verify range starting at {loc}",
                );
                // Proof should not verify if we include extra chunks.
                let mut chunks_with_extra = chunks.clone();
                chunks_with_extra.push(chunks[chunks.len() - 1]);
                assert!(!CleanCurrentTest::verify_range_proof(
                    hasher.inner(),
                    &proof,
                    loc,
                    &ops,
                    &chunks_with_extra,
                    &root,
                ));
            }

            db.destroy().await.unwrap();
        });
    }

    #[test_traced("DEBUG")]
    pub fn test_current_db_key_value_proof() {
        let executor = deterministic::Runner::default();
        executor.start(|mut context| async move {
            let partition = "range_proofs".to_string();
            let mut hasher = StandardHasher::<Sha256>::new();
            let db = open_db(context.clone(), partition.clone())
                .await
                .into_mutable();
            let db = apply_random_ops::<CleanCurrentTest>(500, true, context.next_u64(), db)
                .await
                .unwrap();
            let (db, _) = db.commit(None).await.unwrap();
            let db = db.into_merkleized().await.unwrap();
            let root = db.root();

            // Confirm bad keys produce the expected error.
            let bad_key = Sha256::fill(0xAA);
            let res = db.key_value_proof(hasher.inner(), bad_key).await;
            assert!(matches!(res, Err(Error::KeyNotFound)));

            let start = *db.inactivity_floor_loc();
            for i in start..db.status.len() {
                if !db.status.get_bit(i) {
                    continue;
                }
                // Found an active operation! Create a proof for its active current key/value if
                // it's a key-updating operation.
                let (key, value) = match db.any.log.read(Location::new_unchecked(i)).await.unwrap()
                {
                    Operation::Update(crate::qmdb::any::operation::update::Unordered(
                        key,
                        value,
                    )) => (key, value),
                    Operation::CommitFloor(_, _) => continue,
                    Operation::Delete(_) => {
                        unreachable!("location does not reference update/commit operation")
                    }
                };

                let proof = db.key_value_proof(hasher.inner(), key).await.unwrap();
                // Proof should validate against the current value and correct root.
                assert!(CleanCurrentTest::verify_key_value_proof(
                    hasher.inner(),
                    key,
                    value,
                    &proof,
                    &root
                ));
                // Proof should fail against the wrong value. Use hash instead of fill to ensure
                // the value differs from any key/value created by TestKey::from_seed (which uses
                // fill patterns).
                let wrong_val = Sha256::hash(&[0xFF]);
                assert!(!CleanCurrentTest::verify_key_value_proof(
                    hasher.inner(),
                    key,
                    wrong_val,
                    &proof,
                    &root
                ));
                // Proof should fail against the wrong key.
                let wrong_key = Sha256::hash(&[0xEE]);
                assert!(!CleanCurrentTest::verify_key_value_proof(
                    hasher.inner(),
                    wrong_key,
                    value,
                    &proof,
                    &root
                ));
                // Proof should fail against the wrong root.
                let wrong_root = Sha256::hash(&[0xDD]);
                assert!(!CleanCurrentTest::verify_key_value_proof(
                    hasher.inner(),
                    key,
                    value,
                    &proof,
                    &wrong_root,
                ));
            }

            db.destroy().await.unwrap();
        });
    }

    /// This test builds a random database and makes sure that its state is correctly restored
    /// after closing and re-opening.
    #[test_traced("WARN")]
    pub fn test_current_db_build_random_close_reopen() {
        crate::qmdb::current::tests::test_build_random_close_reopen(open_db);
    }

    /// Repeatedly update the same key to a new value and ensure we can prove its current value
    /// after each update.
    #[test_traced("WARN")]
    pub fn test_current_db_proving_repeated_updates() {
        let executor = deterministic::Runner::default();
        executor.start(|context| async move {
            let mut hasher = StandardHasher::<Sha256>::new();
            let partition = "build_small".to_string();
            let mut db = open_db(context.clone(), partition.clone()).await;

            // Add one key.
            let k = Sha256::fill(0x00);
            let mut old_val = Sha256::fill(0x00);
            for i in 1u8..=255 {
                let v = Sha256::fill(i);
                let mut dirty_db = db.into_mutable();
                dirty_db.update(k, v).await.unwrap();
                assert_eq!(dirty_db.get(&k).await.unwrap().unwrap(), v);
                let (durable_db, _) = dirty_db.commit(None).await.unwrap();
                db = durable_db.into_merkleized().await.unwrap();
                let root = db.root();

                // Create a proof for the current value of k.
                let proof = db.key_value_proof(hasher.inner(), k).await.unwrap();
                assert!(
                    CleanCurrentTest::verify_key_value_proof(hasher.inner(), k, v, &proof, &root),
                    "proof of update {i} failed to verify"
                );
                // Ensure the proof does NOT verify if we use the previous value.
                assert!(
                    !CleanCurrentTest::verify_key_value_proof(
                        hasher.inner(),
                        k,
                        old_val,
                        &proof,
                        &root
                    ),
                    "proof of update {i} verified when it should not have"
                );
                old_val = v;
            }

            db.destroy().await.unwrap();
        });
    }

    /// This test builds a random database and simulates write failures, verifying that we can
    /// recover from different types of failure scenarios.
    #[test_traced("WARN")]
    pub fn test_current_db_simulate_write_failures() {
        crate::qmdb::current::tests::test_simulate_write_failures(open_db);
    }

    #[test_traced("WARN")]
    pub fn test_current_db_different_pruning_delays_same_root() {
        tests::test_different_pruning_delays_same_root::<CleanCurrentTest, _, _>(open_db);
    }

    #[test_traced("WARN")]
    pub fn test_current_db_sync_persists_bitmap_pruning_boundary() {
        tests::test_sync_persists_bitmap_pruning_boundary::<CleanCurrentTest, _, _>(open_db);
    }

    #[test_traced("DEBUG")]
    fn test_batch() {
        batch_tests::test_batch(|mut ctx| async move {
            let seed = ctx.next_u64();
            let prefix = format!("current_unordered_variable_batch_{seed}");
            open_db(ctx, prefix).await.into_mutable()
        });
    }

    #[allow(dead_code)]
    fn assert_merkleized_db_futures_are_send(
        db: &mut CleanCurrentTest,
        key: Digest,
        loc: Location,
    ) {
        assert_gettable(db, &key);
        assert_log_store(db);
        assert_prunable_store(db, loc);
        assert_merkleized_store(db, loc);
        assert_send(db.sync());
    }

    #[allow(dead_code)]
    fn assert_mutable_db_futures_are_send(db: &mut MutableCurrentTest, key: Digest, value: Digest) {
        assert_gettable(db, &key);
        assert_log_store(db);
        assert_send(db.update(key, value));
        assert_send(db.create(key, value));
        assert_deletable(db, key);
        assert_batchable(db, key, value);
    }

    #[allow(dead_code)]
    fn assert_mutable_db_commit_is_send(db: MutableCurrentTest) {
        assert_send(db.commit(None));
    }
}