commonware_storage/qmdb/current/unordered/fixed.rs

//! An _unordered_ variant of a [crate::qmdb::current] authenticated database optimized for
//! fixed-size values.
//!
//! This variant does not maintain key ordering, so it cannot generate exclusion proofs. Use
//! [super::super::ordered::fixed] if exclusion proofs are required.
//!
//! See [Db] for the main database type.
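//!
//! # Example
//!
//! A minimal usage sketch (not compiled; `context`, `config`, `key`, `value`, and `hasher` are
//! placeholder bindings for a runtime context, a populated `Config`, a key, a value, and a
//! `StandardHasher`, mirroring the unit tests below):
//!
//! ```ignore
//! // Open the database, stage an update, commit it, and prove the key's current value.
//! let db = Db::init(context, config).await?;
//! let mut db = db.into_mutable();
//! db.update(key, value).await?;
//! let (db, _) = db.commit(None).await?;
//! let db = db.into_merkleized().await?;
//! let root = db.root();
//! let proof = db.key_value_proof(hasher.inner(), key).await?;
//! assert!(Db::verify_key_value_proof(hasher.inner(), key, value, &proof, &root));
//! ```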

pub use super::db::KeyValueProof;
use crate::{
    bitmap::MerkleizedBitMap,
    journal::contiguous::fixed::Journal,
    mmr::{Location, StandardHasher},
    qmdb::{
        any::{
            unordered::fixed::{Db as AnyDb, Operation},
            value::FixedEncoding,
            FixedValue,
        },
        current::{
            db::{merkleize_grafted_bitmap, root, Merkleized},
            FixedConfig as Config,
        },
        Durable, Error,
    },
    translator::Translator,
};
use commonware_codec::FixedSize;
use commonware_cryptography::{DigestOf, Hasher};
use commonware_runtime::{Clock, Metrics, Storage as RStorage};
use commonware_utils::Array;

/// A specialization of [super::db::Db] for unordered key spaces and fixed-size values.
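/// For example, the unit tests below instantiate this alias as
/// `Db<deterministic::Context, Digest, Digest, Sha256, TwoCap, 32>`, using SHA-256 digests for
/// both keys and values and a 32-byte status-bitmap chunk.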
pub type Db<E, K, V, H, T, const N: usize, S = Merkleized<DigestOf<H>>, D = Durable> =
    super::db::Db<E, Journal<E, Operation<K, V>>, K, FixedEncoding<V>, H, T, N, S, D>;

// Functionality for the Merkleized state - init only.
impl<
        E: RStorage + Clock + Metrics,
        K: Array,
        V: FixedValue,
        H: Hasher,
        T: Translator,
        const N: usize,
    > Db<E, K, V, H, T, N, Merkleized<DigestOf<H>>, Durable>
{
    /// Initializes a [Db] authenticated database from the given `config`. Leverages parallel
    /// Merkleization to initialize the bitmap MMR if a thread pool is provided.
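    ///
    /// The returned database is in the merkleized, durable state; call `into_mutable` to begin
    /// staging updates.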
    pub async fn init(context: E, config: Config<T>) -> Result<Self, Error> {
        // TODO: Re-evaluate assertion placement after `generic_const_exprs` is stable.
        const {
            // A compile-time assertion that the chunk size is a multiple of the digest size. A
            // multiple of 1 is optimal with respect to proof size, but a higher multiple allows for
            // a smaller (RAM resident) merkle tree over the structure.
            assert!(
                N.is_multiple_of(H::Digest::SIZE),
                "chunk size must be a multiple of the digest size",
            );
            // A compile-time assertion that the chunk size is a power of 2, which is necessary to
            // allow the status bitmap tree to be aligned with the underlying operations MMR.
            assert!(N.is_power_of_two(), "chunk size must be a power of 2");
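            // For example, with a 32-byte digest such as SHA-256, N = 32 is the smallest valid
            // chunk size; N = 64 or N = 128 also satisfy both assertions, trading larger proofs
            // for a smaller in-memory tree.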
        }

        let thread_pool = config.thread_pool.clone();
        let bitmap_metadata_partition = config.bitmap_metadata_partition.clone();

        let mut hasher = StandardHasher::<H>::new();
        let mut status = MerkleizedBitMap::init(
            context.with_label("bitmap"),
            &bitmap_metadata_partition,
            thread_pool,
            &mut hasher,
        )
        .await?
        .into_dirty();

        // Initialize the anydb with a callback that initializes the status bitmap.
        let last_known_inactivity_floor = Location::new_unchecked(status.len());
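        // For each replayed operation, the callback pushes a status bit (`append`) and, when the
        // location (`loc`) of a prior operation that is no longer active is provided, clears that
        // operation's bit.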
        let any = AnyDb::init_with_callback(
            context.with_label("any"),
            config.into(),
            Some(last_known_inactivity_floor),
            |append: bool, loc: Option<Location>| {
                status.push(append);
                if let Some(loc) = loc {
                    status.set_bit(*loc, false);
                }
            },
        )
        .await?;

        let status = merkleize_grafted_bitmap(&mut hasher, status, &any.log.mmr).await?;

        // Compute and cache the root
        let root = root(&mut hasher, &status, &any.log.mmr).await?;

        Ok(Self {
            any,
            status,
            state: Merkleized { root },
        })
    }
}

#[cfg(test)]
pub mod test {
    use super::*;
    use crate::{
        kv::tests::{assert_batchable, assert_deletable, assert_gettable, assert_send},
        mmr::{hasher::Hasher as _, Proof},
        qmdb::{
            any::operation::update::Unordered as UnorderedUpdate,
            current::{
                db::Unmerkleized,
                proof::RangeProof,
                tests::{self, apply_random_ops},
            },
            store::{
                batch_tests,
                tests::{assert_log_store, assert_merkleized_store, assert_prunable_store},
                LogStore as _,
            },
            NonDurable,
        },
        translator::TwoCap,
    };
    use commonware_cryptography::{sha256::Digest, Sha256};
    use commonware_macros::test_traced;
    use commonware_runtime::{buffer::paged::CacheRef, deterministic, Runner as _};
    use commonware_utils::{NZUsize, NZU16, NZU64};
    use rand::RngCore;
    use std::num::{NonZeroU16, NonZeroUsize};

    const PAGE_SIZE: NonZeroU16 = NZU16!(88);
    const PAGE_CACHE_SIZE: NonZeroUsize = NZUsize!(8);

    fn current_db_config(partition_prefix: &str) -> Config<TwoCap> {
        Config {
            mmr_journal_partition: format!("{partition_prefix}_journal_partition"),
            mmr_metadata_partition: format!("{partition_prefix}_metadata_partition"),
            mmr_items_per_blob: NZU64!(11),
            mmr_write_buffer: NZUsize!(1024),
            log_journal_partition: format!("{partition_prefix}_log_journal_partition"),
            log_items_per_blob: NZU64!(7),
            log_write_buffer: NZUsize!(1024),
            bitmap_metadata_partition: format!("{partition_prefix}_bitmap_metadata_partition"),
            translator: TwoCap,
            thread_pool: None,
            page_cache: CacheRef::new(PAGE_SIZE, PAGE_CACHE_SIZE),
        }
    }

    /// A type alias for the concrete merkleized [Db] type used in these unit tests.
    type CleanCurrentTest = Db<deterministic::Context, Digest, Digest, Sha256, TwoCap, 32>;

    /// A type alias for the concrete mutable [Db] type used in these unit tests.
    type MutableCurrentTest =
        Db<deterministic::Context, Digest, Digest, Sha256, TwoCap, 32, Unmerkleized, NonDurable>;

    /// Return a [Db] database initialized with a fixed config.
    async fn open_db(
        context: deterministic::Context,
        partition_prefix: String,
    ) -> CleanCurrentTest {
        CleanCurrentTest::init(context, current_db_config(&partition_prefix))
            .await
            .unwrap()
    }

    #[test_traced("DEBUG")]
    pub fn test_current_db_build_small_close_reopen() {
        super::super::tests::test_build_small_close_reopen::<CleanCurrentTest, _, _>(open_db);
    }

    #[test_traced("WARN")]
    fn test_current_db_build_big() {
        // Expected values after commit + merkleize + prune for unordered variant.
        tests::test_current_db_build_big::<CleanCurrentTest, _, _>(open_db, 1957, 838);
    }

    // Test that merkleization state changes don't reset `steps`.
    #[test_traced("DEBUG")]
    fn test_current_unordered_fixed_db_steps_not_reset() {
        let executor = deterministic::Runner::default();
        executor.start(|context| async move {
            let db = open_db(context, "steps_test".to_string()).await;
            crate::qmdb::any::test::test_any_db_steps_not_reset(db).await;
        });
    }

    /// Build a tiny database and make sure we can't convince the verifier that some old value of a
    /// key is active. We specifically test over the partial chunk case, since these bits are yet to
    /// be committed to the underlying MMR.
    #[test_traced("DEBUG")]
    pub fn test_current_db_verify_proof_over_bits_in_uncommitted_chunk() {
        let executor = deterministic::Runner::default();
        executor.start(|context| async move {
            let mut hasher = StandardHasher::<Sha256>::new();
            let partition = "build_small".to_string();
            let mut db = open_db(context.with_label("db"), partition.clone())
                .await
                .into_mutable();

            // Add one key.
            let k = Sha256::fill(0x01);
            let v1 = Sha256::fill(0xA1);
            db.update(k, v1).await.unwrap();
            let (db, _) = db.commit(None).await.unwrap();
            let db = db.into_merkleized().await.unwrap();

            let (_, op_loc) = db.any.get_with_loc(&k).await.unwrap().unwrap();
            let proof = db.key_value_proof(hasher.inner(), k).await.unwrap();

            // Proof should be verifiable against current root.
            let root = db.root();
            assert!(CleanCurrentTest::verify_key_value_proof(
                hasher.inner(),
                k,
                v1,
                &proof,
                &root
            ));

            let v2 = Sha256::fill(0xA2);
            // Proof should not verify against a different value.
            assert!(!CleanCurrentTest::verify_key_value_proof(
                hasher.inner(),
                k,
                v2,
                &proof,
                &root,
            ));

            // Update the key to a new value (v2), which inactivates the previous operation.
            let mut db = db.into_mutable();
            db.update(k, v2).await.unwrap();
            let (db, _) = db.commit(None).await.unwrap();
            let db = db.into_merkleized().await.unwrap();
            let root = db.root();

            // New value should not be verifiable against the old proof.
            assert!(!CleanCurrentTest::verify_key_value_proof(
                hasher.inner(),
                k,
                v2,
                &proof,
                &root,
            ));

            // But the new value should verify against a new proof.
            let proof = db.key_value_proof(hasher.inner(), k).await.unwrap();
            assert!(CleanCurrentTest::verify_key_value_proof(
                hasher.inner(),
                k,
                v2,
                &proof,
                &root,
            ));

            // Old value will not verify against new proof.
            assert!(!CleanCurrentTest::verify_key_value_proof(
                hasher.inner(),
                k,
                v1,
                &proof,
                &root,
            ));

            // Create a proof of the now-inactive update operation assigning v1 to k against the
            // current root.
            let (range_proof, _, chunks) = db
                .range_proof(hasher.inner(), op_loc, NZU64!(1))
                .await
                .unwrap();
            let proof_inactive = KeyValueProof {
                loc: op_loc,
                chunk: chunks[0],
                range_proof,
            };
            // This proof should verify using verify_range_proof, which does not check activity
            // status.
            let op = Operation::Update(UnorderedUpdate(k, v1));
            assert!(CleanCurrentTest::verify_range_proof(
                hasher.inner(),
                &proof_inactive.range_proof,
                proof_inactive.loc,
                &[op],
                &[proof_inactive.chunk],
                &root,
            ));

            // But this proof should *not* verify as a key value proof, since verification will see
            // that the operation is inactive.
            assert!(!CleanCurrentTest::verify_key_value_proof(
                hasher.inner(),
                k,
                v1,
                &proof_inactive,
                &root,
            ));

            // Attempt #1 to "fool" the verifier: change the location to that of an active
            // operation. This should not fool the verifier if we're properly validating the
            // inclusion of the operation itself, and not just the chunk.
            let (_, active_loc) = db.any.get_with_loc(&k).await.unwrap().unwrap();
            // The new location should differ but still be in the same chunk.
            assert_ne!(active_loc, proof_inactive.loc);
            assert_eq!(
                MerkleizedBitMap::<deterministic::Context, Digest, 32>::leaf_pos(*active_loc),
                MerkleizedBitMap::<deterministic::Context, Digest, 32>::leaf_pos(
                    *proof_inactive.loc
                )
            );
            let mut fake_proof = proof_inactive.clone();
            fake_proof.loc = active_loc;
            assert!(!CleanCurrentTest::verify_key_value_proof(
                hasher.inner(),
                k,
                v1,
                &fake_proof,
                &root,
            ));

            // Attempt #2 to "fool" the verifier: Modify the chunk in the proof info to make it look
            // like the operation is active by flipping its corresponding bit to 1. This should not
            // fool the verifier if we are correctly incorporating the partial chunk information
            // into the root computation.
            let mut modified_chunk = proof_inactive.chunk;
            let bit_pos = *proof_inactive.loc;
            let byte_idx = bit_pos / 8;
            let bit_idx = bit_pos % 8;
            modified_chunk[byte_idx as usize] |= 1 << bit_idx;

            let mut fake_proof = proof_inactive.clone();
            fake_proof.chunk = modified_chunk;
            assert!(!CleanCurrentTest::verify_key_value_proof(
                hasher.inner(),
                k,
                v1,
                &fake_proof,
                &root,
            ));

            db.destroy().await.unwrap();
        });
    }

    #[test_traced("DEBUG")]
    pub fn test_current_db_range_proofs() {
        let executor = deterministic::Runner::default();
        executor.start(|mut context| async move {
            let partition = "range_proofs".to_string();
            let mut hasher = StandardHasher::<Sha256>::new();
            let db = open_db(context.with_label("db"), partition.clone()).await;
            let root = db.root();

            // Empty range proof should not crash or verify, since even an empty db has a single
            // commit op.
            let proof = RangeProof {
                proof: Proof::default(),
                partial_chunk_digest: None,
            };
            assert!(!CleanCurrentTest::verify_range_proof(
                hasher.inner(),
                &proof,
                Location::new_unchecked(0),
                &[],
                &[],
                &root,
            ));

            let db = apply_random_ops::<CleanCurrentTest>(
                200,
                true,
                context.next_u64(),
                db.into_mutable(),
            )
            .await
            .unwrap();
            let (db, _) = db.commit(None).await.unwrap();
            let db = db.into_merkleized().await.unwrap();
            let root = db.root();

            // Make sure size-constrained batches of operations are provable from the oldest
            // retained op to tip.
            let max_ops = 4;
            let end_loc = db.size();
            let start_loc = db.any.inactivity_floor_loc();

            for loc in *start_loc..*end_loc {
                let loc = Location::new_unchecked(loc);
                let (proof, ops, chunks) = db
                    .range_proof(hasher.inner(), loc, NZU64!(max_ops))
                    .await
                    .unwrap();
                assert!(
                    CleanCurrentTest::verify_range_proof(
                        hasher.inner(),
                        &proof,
                        loc,
                        &ops,
                        &chunks,
                        &root
                    ),
                    "failed to verify range proof starting at {loc}",
                );
                // Proof should not verify if we include extra chunks.
                let mut chunks_with_extra = chunks.clone();
                chunks_with_extra.push(chunks[chunks.len() - 1]);
                assert!(!CleanCurrentTest::verify_range_proof(
                    hasher.inner(),
                    &proof,
                    loc,
                    &ops,
                    &chunks_with_extra,
                    &root,
                ));
            }

            db.destroy().await.unwrap();
        });
    }

    #[test_traced("DEBUG")]
    pub fn test_current_db_key_value_proof() {
        let executor = deterministic::Runner::default();
        executor.start(|mut context| async move {
            let partition = "range_proofs".to_string();
            let mut hasher = StandardHasher::<Sha256>::new();
            let db = open_db(context.with_label("db"), partition.clone())
                .await
                .into_mutable();
            let db = apply_random_ops::<CleanCurrentTest>(500, true, context.next_u64(), db)
                .await
                .unwrap();
            let (db, _) = db.commit(None).await.unwrap();
            let db = db.into_merkleized().await.unwrap();
            let root = db.root();

            // Confirm bad keys produce the expected error.
            let bad_key = Sha256::fill(0xAA);
            let res = db.key_value_proof(hasher.inner(), bad_key).await;
            assert!(matches!(res, Err(Error::KeyNotFound)));

            let start = *db.inactivity_floor_loc();
            for i in start..db.status.len() {
                if !db.status.get_bit(i) {
                    continue;
                }
                // Found an active operation! Create a proof for its active current key/value if
                // it's a key-updating operation.
                let (key, value) = match db.any.log.read(Location::new_unchecked(i)).await.unwrap()
                {
                    Operation::Update(UnorderedUpdate(key, value)) => (key, value),
                    Operation::CommitFloor(_, _) => continue,
                    Operation::Delete(_) => {
                        unreachable!("location does not reference update/commit operation")
                    }
                };

                let proof = db.key_value_proof(hasher.inner(), key).await.unwrap();
                // Proof should validate against the current value and correct root.
                assert!(CleanCurrentTest::verify_key_value_proof(
                    hasher.inner(),
                    key,
                    value,
                    &proof,
                    &root
                ));
                // Proof should fail against the wrong value. Use hash instead of fill to ensure
                // the value differs from any key/value created by TestKey::from_seed (which uses
                // fill patterns).
                let wrong_val = Sha256::hash(&[0xFF]);
                assert!(!CleanCurrentTest::verify_key_value_proof(
                    hasher.inner(),
                    key,
                    wrong_val,
                    &proof,
                    &root
                ));
                // Proof should fail against the wrong key.
                let wrong_key = Sha256::hash(&[0xEE]);
                assert!(!CleanCurrentTest::verify_key_value_proof(
                    hasher.inner(),
                    wrong_key,
                    value,
                    &proof,
                    &root
                ));
                // Proof should fail against the wrong root.
                let wrong_root = Sha256::hash(&[0xDD]);
                assert!(!CleanCurrentTest::verify_key_value_proof(
                    hasher.inner(),
                    key,
                    value,
                    &proof,
                    &wrong_root,
                ));
            }

            db.destroy().await.unwrap();
        });
    }

    /// This test builds a random database, and makes sure that its state is correctly restored
    /// after closing and re-opening.
    #[test_traced("WARN")]
    pub fn test_current_db_build_random_close_reopen() {
        crate::qmdb::current::tests::test_build_random_close_reopen(open_db);
    }

    #[test_traced("WARN")]
    pub fn test_current_db_sync_persists_bitmap_pruning_boundary() {
        tests::test_sync_persists_bitmap_pruning_boundary::<CleanCurrentTest, _, _>(open_db);
    }

    /// Repeatedly update the same key to a new value and ensure we can prove its current value
    /// after each update.
    #[test_traced("WARN")]
    pub fn test_current_db_proving_repeated_updates() {
        let executor = deterministic::Runner::default();
        executor.start(|context| async move {
            let mut hasher = StandardHasher::<Sha256>::new();
            let partition = "build_small".to_string();
            let mut db = open_db(context.with_label("db"), partition.clone()).await;

            // Add one key.
            let k = Sha256::fill(0x00);
            let mut old_val = Sha256::fill(0x00);
            for i in 1u8..=255 {
                let v = Sha256::fill(i);
                let mut dirty_db = db.into_mutable();
                dirty_db.update(k, v).await.unwrap();
                assert_eq!(dirty_db.get(&k).await.unwrap().unwrap(), v);
                let (durable_db, _) = dirty_db.commit(None).await.unwrap();
                db = durable_db.into_merkleized().await.unwrap();
                let root = db.root();

                // Create a proof for the current value of k.
                let proof = db.key_value_proof(hasher.inner(), k).await.unwrap();
                assert!(
                    CleanCurrentTest::verify_key_value_proof(hasher.inner(), k, v, &proof, &root),
                    "proof of update {i} failed to verify"
                );
                // Ensure the proof does NOT verify if we use the previous value.
                assert!(
                    !CleanCurrentTest::verify_key_value_proof(
                        hasher.inner(),
                        k,
                        old_val,
                        &proof,
                        &root
                    ),
                    "proof of update {i} verified when it should not have"
                );
                old_val = v;
            }

            db.destroy().await.unwrap();
        });
    }

    /// This test builds a random database and ensures we can recover from different types of
    /// simulated write failures.
    #[test_traced("WARN")]
    pub fn test_current_db_simulate_write_failures() {
        crate::qmdb::current::tests::test_simulate_write_failures(open_db);
    }

    #[test_traced("WARN")]
    pub fn test_current_db_different_pruning_delays_same_root() {
        tests::test_different_pruning_delays_same_root::<CleanCurrentTest, _, _>(open_db);
    }

    #[test_traced("DEBUG")]
    fn test_batch() {
        batch_tests::test_batch(|mut ctx| async move {
            let seed = ctx.next_u64();
            let prefix = format!("current_unordered_batch_{seed}");
            open_db(ctx, prefix).await.into_mutable()
        });
    }

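    // The helpers below are never executed; they exist so the compiler checks that the
    // database's futures are `Send`.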
    #[allow(dead_code)]
    fn assert_clean_db_futures_are_send(db: &mut CleanCurrentTest, key: Digest, loc: Location) {
        assert_gettable(db, &key);
        assert_log_store(db);
        assert_prunable_store(db, loc);
        assert_merkleized_store(db, loc);
        assert_send(db.sync());
    }

    #[allow(dead_code)]
    fn assert_dirty_db_futures_are_send(db: &mut MutableCurrentTest, key: Digest, value: Digest) {
        assert_gettable(db, &key);
        assert_log_store(db);
        assert_send(db.update(key, value));
        assert_send(db.create(key, value));
        assert_deletable(db, key);
        assert_batchable(db, key, value);
    }

    #[allow(dead_code)]
    fn assert_mutable_db_commit_is_send(db: MutableCurrentTest) {
        assert_send(db.commit(None));
    }
}