// exoware_qmdb — crate root (lib.rs)
//! Store-backed bridge for Commonware authenticated storage proofs.
//!
//! The crate currently supports multiple Commonware authenticated backends:
//! - ordered QMDB (`qmdb::any` and `qmdb::current::ordered`)
//! - immutable (`qmdb::immutable`)
//! - keyless (`qmdb::keyless`)
//!
//! Writers upload exact Commonware operations into the Exoware store, then publish an
//! externally authoritative watermark once the uploaded prefix is complete.
//!
//! Uploads may still happen concurrently and out of order. Current batch-boundary
//! state may also be uploaded ahead of publication. Only watermark publication is
//! monotonic: publishing watermark `W` means the whole contiguous prefix
//! `[0, W]` is available and may now be trusted by readers.
//!
//! Readers fence historical queries against that low watermark. Historical proofs
//! use the global ops-MMR nodes stored by `Position`.
//!
//! Current ordered proofs use versioned current-state deltas:
//! - bitmap chunk rows
//! - grafted-node rows
//!
//! Those rows are versioned by the uploaded batch boundary `Location`, not by the
//! final published watermark. That is what preserves lower-boundary current
//! proofs below a later published low watermark.

27mod auth;
28mod boundary;
29pub(crate) mod codec;
30mod connect;
31mod connect_client;
32mod core;
33pub mod error;
34pub mod proof;
35pub mod prune;
36pub(crate) mod storage;
37
38mod immutable;
39mod keyless;
40mod ordered;
41mod subscription;
42mod unordered;
43mod writer;
44
45pub use error::{ProofKind, QmdbError};
46pub use immutable::ImmutableClient;
47pub use keyless::KeylessClient;
48pub use ordered::OrderedClient;
49pub use proof::{
50    OperationRangeCheckpoint, RawCurrentRangeProof, RawKeyValueProof, RawMmrProof, RawMultiProof,
51    VariantRoot, VerifiedCurrentRange, VerifiedKeyValue, VerifiedMultiOperations,
52    VerifiedOperationRange, VerifiedVariantRange,
53};
54pub use unordered::UnorderedClient;
55pub use writer::{
56    build_immutable_upload, build_keyless_upload, build_ordered_upload, build_unordered_upload,
57    BuiltImmutableUpload, BuiltKeylessUpload, BuiltOrderedUpload, BuiltUnorderedUpload,
58    ImmutableWriter, KeylessWriter, OrderedWriter, PreparedUpload, PreparedWatermark,
59    UnorderedWriter,
60};
61
62pub use boundary::recover_boundary_state;
63pub use connect::{
64    immutable_range_connect_stack, keyless_range_connect_stack, ordered_connect_stack,
65    unordered_range_connect_stack, ImmutableRangeConnect, KeylessRangeConnect, OrderedConnect,
66    OrderedRangeConnect, UnorderedRangeConnect,
67};
68pub use connect_client::{
69    ImmutableRangeConnectClient, KeylessRangeConnectClient, OrderedConnectClient,
70    OrderedRangeConnectClient, RangeConnectSubscription, RangeSubscribeProof,
71    UnorderedRangeConnectClient,
72};
73
use commonware_codec::Encode;
use commonware_cryptography::{Digest, Hasher};
use commonware_storage::mmr::{iterator::PeakIterator, Location, Position, Proof, StandardHasher};

/// Maximum encoded operation size, in bytes, for QMDB key and value payloads.
///
/// Payload lengths are carried as a `u16` on the wire, so the cap is `u16::MAX`.
pub const MAX_OPERATION_SIZE: usize = u16::MAX as usize;

/// QMDB proof/root variant supported by `exoware-qmdb`.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum QmdbVariant {
    /// Historical `qmdb::any` root / proof over the uploaded ordered operation log.
    Any,
    /// Current-state `qmdb::current::ordered` root / proof at an uploaded batch boundary.
    Current,
}

90/// Historical value resolved for one logical key.
91#[derive(Clone, Debug, PartialEq, Eq)]
92pub struct VersionedValue<K, V> {
93    pub key: K,
94    pub location: Location,
95    pub value: Option<V>,
96}
97
98/// Metadata returned after uploading one batch of QMDB operations.
99#[derive(Clone, Copy, Debug, PartialEq, Eq)]
100pub struct UploadReceipt {
101    /// Monotonic request id assigned by the writer for this upload.
102    pub writer_request_id: u64,
103    /// Inclusive maximum Location of ops in this batch.
104    pub latest_location: Location,
105    /// Store sequence number at which this upload's rows became durable.
106    pub store_sequence_number: u64,
107    /// The watermark this batch published, if any. `None` when pipelining
108    /// deferred the watermark to a later `flush()` or batch.
109    pub writer_location_watermark: Option<PublishedCheckpoint>,
110}
111
112/// Writer publication point that is known to be durable in Store.
113#[derive(Clone, Copy, Debug, PartialEq, Eq)]
114pub struct PublishedCheckpoint {
115    /// Inclusive maximum QMDB Location authorized by this checkpoint.
116    pub location: Location,
117    /// Store sequence number at which the checkpoint became visible.
118    pub sequence_number: u64,
119}
120
121/// Caller-owned frontier for resuming a single-writer helper without reading
122/// the store.
123#[derive(Clone, Debug, PartialEq, Eq)]
124pub struct WriterState<D: Digest> {
125    pub peaks: Vec<(Position, u32, D)>,
126    pub ops_size: Position,
127    pub next_location: Location,
128}
129
impl<D: Digest> WriterState<D> {
    /// Frontier of a brand-new, empty log: no peaks, zero ops, next location 0.
    pub fn empty() -> Self {
        Self {
            peaks: Vec::new(),
            ops_size: Position::new(0),
            next_location: Location::new(0),
        }
    }

    /// Inclusive `Location` of the last committed operation, or `None` when
    /// nothing has been committed yet (`next_location == 0`).
    pub fn latest_committed_location(&self) -> Option<Location> {
        self.next_location.checked_sub(1)
    }

    /// Rebuilds a writer frontier from a verified [`OperationRangeCheckpoint`].
    ///
    /// # Errors
    ///
    /// Returns [`QmdbError::CorruptData`] when peak reconstruction fails, when
    /// the checkpoint's leaf count is not a valid `Position`, or when the
    /// checkpoint watermark is at the `Location` maximum so `watermark + 1`
    /// would overflow.
    pub fn from_checkpoint<H: Hasher<Digest = D>>(
        checkpoint: &OperationRangeCheckpoint<D>,
    ) -> Result<Self, QmdbError> {
        Ok(Self {
            peaks: checkpoint.reconstruct_peaks::<H>()?,
            ops_size: Position::try_from(checkpoint.proof.leaves).map_err(|e| {
                QmdbError::CorruptData(format!("invalid checkpoint leaf count: {e}"))
            })?,
            // Resume writing immediately after the proven watermark.
            next_location: checkpoint
                .watermark
                .checked_add(1)
                .ok_or_else(|| QmdbError::CorruptData("checkpoint watermark overflow".into()))?,
        })
    }

    /// Rebuilds a writer frontier from a raw MMR range `proof` plus the
    /// `operations` it covers, which start at `start_location` in the op log.
    ///
    /// The operations are re-encoded and hashed to reconstruct the peak
    /// digests, so the result is only as trustworthy as the supplied proof.
    ///
    /// # Errors
    ///
    /// Returns [`QmdbError::CorruptData`] when peak reconstruction fails, the
    /// proof's leaf count is not a valid `Position`, the reconstructed digest
    /// count disagrees with the peak layout implied by that size, or
    /// `watermark + 1` overflows.
    pub fn from_proof<H, Op>(
        watermark: Location,
        start_location: Location,
        proof: &Proof<D>,
        operations: &[Op],
    ) -> Result<Self, QmdbError>
    where
        H: Hasher<Digest = D>,
        Op: Encode,
    {
        let encoded_operations: Vec<Vec<u8>> =
            operations.iter().map(|op| op.encode().to_vec()).collect();
        let mut hasher = StandardHasher::<H>::new();
        let peak_digests = proof
            .reconstruct_peak_digests(&mut hasher, &encoded_operations, start_location, None)
            .map_err(|e| QmdbError::CorruptData(format!("reconstruct proof peaks failed: {e}")))?;
        let ops_size = Position::try_from(proof.leaves)
            .map_err(|e| QmdbError::CorruptData(format!("invalid proof leaf count: {e}")))?;
        // The peak layout (positions and heights) is fully determined by the
        // MMR size; the reconstructed digest count must match it exactly.
        let peak_entries: Vec<(Position, u32)> = PeakIterator::new(ops_size).collect();
        if peak_entries.len() != peak_digests.len() {
            return Err(QmdbError::CorruptData(format!(
                "proof peak count mismatch: expected {}, got {}",
                peak_entries.len(),
                peak_digests.len()
            )));
        }
        Ok(Self {
            // Zip (position, height) with its reconstructed digest.
            peaks: peak_entries
                .into_iter()
                .zip(peak_digests)
                .map(|((pos, height), digest)| (pos, height, digest))
                .collect(),
            ops_size,
            next_location: watermark
                .checked_add(1)
                .ok_or_else(|| QmdbError::CorruptData("proof watermark overflow".into()))?,
        })
    }
}

198/// Current-state rows for one uploaded ordered batch boundary.
199///
200/// Ordered QMDB uploads carry more than the historical op log: each published
201/// batch boundary also stores the current-state root plus the subset of bitmap
202/// chunks and grafted-MMR nodes that changed at that boundary. This struct is
203/// that versioned delta payload.
204///
205/// Callers typically obtain it from [`recover_boundary_state`], using a local
206/// Commonware `current::ordered::Db`, and then pass it to
207/// [`OrderedWriter::prepare_upload`].
208#[derive(Clone, Debug, PartialEq, Eq)]
209pub struct CurrentBoundaryState<D: Digest, const N: usize> {
210    /// Canonical current-state root at this batch boundary.
211    pub root: D,
212    /// Changed bitmap chunks keyed by chunk index.
213    pub chunks: Vec<(u64, [u8; N])>,
214    /// Changed grafted-MMR digests keyed by ops-space MMR position.
215    pub grafted_nodes: Vec<(commonware_storage::mmr::Position, D)>,
216}