vote_commitment_tree/server.rs

//! Generic and production vote commitment tree servers.
//!
//! [`GenericTreeServer`] is a single parameterised struct that backs both the
//! production server ([`TreeServer`], KV-backed) and the in-memory test/POC
//! server. The only difference between the two is the shard store
//! implementation they use.
//!
//! [`SyncableServer`] wraps [`GenericTreeServer`] and adds per-block leaf
//! tracking (`blocks`, `pending_leaves`, `pending_start`) needed to implement
//! [`crate::sync_api::TreeSyncApi`]. This is a sync-protocol concern, not a
//! tree concern, so it lives in a separate wrapper rather than on the core
//! tree type. [`MemoryTreeServer`] is a convenience alias for
//! `SyncableServer<MemoryShardStore<…>>`.
//!
//! In production, [`TreeServer`] is backed by a [`KvShardStore`] so all shard
//! reads/writes go directly to the Cosmos KV store through Go callbacks,
//! giving `ShardTree` true lazy loading.
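//!
//! # Example
//!
//! A minimal sketch of the in-memory flow (the leaf value is illustrative;
//! `MemoryShardStore`'s error type is `Infallible`, so `unwrap` is safe here):
//!
//! ```ignore
//! use pasta_curves::Fp;
//!
//! let mut server = MemoryTreeServer::empty();
//! let idx = server.append(Fp::from(42u64)).unwrap();
//! server.checkpoint(1).unwrap();
//! assert_eq!(server.size(), idx + 1);
//! let _anchor = server.root_at_height(1);
//! ```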

use std::collections::BTreeMap;

use incrementalmerkletree::{Hashable, Level, Retention};
use pasta_curves::{group::ff::PrimeField, Fp};
use shardtree::{
    error::ShardTreeError,
    store::{memory::MemoryShardStore, ShardStore},
    ShardTree,
};

use crate::hash::{MerkleHashVote, MAX_CHECKPOINTS, SHARD_HEIGHT, TREE_DEPTH};
use crate::kv_shard_store::{KvCallbacks, KvError, KvShardStore};
use crate::path::MerklePath;
use crate::sync_api::BlockCommitments;

// ---------------------------------------------------------------------------
// GenericTreeServer
// ---------------------------------------------------------------------------

/// An append-only Poseidon Merkle tree server backed by any [`shardtree::store::ShardStore`].
///
/// Use the type aliases [`TreeServer`] (KV-backed) and [`MemoryTreeServer`]
/// (in-memory) rather than naming this type directly.
///
/// Methods that mutate the tree return `Result` so storage failures are visible to
/// callers. For [`MemoryTreeServer`] the error type is `Infallible`, so those
/// results can be safely `.unwrap()`-ed; for [`TreeServer`] the error type is
/// [`crate::kv_shard_store::KvError`] and must be propagated.
pub struct GenericTreeServer<S: ShardStore<H = MerkleHashVote, CheckpointId = u32>> {
    pub(crate) inner: ShardTree<S, { TREE_DEPTH as u8 }, { SHARD_HEIGHT }>,
    /// Latest checkpoint id (block height) that has been recorded.
    pub(crate) latest_checkpoint: Option<u32>,
    /// Number of leaves appended so far.
    pub(crate) next_position: u64,
}

/// Production vote commitment tree backed by the Cosmos KV store.
pub type TreeServer = GenericTreeServer<KvShardStore>;

/// A [`GenericTreeServer`] augmented with per-block leaf tracking for the
/// [`crate::sync_api::TreeSyncApi`].
///
/// The sync protocol needs to know which leaves were appended in each block so
/// that clients can download compact-block diffs. That tracking is a sync
/// concern, not a tree concern, so it lives here rather than on
/// [`GenericTreeServer`].
///
/// Use the [`MemoryTreeServer`] type alias for the common in-memory case.
pub struct SyncableServer<S: ShardStore<H = MerkleHashVote, CheckpointId = u32>> {
    pub(crate) tree: GenericTreeServer<S>,
    /// Completed blocks: height → commitments (populated on `checkpoint`).
    pub(crate) blocks: BTreeMap<u32, BlockCommitments>,
    /// Leaves accumulated for the current (not yet checkpointed) block.
    pending_leaves: Vec<MerkleHashVote>,
    /// Start index for the pending block.
    pending_start: u64,
}

/// In-memory vote commitment tree for tests and the POC helper server.
///
/// This is a type alias for [`SyncableServer`] backed by [`MemoryShardStore`].
/// It supports [`crate::sync_api::TreeSyncApi`] via per-block leaf tracking
/// inside `SyncableServer`. Tests that do not need sync can use
/// `GenericTreeServer<MemoryShardStore<MerkleHashVote, u32>>` directly.
pub type MemoryTreeServer = SyncableServer<MemoryShardStore<MerkleHashVote, u32>>;

// ---------------------------------------------------------------------------
// AppendFromKvError
// ---------------------------------------------------------------------------

/// Error returned by [`TreeServer::append_from_kv`].
#[derive(Debug)]
pub enum AppendFromKvError {
    /// A KV callback failed while reading a leaf.
    Kv(KvError),
    /// A leaf key was missing from the application KV store.
    MissingLeaf(u64),
    /// A leaf blob had an unexpected length or a non-canonical Fp encoding.
    MalformedLeaf(u64),
    /// The underlying `ShardTree` rejected the append (e.g. tree full or
    /// storage error).
    Tree(ShardTreeError<KvError>),
}

impl From<KvError> for AppendFromKvError {
    fn from(e: KvError) -> Self {
        AppendFromKvError::Kv(e)
    }
}

impl std::fmt::Display for AppendFromKvError {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            AppendFromKvError::Kv(e) => write!(f, "KV error reading leaf: {}", e),
            AppendFromKvError::MissingLeaf(i) => {
                write!(f, "leaf at index {} is missing from KV", i)
            }
            AppendFromKvError::MalformedLeaf(i) => {
                write!(
                    f,
                    "leaf at index {} is malformed (wrong length or non-canonical Fp)",
                    i
                )
            }
            AppendFromKvError::Tree(e) => write!(f, "ShardTree error: {:?}", e),
        }
    }
}

impl std::error::Error for AppendFromKvError {}

// ---------------------------------------------------------------------------
// CheckpointError
// ---------------------------------------------------------------------------

/// Error returned by [`GenericTreeServer::checkpoint`] and [`SyncableServer::checkpoint`].
#[derive(Debug)]
pub enum CheckpointError<E> {
    /// The requested checkpoint height is not strictly greater than the most
    /// recently recorded one. Checkpoint IDs must increase monotonically.
    NotMonotonic { prev: u32, requested: u32 },
    /// The underlying [`ShardTree`] rejected the checkpoint (storage error).
    Tree(ShardTreeError<E>),
}

impl<E: std::fmt::Debug> std::fmt::Display for CheckpointError<E> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            CheckpointError::NotMonotonic { prev, requested } => write!(
                f,
                "checkpoint height must be strictly increasing: {} <= {}",
                requested, prev
            ),
            CheckpointError::Tree(e) => write!(f, "ShardTree error: {:?}", e),
        }
    }
}

impl<E: std::fmt::Debug + 'static> std::error::Error for CheckpointError<E> {}

impl<E> From<ShardTreeError<E>> for CheckpointError<E> {
    fn from(e: ShardTreeError<E>) -> Self {
        CheckpointError::Tree(e)
    }
}

// ---------------------------------------------------------------------------
// TreeServer constructor
// ---------------------------------------------------------------------------

impl TreeServer {
    /// Create a new KV-backed tree server.
    ///
    /// `next_position` is `CommitmentTreeState.NextIndex` from KV (0 on first
    /// boot). On a cold start, `latest_checkpoint` is initialised from the
    /// maximum checkpoint ID persisted in the KV store, so that `root()`
    /// returns the correct value even before the first checkpoint after restart.
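    ///
    /// A construction sketch (`callbacks` and `next_index` are placeholders
    /// for the values handed over from Go via CGO):
    ///
    /// ```ignore
    /// let server = TreeServer::new(callbacks, next_index);
    /// assert_eq!(server.size(), next_index);
    /// ```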
    pub fn new(cb: KvCallbacks, next_position: u64) -> Self {
        let store = KvShardStore::new(cb);
        // Initialise latest_checkpoint from the KV store before handing the
        // store to ShardTree (which takes ownership). Errors are treated as
        // "no checkpoint"; latest_checkpoint is then restored by the next
        // `checkpoint` call.
        let latest_checkpoint = store.max_checkpoint_id().unwrap_or(None);
        Self {
            inner: ShardTree::new(store, MAX_CHECKPOINTS),
            latest_checkpoint,
            next_position,
        }
    }
}

// ---------------------------------------------------------------------------
// TreeServer: append_from_kv (delta-append directly from KV)
// ---------------------------------------------------------------------------

impl TreeServer {
    /// Append `count` leaves starting at `cursor` by reading them directly
    /// from the application KV store via KV callbacks, skipping the Go-side
    /// leaf fetch and CGO serialization round-trip.
    ///
    /// Each leaf is stored at `0x02 || u64 BE index` in the Cosmos KV store
    /// (the `CommitmentLeafKey` format from `types/keys.go`). On success, the
    /// tree's internal leaf count advances by `count`.
    ///
    /// This is the production delta-append path: a single CGO call to this
    /// function replaces the `newLeaves` allocation + per-leaf KV read loop
    /// that was previously done in `ensureTreeLoaded`.
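    ///
    /// For illustration, the leaf at index 5 lives under the 9-byte key
    /// `0x02 || be64(5)`; a sketch of a delta-append of three leaves starting
    /// there:
    ///
    /// ```ignore
    /// // Reads keys 0x02 || be64(5) through 0x02 || be64(7) and appends
    /// // each 32-byte blob as a canonical Fp leaf.
    /// server.append_from_kv(5, 3)?;
    /// ```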
    pub fn append_from_kv(&mut self, cursor: u64, count: u64) -> Result<(), AppendFromKvError> {
        for i in cursor..cursor + count {
            let key = Self::leaf_key(i);
            let blob = self
                .inner
                .store()
                .cb
                .get(&key)?
                .ok_or(AppendFromKvError::MissingLeaf(i))?;
            if blob.len() != 32 {
                return Err(AppendFromKvError::MalformedLeaf(i));
            }
            let mut repr = [0u8; 32];
            repr.copy_from_slice(&blob);
            let fp: Option<Fp> = Fp::from_repr(repr).into();
            let fp = fp.ok_or(AppendFromKvError::MalformedLeaf(i))?;
            self.append(fp).map_err(AppendFromKvError::Tree)?;
        }
        Ok(())
    }

    /// KV key for an app-level commitment leaf: `0x02 || u64 BE index`.
    ///
    /// Matches `types.CommitmentLeafKey(index)` in keys.go.
    fn leaf_key(index: u64) -> [u8; 9] {
        let mut k = [0u8; 9];
        k[0] = 0x02;
        k[1..].copy_from_slice(&index.to_be_bytes());
        k
    }
}

// ---------------------------------------------------------------------------
// SyncableServer constructor
// ---------------------------------------------------------------------------

impl SyncableServer<MemoryShardStore<MerkleHashVote, u32>> {
    /// Create an empty in-memory syncable tree.
    pub fn empty() -> Self {
        Self {
            tree: GenericTreeServer {
                inner: ShardTree::new(MemoryShardStore::empty(), MAX_CHECKPOINTS),
                latest_checkpoint: None,
                next_position: 0,
            },
            blocks: BTreeMap::new(),
            pending_leaves: Vec::new(),
            pending_start: 0,
        }
    }
}

// ---------------------------------------------------------------------------
// SyncableServer: shared impl for all S
// ---------------------------------------------------------------------------

impl<S> SyncableServer<S>
where
    S: ShardStore<H = MerkleHashVote, CheckpointId = u32>,
    S::Error: std::fmt::Debug,
{
    /// Wrap an existing [`GenericTreeServer`] with sync tracking.
    pub fn new(tree: GenericTreeServer<S>) -> Self {
        Self {
            // Start the pending block at the wrapped tree's current leaf
            // count so `start_index` is correct for non-empty trees.
            pending_start: tree.next_position,
            tree,
            blocks: BTreeMap::new(),
            pending_leaves: Vec::new(),
        }
    }

    /// Append a single leaf and record it in the pending block.
    pub fn append(&mut self, leaf: Fp) -> Result<u64, ShardTreeError<S::Error>> {
        let hash = MerkleHashVote::from_fp(leaf);
        let idx = self.tree.append(leaf)?;
        self.pending_leaves.push(hash);
        Ok(idx)
    }

    /// Append two leaves (e.g. new VAN + VC from `MsgCastVote`).
    pub fn append_two(&mut self, first: Fp, second: Fp) -> Result<u64, ShardTreeError<S::Error>> {
        let idx = self.append(first)?;
        self.append(second)?;
        Ok(idx)
    }

    /// Snapshot the current tree state and record block-level commitments.
    ///
    /// # Errors
    /// Returns [`CheckpointError::NotMonotonic`] if `height` is not strictly
    /// greater than the previous checkpoint height.
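    ///
    /// A sketch of the per-block bookkeeping (`van` and `vc` are illustrative
    /// field elements):
    ///
    /// ```ignore
    /// server.append_two(van, vc)?;
    /// server.checkpoint(1)?; // block 1 records leaves [van, vc] and the new root
    /// server.checkpoint(2)?; // block 2 records an empty leaf list
    /// ```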
    pub fn checkpoint(&mut self, height: u32) -> Result<(), CheckpointError<S::Error>> {
        self.tree.checkpoint(height)?;
        let root = self
            .root_at_height(height)
            .unwrap_or_else(|| self.tree.root());
        let commitments = BlockCommitments {
            height,
            start_index: self.pending_start,
            leaves: std::mem::take(&mut self.pending_leaves),
            root,
        };
        self.blocks.insert(height, commitments);
        self.pending_start = self.tree.next_position;
        Ok(())
    }

    /// Current Merkle root (at the latest checkpoint).
    pub fn root(&self) -> Fp {
        self.tree.root()
    }

    /// Root at a specific checkpoint height.
    pub fn root_at_height(&self, height: u32) -> Option<Fp> {
        self.tree.root_at_height(height)
    }

    /// Number of leaves appended.
    pub fn size(&self) -> u64 {
        self.tree.size()
    }

    /// Build a Merkle path for the leaf at `position` at `anchor_height`.
    pub fn path(&self, position: u64, anchor_height: u32) -> Option<MerklePath> {
        self.tree.path(position, anchor_height)
    }

    /// Latest checkpoint height.
    pub fn latest_checkpoint(&self) -> Option<u32> {
        self.tree.latest_checkpoint
    }
}

// ---------------------------------------------------------------------------
// Shared impl for all GenericTreeServer<S>
// ---------------------------------------------------------------------------

impl<S> GenericTreeServer<S>
where
    S: ShardStore<H = MerkleHashVote, CheckpointId = u32>,
    S::Error: std::fmt::Debug,
{
    /// Append a single leaf (e.g. one VAN from `MsgDelegateVote`).
    ///
    /// The leaf is marked so witnesses can be generated for it later.
    /// Returns the leaf index.
    pub fn append(&mut self, leaf: Fp) -> Result<u64, ShardTreeError<S::Error>> {
        let index = self.next_position;
        let hash = MerkleHashVote::from_fp(leaf);
        self.inner.append(hash, Retention::Marked)?;
        self.next_position += 1;
        Ok(index)
    }

    /// Append two leaves (e.g. new VAN + VC from `MsgCastVote`).
    ///
    /// Returns the index of the first leaf.
    pub fn append_two(&mut self, first: Fp, second: Fp) -> Result<u64, ShardTreeError<S::Error>> {
        let index = self.append(first)?;
        self.append(second)?;
        Ok(index)
    }

    /// Snapshot the current tree state at the given block height.
    ///
    /// Called by EndBlocker after processing all transactions in a block.
    /// The root at this checkpoint becomes a valid anchor for ZKP #2 / ZKP #3.
    ///
    /// # Errors
    /// Returns [`CheckpointError::NotMonotonic`] if `height` is not strictly
    /// greater than the previous checkpoint height.
    /// Returns [`CheckpointError::Tree`] if the underlying shard store fails.
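    ///
    /// A sketch of the monotonicity rule:
    ///
    /// ```ignore
    /// server.checkpoint(10)?;
    /// assert!(server.checkpoint(10).is_err()); // NotMonotonic: 10 <= 10
    /// server.checkpoint(11)?;                  // ok, strictly increasing
    /// ```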
    pub fn checkpoint(&mut self, height: u32) -> Result<(), CheckpointError<S::Error>> {
        if let Some(prev) = self.latest_checkpoint {
            if height <= prev {
                return Err(CheckpointError::NotMonotonic {
                    prev,
                    requested: height,
                });
            }
        }
        self.inner.checkpoint(height)?;
        self.latest_checkpoint = Some(height);
        Ok(())
    }

    /// Current Merkle root (at the latest checkpoint).
    pub fn root(&self) -> Fp {
        self.latest_checkpoint
            .and_then(|id| self.root_at_height(id))
            .unwrap_or_else(|| MerkleHashVote::empty_root(Level::from(TREE_DEPTH as u8)).0)
    }

    /// Root at a specific checkpoint height (anchor lookup).
    ///
    /// Returns `None` if the checkpoint does not exist.
    pub fn root_at_height(&self, height: u32) -> Option<Fp> {
        self.inner
            .root_at_checkpoint_id(&height)
            .ok()
            .flatten()
            .map(|h| h.0)
    }

    /// Number of leaves appended.
    pub fn size(&self) -> u64 {
        self.next_position
    }

    /// Set the leaf count directly (e.g. to restore after a cold start when
    /// `next_position` was not passed to [`TreeServer::new`]).
    pub fn set_next_position(&mut self, pos: u64) {
        self.next_position = pos;
    }

    /// Build a Merkle path for the leaf at `position`, valid at the given
    /// checkpoint `anchor_height`.
    ///
    /// Returns `None` if the position or checkpoint is invalid.
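    ///
    /// A sketch of witnessing a leaf against a checkpointed anchor (`leaf` is
    /// an illustrative field element):
    ///
    /// ```ignore
    /// let idx = server.append(leaf)?;  // appended with Retention::Marked
    /// server.checkpoint(7)?;
    /// let path = server.path(idx, 7).expect("marked leaf at a known anchor");
    /// ```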
    pub fn path(&self, position: u64, anchor_height: u32) -> Option<MerklePath> {
        let pos = incrementalmerkletree::Position::from(position);
        self.inner
            .witness_at_checkpoint_id(pos, &anchor_height)
            .ok()
            .flatten()
            .map(MerklePath::from)
    }
}