vote_commitment_tree/server.rs
//! Generic and production vote commitment tree servers.
//!
//! [`GenericTreeServer`] is a single parameterised struct that backs both the
//! production server ([`TreeServer`], KV-backed) and the in-memory test/POC
//! server. The only difference between the two is the shard store
//! implementation they use.
//!
//! [`SyncableServer`] wraps [`GenericTreeServer`] and adds per-block leaf
//! tracking (`blocks`, `pending_leaves`, `pending_start`) needed to implement
//! [`crate::sync_api::TreeSyncApi`]. This is a sync-protocol concern, not a
//! tree concern, so it lives in a separate wrapper rather than on the core
//! tree type. [`MemoryTreeServer`] is a convenience alias for
//! `SyncableServer<MemoryShardStore<…>>`.
//!
//! In production, [`TreeServer`] is backed by a [`KvShardStore`] so all shard
//! reads/writes go directly to the Cosmos KV store through Go callbacks,
//! giving `ShardTree` true lazy loading.
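//!
//! A minimal usage sketch of the in-memory server (not compiled as a doctest;
//! the `vote_commitment_tree::server` module path is assumed):
//!
//! ```ignore
//! use pasta_curves::Fp;
//! use vote_commitment_tree::server::MemoryTreeServer;
//!
//! let mut tree = MemoryTreeServer::empty();
//! // Append two leaves for "block 1", then checkpoint at that height.
//! let first = tree.append_two(Fp::from(1u64), Fp::from(2u64)).unwrap();
//! tree.checkpoint(1).unwrap();
//! assert_eq!(first, 0);
//! assert_eq!(tree.size(), 2);
//! // The root at the latest checkpoint is now a valid anchor.
//! let _anchor: Fp = tree.root();
//! ```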

use std::collections::BTreeMap;

use incrementalmerkletree::{Hashable, Level, Retention};
use pasta_curves::{group::ff::PrimeField, Fp};
use shardtree::{error::ShardTreeError, store::{memory::MemoryShardStore, ShardStore}, ShardTree};

use crate::hash::{MerkleHashVote, MAX_CHECKPOINTS, SHARD_HEIGHT, TREE_DEPTH};
use crate::kv_shard_store::{KvCallbacks, KvError, KvShardStore};
use crate::path::MerklePath;
use crate::sync_api::BlockCommitments;

// ---------------------------------------------------------------------------
// GenericTreeServer
// ---------------------------------------------------------------------------

/// An append-only Poseidon Merkle tree server backed by any [`shardtree::store::ShardStore`].
///
/// Use the type aliases [`TreeServer`] (KV-backed) and [`MemoryTreeServer`]
/// (in-memory) rather than naming this type directly.
///
/// Methods that mutate the tree return `Result` so storage failures are visible to
/// callers. For [`MemoryTreeServer`] the error type is `Infallible`, so those
/// results can be safely `.unwrap()`-ed; for [`TreeServer`] the error type is
/// [`crate::kv_shard_store::KvError`] and must be propagated.
pub struct GenericTreeServer<S: ShardStore<H = MerkleHashVote, CheckpointId = u32>> {
    pub(crate) inner: ShardTree<S, { TREE_DEPTH as u8 }, { SHARD_HEIGHT }>,
    /// Latest checkpoint id (block height) that has been recorded.
    pub(crate) latest_checkpoint: Option<u32>,
    /// Number of leaves appended so far.
    pub(crate) next_position: u64,
}

/// Production vote commitment tree backed by the Cosmos KV store.
pub type TreeServer = GenericTreeServer<KvShardStore>;

/// A [`GenericTreeServer`] augmented with per-block leaf tracking for the
/// [`crate::sync_api::TreeSyncApi`].
///
/// The sync protocol needs to know which leaves were appended in each block so
/// that clients can download compact-block diffs. That tracking is a sync
/// concern, not a tree concern, so it lives here rather than on
/// [`GenericTreeServer`].
///
/// Use the [`MemoryTreeServer`] type alias for the common in-memory case.
pub struct SyncableServer<S: ShardStore<H = MerkleHashVote, CheckpointId = u32>> {
    pub(crate) tree: GenericTreeServer<S>,
    /// Completed blocks: height → commitments (populated on `checkpoint`).
    pub(crate) blocks: BTreeMap<u32, BlockCommitments>,
    /// Leaves accumulated for the current (not yet checkpointed) block.
    pending_leaves: Vec<MerkleHashVote>,
    /// Start index for the pending block.
    pending_start: u64,
}

/// In-memory vote commitment tree for tests and the POC helper server.
///
/// This is a type alias for [`SyncableServer`] backed by [`MemoryShardStore`].
/// It supports [`crate::sync_api::TreeSyncApi`] via per-block leaf tracking
/// inside `SyncableServer`. Tests that do not need sync can use
/// `GenericTreeServer<MemoryShardStore<MerkleHashVote, u32>>` directly.
pub type MemoryTreeServer = SyncableServer<MemoryShardStore<MerkleHashVote, u32>>;

// ---------------------------------------------------------------------------
// AppendFromKvError
// ---------------------------------------------------------------------------

/// Error returned by [`TreeServer::append_from_kv`].
#[derive(Debug)]
pub enum AppendFromKvError {
    /// A KV callback failed while reading a leaf.
    Kv(KvError),
    /// A leaf key was missing from the application KV store.
    MissingLeaf(u64),
    /// A leaf blob had an unexpected length or a non-canonical Fp encoding.
    MalformedLeaf(u64),
    /// The underlying `ShardTree` rejected the append (e.g. tree full or
    /// storage error).
    Tree(ShardTreeError<KvError>),
}

impl From<KvError> for AppendFromKvError {
    fn from(e: KvError) -> Self {
        AppendFromKvError::Kv(e)
    }
}

impl std::fmt::Display for AppendFromKvError {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            AppendFromKvError::Kv(e) => write!(f, "KV error reading leaf: {}", e),
            AppendFromKvError::MissingLeaf(i) => write!(f, "leaf at index {} is missing from KV", i),
            AppendFromKvError::MalformedLeaf(i) => {
                write!(f, "leaf at index {} is malformed (wrong length or non-canonical Fp)", i)
            }
            AppendFromKvError::Tree(e) => write!(f, "ShardTree error: {:?}", e),
        }
    }
}

impl std::error::Error for AppendFromKvError {}

// ---------------------------------------------------------------------------
// CheckpointError
// ---------------------------------------------------------------------------

/// Error returned by [`GenericTreeServer::checkpoint`] and [`SyncableServer::checkpoint`].
#[derive(Debug)]
pub enum CheckpointError<E> {
    /// The requested checkpoint height is not strictly greater than the most
    /// recently recorded one. Checkpoint IDs must increase monotonically.
    NotMonotonic { prev: u32, requested: u32 },
    /// The underlying [`ShardTree`] rejected the checkpoint (storage error).
    Tree(ShardTreeError<E>),
}

impl<E: std::fmt::Debug> std::fmt::Display for CheckpointError<E> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            CheckpointError::NotMonotonic { prev, requested } => write!(
                f,
                "checkpoint height must be strictly increasing: {} <= {}",
                requested, prev
            ),
            CheckpointError::Tree(e) => write!(f, "ShardTree error: {:?}", e),
        }
    }
}

impl<E: std::fmt::Debug + 'static> std::error::Error for CheckpointError<E> {}

impl<E> From<ShardTreeError<E>> for CheckpointError<E> {
    fn from(e: ShardTreeError<E>) -> Self {
        CheckpointError::Tree(e)
    }
}

// ---------------------------------------------------------------------------
// TreeServer constructor
// ---------------------------------------------------------------------------

impl TreeServer {
    /// Create a new KV-backed tree server.
    ///
    /// `next_position` is `CommitmentTreeState.NextIndex` from KV (0 on first
    /// boot). On a cold start, `latest_checkpoint` is initialised from the
    /// maximum checkpoint ID persisted in the KV store, so that `root()`
    /// returns the correct value even before the first checkpoint after restart.
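    ///
    /// A construction sketch (not compiled; `go_kv_callbacks()` is a
    /// hypothetical stand-in for however the CGO layer builds a [`KvCallbacks`]):
    ///
    /// ```ignore
    /// let cb: KvCallbacks = go_kv_callbacks(); // hypothetical FFI glue
    /// let next_index = 10; // CommitmentTreeState.NextIndex read from KV
    /// let mut server = TreeServer::new(cb, next_index);
    /// assert_eq!(server.size(), 10);
    /// ```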
    pub fn new(cb: KvCallbacks, next_position: u64) -> Self {
        let store = KvShardStore::new(cb);
        // Initialise latest_checkpoint from the KV store before handing the
        // store to ShardTree (which takes ownership). Errors are treated as
        // "no checkpoint" — the tree will re-checkpoint on the next append.
        let latest_checkpoint = store.max_checkpoint_id().unwrap_or(None);
        Self {
            inner: ShardTree::new(store, MAX_CHECKPOINTS),
            latest_checkpoint,
            next_position,
        }
    }
}

// ---------------------------------------------------------------------------
// TreeServer: append_from_kv (delta-append directly from KV)
// ---------------------------------------------------------------------------

impl TreeServer {
    /// Append `count` leaves starting at `cursor` by reading them directly
    /// from the application KV store via KV callbacks, skipping the Go-side
    /// leaf fetch and CGO serialization round-trip.
    ///
    /// Each leaf is stored at `0x02 || u64 BE index` in the Cosmos KV store
    /// (the `CommitmentLeafKey` format from `types/keys.go`). On success, the
    /// tree's internal leaf count advances by `count`.
    ///
    /// This is the production delta-append path: a single CGO call to this
    /// function replaces the `newLeaves` allocation + per-leaf KV read loop
    /// that was previously done in `ensureTreeLoaded`.
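    ///
    /// A usage sketch (not compiled; assumes the Go side has already written
    /// leaves 10 and 11 under their `CommitmentLeafKey`s and that `server`
    /// currently holds 10 leaves):
    ///
    /// ```ignore
    /// server.append_from_kv(10, 2)?;
    /// assert_eq!(server.size(), 12);
    /// ```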
    pub fn append_from_kv(&mut self, cursor: u64, count: u64) -> Result<(), AppendFromKvError> {
        for i in cursor..cursor + count {
            let key = Self::leaf_key(i);
            let blob = self
                .inner
                .store()
                .cb
                .get(&key)?
                .ok_or(AppendFromKvError::MissingLeaf(i))?;
            if blob.len() != 32 {
                return Err(AppendFromKvError::MalformedLeaf(i));
            }
            let mut repr = [0u8; 32];
            repr.copy_from_slice(&blob);
            let fp: Option<Fp> = Fp::from_repr(repr).into();
            let fp = fp.ok_or(AppendFromKvError::MalformedLeaf(i))?;
            self.append(fp).map_err(AppendFromKvError::Tree)?;
        }
        Ok(())
    }

    /// KV key for an app-level commitment leaf: `0x02 || u64 BE index`.
    ///
    /// Matches `types.CommitmentLeafKey(index)` in keys.go.
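    ///
    /// For example, `leaf_key(1)` is
    /// `[0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01]`.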
    fn leaf_key(index: u64) -> [u8; 9] {
        let mut k = [0u8; 9];
        k[0] = 0x02;
        k[1..].copy_from_slice(&index.to_be_bytes());
        k
    }
}

// ---------------------------------------------------------------------------
// SyncableServer constructor
// ---------------------------------------------------------------------------

impl SyncableServer<MemoryShardStore<MerkleHashVote, u32>> {
    /// Create an empty in-memory syncable tree.
    pub fn empty() -> Self {
        Self {
            tree: GenericTreeServer {
                inner: ShardTree::new(MemoryShardStore::empty(), MAX_CHECKPOINTS),
                latest_checkpoint: None,
                next_position: 0,
            },
            blocks: BTreeMap::new(),
            pending_leaves: Vec::new(),
            pending_start: 0,
        }
    }
}

// ---------------------------------------------------------------------------
// SyncableServer: shared impl for all S
// ---------------------------------------------------------------------------

impl<S> SyncableServer<S>
where
    S: ShardStore<H = MerkleHashVote, CheckpointId = u32>,
    S::Error: std::fmt::Debug,
{
    /// Wrap an existing [`GenericTreeServer`] with sync tracking.
    pub fn new(tree: GenericTreeServer<S>) -> Self {
        Self {
            tree,
            blocks: BTreeMap::new(),
            pending_leaves: Vec::new(),
            pending_start: 0,
        }
    }

    /// Append a single leaf and record it in the pending block.
    pub fn append(&mut self, leaf: Fp) -> Result<u64, ShardTreeError<S::Error>> {
        let hash = MerkleHashVote::from_fp(leaf);
        let idx = self.tree.append(leaf)?;
        self.pending_leaves.push(hash);
        Ok(idx)
    }

    /// Append two leaves (e.g. new VAN + VC from `MsgCastVote`).
    ///
    /// Returns the index of the first leaf.
    pub fn append_two(&mut self, first: Fp, second: Fp) -> Result<u64, ShardTreeError<S::Error>> {
        let idx = self.append(first)?;
        self.append(second)?;
        Ok(idx)
    }

    /// Snapshot the current tree state and record block-level commitments.
    ///
    /// # Errors
    /// Returns [`CheckpointError::NotMonotonic`] if `height` is not strictly
    /// greater than the previous checkpoint height.
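    ///
    /// A sketch of the per-block bookkeeping (not compiled; `van` and `vc`
    /// stand in for two `Fp` commitments):
    ///
    /// ```ignore
    /// let mut tree = MemoryTreeServer::empty();
    /// tree.append_two(van, vc).unwrap(); // leaves 0 and 1, pending for block 7
    /// tree.checkpoint(7).unwrap();       // records them as the block-7 diff
    /// assert_eq!(tree.latest_checkpoint(), Some(7));
    /// assert!(tree.root_at_height(7).is_some());
    /// ```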
    pub fn checkpoint(&mut self, height: u32) -> Result<(), CheckpointError<S::Error>> {
        self.tree.checkpoint(height)?;
        let commitments = BlockCommitments {
            height,
            start_index: self.pending_start,
            leaves: std::mem::take(&mut self.pending_leaves),
        };
        self.blocks.insert(height, commitments);
        self.pending_start = self.tree.next_position;
        Ok(())
    }

    /// Current Merkle root (at the latest checkpoint).
    pub fn root(&self) -> Fp {
        self.tree.root()
    }

    /// Root at a specific checkpoint height.
    pub fn root_at_height(&self, height: u32) -> Option<Fp> {
        self.tree.root_at_height(height)
    }

    /// Number of leaves appended.
    pub fn size(&self) -> u64 {
        self.tree.size()
    }

    /// Build a Merkle path for the leaf at `position`, valid at checkpoint
    /// `anchor_height`.
    pub fn path(&self, position: u64, anchor_height: u32) -> Option<MerklePath> {
        self.tree.path(position, anchor_height)
    }

    /// Latest checkpoint height.
    pub fn latest_checkpoint(&self) -> Option<u32> {
        self.tree.latest_checkpoint
    }
}

// ---------------------------------------------------------------------------
// Shared impl for all GenericTreeServer<S>
// ---------------------------------------------------------------------------

impl<S> GenericTreeServer<S>
where
    S: ShardStore<H = MerkleHashVote, CheckpointId = u32>,
    S::Error: std::fmt::Debug,
{
    /// Append a single leaf (e.g. one VAN from `MsgDelegateVote`).
    ///
    /// The leaf is marked so witnesses can be generated for it later.
    /// Returns the leaf index.
    pub fn append(&mut self, leaf: Fp) -> Result<u64, ShardTreeError<S::Error>> {
        let index = self.next_position;
        let hash = MerkleHashVote::from_fp(leaf);
        self.inner.append(hash, Retention::Marked)?;
        self.next_position += 1;
        Ok(index)
    }

    /// Append two leaves (e.g. new VAN + VC from `MsgCastVote`).
    ///
    /// Returns the index of the first leaf.
    pub fn append_two(&mut self, first: Fp, second: Fp) -> Result<u64, ShardTreeError<S::Error>> {
        let index = self.append(first)?;
        self.append(second)?;
        Ok(index)
    }

    /// Snapshot the current tree state at the given block height.
    ///
    /// Called by EndBlocker after processing all transactions in a block.
    /// The root at this checkpoint becomes a valid anchor for ZKP #2 / ZKP #3.
    ///
    /// # Errors
    /// Returns [`CheckpointError::NotMonotonic`] if `height` is not strictly
    /// greater than the previous checkpoint height.
    /// Returns [`CheckpointError::Tree`] if the underlying shard store fails.
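    ///
    /// For example (via the [`MemoryTreeServer`] alias, which delegates to this
    /// method; not compiled):
    ///
    /// ```ignore
    /// tree.checkpoint(8).unwrap(); // assumes any earlier checkpoint is below 8
    /// let err = tree.checkpoint(8).unwrap_err();
    /// assert!(matches!(err, CheckpointError::NotMonotonic { prev: 8, requested: 8 }));
    /// ```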
    pub fn checkpoint(&mut self, height: u32) -> Result<(), CheckpointError<S::Error>> {
        if let Some(prev) = self.latest_checkpoint {
            if height <= prev {
                return Err(CheckpointError::NotMonotonic { prev, requested: height });
            }
        }
        self.inner.checkpoint(height)?;
        self.latest_checkpoint = Some(height);
        Ok(())
    }

    /// Current Merkle root (at the latest checkpoint).
    ///
    /// If no checkpoint has been recorded yet, this is the empty root for the
    /// full tree depth.
    pub fn root(&self) -> Fp {
        if let Some(id) = self.latest_checkpoint {
            self.inner
                .root_at_checkpoint_id(&id)
                .ok()
                .flatten()
                .map(|h| h.0)
                .unwrap_or_else(|| MerkleHashVote::empty_root(Level::from(TREE_DEPTH as u8)).0)
        } else {
            MerkleHashVote::empty_root(Level::from(TREE_DEPTH as u8)).0
        }
    }

    /// Root at a specific checkpoint height (anchor lookup).
    ///
    /// Returns `None` if the checkpoint does not exist.
    pub fn root_at_height(&self, height: u32) -> Option<Fp> {
        self.inner
            .root_at_checkpoint_id(&height)
            .ok()
            .flatten()
            .map(|h| h.0)
    }

    /// Number of leaves appended.
    pub fn size(&self) -> u64 {
        self.next_position
    }

    /// Set the leaf count directly (e.g. to restore it after a cold start when
    /// `next_position` was not passed to [`TreeServer::new`]).
    pub fn set_next_position(&mut self, pos: u64) {
        self.next_position = pos;
    }

    /// Build a Merkle path for the leaf at `position`, valid at the given
    /// checkpoint `anchor_height`.
    ///
    /// Returns `None` if the position or checkpoint is invalid.
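    ///
    /// A usage sketch (not compiled; `leaf_index` and `anchor_height` come from
    /// earlier `append` / `checkpoint` calls):
    ///
    /// ```ignore
    /// let witness: MerklePath = server
    ///     .path(leaf_index, anchor_height)
    ///     .expect("marked leaf and existing checkpoint");
    /// ```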
    pub fn path(&self, position: u64, anchor_height: u32) -> Option<MerklePath> {
        let pos = incrementalmerkletree::Position::from(position);
        self.inner
            .witness_at_checkpoint_id(pos, &anchor_height)
            .ok()
            .flatten()
            .map(MerklePath::from)
    }
424}