//! Client-side vote commitment tree.
//!
//! [`TreeClient`] maintains a sparse local copy of the tree for witness generation.
//! It syncs from a [`TreeSyncApi`] source (the server), marks positions of interest,
//! and generates Merkle authentication paths.
//!
//! This mirrors how Zcash wallets use `ShardTree` via `WalletCommitmentTrees`:
//! the client receives all leaves (no trial decryption needed since all vote-tree
//! leaves are public), inserts them, and generates witnesses only for its own
//! marked positions.
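//!
//! A minimal end-to-end sketch (hypothetical names: `api` is any
//! [`TreeSyncApi`] implementation and `van_index` is a position the caller
//! learned out of band):
//!
//! ```ignore
//! let mut client = TreeClient::empty();
//! client.mark_position(van_index); // register interest before syncing past it
//! client.sync(&api)?;
//! let anchor = client.last_synced_height().expect("synced at least one block");
//! let path = client.witness(van_index, anchor);
//! ```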

use std::collections::BTreeSet;
use std::fmt;

use incrementalmerkletree::{Hashable, Level, Retention};
use pasta_curves::Fp;
use shardtree::{store::memory::MemoryShardStore, ShardTree};

use crate::hash::{MerkleHashVote, MAX_CHECKPOINTS, SHARD_HEIGHT, TREE_DEPTH};
use crate::path::MerklePath;
use crate::sync_api::TreeSyncApi;

// ---------------------------------------------------------------------------
// SyncError
// ---------------------------------------------------------------------------

/// Errors that can occur during [`TreeClient::sync`].
#[derive(Debug)]
pub enum SyncError<E: fmt::Debug> {
    /// Error from the underlying [`TreeSyncApi`] transport.
    Api(E),
    /// Block's `start_index` doesn't match the client's expected next position.
    ///
    /// This indicates missed or duplicated blocks, wrong ordering, or a
    /// protocol-level desync between server and client.
    StartIndexMismatch {
        height: u32,
        expected: u64,
        got: u64,
    },
    /// Client's computed root doesn't match the server's root at a block height.
    ///
    /// This indicates corrupted leaf data, a hash mismatch, or a
    /// different tree implementation between server and client.
    RootMismatch {
        height: u32,
        local: Option<Fp>,
        server: Fp,
    },
    /// The server ended pagination before the client reached the advertised tip.
    IncompleteSync {
        local_next_index: u64,
        server_next_index: u64,
    },
    /// The server returned a pagination cursor that does not move forward or
    /// that overshoots the advertised tip height.
    InvalidPagination { current: u32, next: u32 },
}

impl<E: fmt::Debug> fmt::Display for SyncError<E> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            SyncError::Api(e) => write!(f, "sync API error: {:?}", e),
            SyncError::StartIndexMismatch {
                height,
                expected,
                got,
            } => write!(
                f,
                "start_index mismatch at height {}: expected {}, got {}",
                height, expected, got
            ),
            SyncError::RootMismatch {
                height,
                local,
                server,
            } => write!(
                f,
                "root mismatch at height {}: local={:?}, server={:?}",
                height, local, server
            ),
            SyncError::IncompleteSync {
                local_next_index,
                server_next_index,
            } => write!(
                f,
                "incomplete sync: local next_index={}, server next_index={}",
                local_next_index, server_next_index
            ),
            SyncError::InvalidPagination { current, next } => write!(
                f,
                "invalid pagination cursor: current={}, next={}",
                current, next
            ),
        }
    }
}

impl<E: fmt::Debug> From<E> for SyncError<E> {
    fn from(err: E) -> Self {
        SyncError::Api(err)
    }
}
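
// Not part of the original API surface: a standard-library `Error` impl is a
// conventional completion here (both `Debug` and `Display` are already in
// place), letting callers box a `SyncError` into `dyn std::error::Error`.
impl<E: fmt::Debug> std::error::Error for SyncError<E> {}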

// ---------------------------------------------------------------------------
// TreeClient
// ---------------------------------------------------------------------------

/// Client-side sparse vote commitment tree.
///
/// Mirrors the server tree via incremental sync. Marks specific positions for
/// witness generation:
/// - **Wallet**: marks its own VAN position (for ZKP #2)
/// - **Helper server**: marks delegated VC positions (for ZKP #3)
pub struct TreeClient {
    inner: ShardTree<MemoryShardStore<MerkleHashVote, u32>, { TREE_DEPTH as u8 }, { SHARD_HEIGHT }>,
    /// Next leaf position expected (mirrors server's next_position).
    next_position: u64,
    /// Latest synced block height.
    last_synced_height: Option<u32>,
    /// Positions registered for witness generation.
    ///
    /// During [`Self::sync`], positions in this set are inserted with
    /// `Retention::Marked`; all other positions use `Retention::Ephemeral`.
    /// This gives ShardTree the signal to retain the sibling hashes needed
    /// for witnesses at marked positions while pruning everything else.
    ///
    /// Register positions via [`Self::mark_position`] **before** syncing past them.
    marked_positions: BTreeSet<u64>,
}

impl TreeClient {
    /// Create an empty client tree.
    pub fn empty() -> Self {
        Self {
            inner: ShardTree::new(MemoryShardStore::empty(), MAX_CHECKPOINTS),
            next_position: 0,
            last_synced_height: None,
            marked_positions: BTreeSet::new(),
        }
    }

    /// Register a leaf position for witness generation.
    ///
    /// When [`Self::sync`] encounters this position, the leaf is inserted with
    /// `Retention::Marked` so that [`Self::witness`] can later produce a Merkle
    /// authentication path for it. All other positions are inserted as
    /// `Retention::Ephemeral` (prunable).
    ///
    /// Must be called **before** syncing past the position. This matches
    /// the production pattern where the wallet knows its VAN index from
    /// `MsgDelegateVote` before it syncs the block that contains it, and
    /// the helper server knows the delegated VC index from the share
    /// payload before it syncs.
    ///
    /// Calling this for a position that has already been synced has no
    /// effect (the leaf is already in the tree as ephemeral).
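    ///
    /// A minimal wallet-side sketch (hypothetical names: `client` is a
    /// [`TreeClient`], `van_index` is the index taken from `MsgDelegateVote`,
    /// and `api` is any [`TreeSyncApi`] implementation):
    ///
    /// ```ignore
    /// client.mark_position(van_index); // register interest first...
    /// client.sync(&api)?;              // ...then sync the block containing it
    /// ```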
    pub fn mark_position(&mut self, position: u64) {
        self.marked_positions.insert(position);
    }

    /// Sync the client tree from a [`TreeSyncApi`] source.
    ///
    /// Fetches block commitments from the server (incrementally, from the last
    /// synced height) and inserts them into the local tree. Each block's leaves
    /// are checkpointed at the block's height.
    ///
    /// **Retention**: Positions registered via [`Self::mark_position`] are
    /// inserted with `Retention::Marked` so witnesses can be generated for them.
    /// All other positions use `Retention::Ephemeral`, allowing `ShardTree`
    /// to prune their subtree data once it's no longer needed. This keeps
    /// the client tree sparse: only the subtrees touching marked positions
    /// are fully materialized.
    ///
    /// **Safety checks** (these catch corrupted data or protocol mismatches):
    /// - Each block's `start_index` must match the client's expected next position.
    /// - After checkpointing each block, the client's root is verified against the
    ///   server's root at that height (the consistency check described in the README).
    /// - After pagination completes, the client must match the tip advertised by
    ///   `get_tree_state()`.
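    ///
    /// A hedged recovery sketch (hypothetical `client`, `api`, and `van_index`;
    /// the actual policy is up to the caller). Note that a fresh client loses
    /// its marks, so positions must be re-registered before the resync:
    ///
    /// ```ignore
    /// if let Err(SyncError::RootMismatch { .. }) = client.sync(&api) {
    ///     client = TreeClient::empty();
    ///     client.mark_position(van_index); // re-register before resyncing
    ///     client.sync(&api)?;
    /// }
    /// ```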
    pub fn sync<A: TreeSyncApi>(&mut self, api: &A) -> Result<(), SyncError<A::Error>> {
        let state = api.get_tree_state()?;
        if state.next_index == self.next_position {
            if state.next_index > 0 {
                let local = self.root();
                if local != state.root {
                    return Err(SyncError::RootMismatch {
                        height: state.height,
                        local: Some(local),
                        server: state.root,
                    });
                }
            }
            return Ok(());
        }

        let from_height = self.last_synced_height.map(|h| h + 1).unwrap_or(0);
        let to_height = state.height;

        if from_height > to_height {
            return Ok(()); // Already up to date.
        }

        let mut page_from = from_height;
        loop {
            let page = api.get_block_commitments(page_from, to_height)?;

            for block in &page.blocks {
                // Validate start_index continuity: the block's first leaf index must
                // match exactly where the client expects the next leaf. A mismatch
                // means missed blocks, duplicates, or wrong ordering.
                if !block.leaves.is_empty() && block.start_index != self.next_position {
                    return Err(SyncError::StartIndexMismatch {
                        height: block.height,
                        expected: self.next_position,
                        got: block.start_index,
                    });
                }

                for leaf in &block.leaves {
                    // Use Marked retention for positions the client registered
                    // interest in; Ephemeral for everything else. This gives
                    // ShardTree the signal to retain witness data only where needed.
                    let retention = if self.marked_positions.contains(&self.next_position) {
                        Retention::Marked
                    } else {
                        Retention::Ephemeral
                    };
                    self.inner
                        .append(*leaf, retention)
                        .expect("append must succeed (tree not full)");
                    self.next_position += 1;
                }

                // Checkpoint after each block's leaves, mirroring the server's
                // EndBlocker snapshots.
                self.inner
                    .checkpoint(block.height)
                    .expect("checkpoint must succeed");
                self.last_synced_height = Some(block.height);

                // Root consistency check: verify the client's computed root matches
                // the server's root at this height. This catches corrupted leaf data,
                // hash mismatches, or tree implementation differences.
                let local = self.root_at_height(block.height);
                if local != Some(block.root) {
                    return Err(SyncError::RootMismatch {
                        height: block.height,
                        local,
                        server: block.root,
                    });
                }
            }

            // A zero cursor means this page completed the requested height range.
            if page.next_from_height == 0 {
                break;
            }

            // Nonzero cursors must move forward and stay inside the advertised
            // tip range. Otherwise a broken server could make sync loop forever
            // or skip past data that should have been returned.
            if page.next_from_height <= page_from || page.next_from_height > to_height {
                return Err(SyncError::InvalidPagination {
                    current: page_from,
                    next: page.next_from_height,
                });
            }
            page_from = page.next_from_height;
        }

        // Pagination is only complete if the leaves we applied reach the same
        // next index the server advertised before the loop started.
        if self.next_position != state.next_index {
            return Err(SyncError::IncompleteSync {
                local_next_index: self.next_position,
                server_next_index: state.next_index,
            });
        }

        // The final root check binds the full paginated sync result to the
        // tree state fetched at the beginning of sync.
        if state.next_index > 0 {
            let local = self.root();
            if local != state.root {
                return Err(SyncError::RootMismatch {
                    height: state.height,
                    local: Some(local),
                    server: state.root,
                });
            }
        }

        Ok(())
    }

    /// Generate a Merkle authentication path for the leaf at `position`,
    /// valid at the given checkpoint `anchor_height`.
    ///
    /// Returns `None` if the position or checkpoint is invalid, or if the
    /// position was not marked.
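    ///
    /// Sketch (assuming `position` was marked before the sync that covered it):
    ///
    /// ```ignore
    /// let anchor = client.last_synced_height().expect("synced at least once");
    /// let path = client.witness(position, anchor).expect("marked and synced");
    /// ```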
    pub fn witness(&self, position: u64, anchor_height: u32) -> Option<MerklePath> {
        let pos = incrementalmerkletree::Position::from(position);
        self.inner
            .witness_at_checkpoint_id(pos, &anchor_height)
            .ok()
            .flatten()
            .map(MerklePath::from)
    }

    /// Root at a specific checkpoint height (for anchor verification).
    ///
    /// After sync, this should match the server's root at the same height.
    pub fn root_at_height(&self, height: u32) -> Option<Fp> {
        self.inner
            .root_at_checkpoint_id(&height)
            .ok()
            .flatten()
            .map(|h| h.0)
    }

    /// Current root (at the latest synced checkpoint).
    pub fn root(&self) -> Fp {
        if let Some(id) = self.last_synced_height {
            self.inner
                .root_at_checkpoint_id(&id)
                .ok()
                .flatten()
                .map(|h| h.0)
                .unwrap_or_else(|| MerkleHashVote::empty_root(Level::from(TREE_DEPTH as u8)).0)
        } else {
            MerkleHashVote::empty_root(Level::from(TREE_DEPTH as u8)).0
        }
    }

    /// Number of leaves synced.
    pub fn size(&self) -> u64 {
        self.next_position
    }

    /// Latest synced block height.
    pub fn last_synced_height(&self) -> Option<u32> {
        self.last_synced_height
    }
}