// vote_commitment_tree/client.rs

//! Client-side vote commitment tree.
//!
//! [`TreeClient`] maintains a sparse local copy of the tree for witness generation.
//! It syncs from a [`TreeSyncApi`] source (the server), marks positions of interest,
//! and generates Merkle authentication paths.
//!
//! This mirrors how Zcash wallets use `ShardTree` via `WalletCommitmentTrees`:
//! the client receives all leaves (no trial decryption needed since all vote-tree
//! leaves are public), inserts them, and generates witnesses only for its own
//! marked positions.
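//!
//! # Example
//!
//! A minimal sketch of the intended flow. `MyApi`, `endpoint`, and `van_index`
//! are illustrative placeholders, not part of this crate:
//!
//! ```ignore
//! let api = MyApi::connect(endpoint)?;
//! let mut client = TreeClient::empty();
//! client.mark_position(van_index); // register interest *before* syncing past it
//! client.sync(&api)?;
//! let anchor = client.last_synced_height().expect("synced at least one block");
//! let path = client.witness(van_index, anchor).expect("marked position");
//! ```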

use std::collections::BTreeSet;
use std::fmt;

use incrementalmerkletree::{Hashable, Level, Retention};
use pasta_curves::Fp;
use shardtree::{store::memory::MemoryShardStore, ShardTree};

use crate::hash::{MerkleHashVote, MAX_CHECKPOINTS, SHARD_HEIGHT, TREE_DEPTH};
use crate::path::MerklePath;
use crate::sync_api::TreeSyncApi;

/// Keep each leaf query window within the chain's public API cap
/// (`to_height - from_height <= 1000`).
const MAX_SYNC_HEIGHT_SPAN: u32 = 1000;

// ---------------------------------------------------------------------------
// SyncError
// ---------------------------------------------------------------------------

/// Errors that can occur during [`TreeClient::sync`].
#[derive(Debug)]
pub enum SyncError<E: fmt::Debug> {
    /// Error from the underlying [`TreeSyncApi`] transport.
    Api(E),
    /// Block's `start_index` doesn't match the client's expected next position.
    ///
    /// This indicates missed or duplicated blocks, wrong ordering, or a
    /// protocol-level desync between server and client.
    StartIndexMismatch {
        height: u32,
        expected: u64,
        got: u64,
    },
    /// Client's computed root doesn't match the server's root at a block height.
    ///
    /// This indicates corrupted leaf data, a hash mismatch, or a
    /// different tree implementation between server and client.
    RootMismatch {
        height: u32,
        local: Option<Fp>,
        server: Fp,
    },
}

impl<E: fmt::Debug> fmt::Display for SyncError<E> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            SyncError::Api(e) => write!(f, "sync API error: {:?}", e),
            SyncError::StartIndexMismatch {
                height,
                expected,
                got,
            } => write!(
                f,
                "start_index mismatch at height {}: expected {}, got {}",
                height, expected, got
            ),
            SyncError::RootMismatch {
                height,
                local,
                server,
            } => write!(
                f,
                "root mismatch at height {}: local={:?}, server={:?}",
                height, local, server
            ),
        }
    }
}
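
/// Converting any transport error into [`SyncError::Api`] is what lets
/// [`TreeClient::sync`] apply `?` directly to fallible [`TreeSyncApi`] calls.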
impl<E: fmt::Debug> From<E> for SyncError<E> {
    fn from(err: E) -> Self {
        SyncError::Api(err)
    }
}

// ---------------------------------------------------------------------------
// TreeClient
// ---------------------------------------------------------------------------

/// Client-side sparse vote commitment tree.
///
/// Mirrors the server tree via incremental sync. Marks specific positions for
/// witness generation:
/// - **Wallet**: marks its own VAN position (for ZKP #2)
/// - **Helper server**: marks delegated VC positions (for ZKP #3)
pub struct TreeClient {
    inner: ShardTree<
        MemoryShardStore<MerkleHashVote, u32>,
        { TREE_DEPTH as u8 },
        { SHARD_HEIGHT },
    >,
    /// Next leaf position expected (mirrors the server's `next_position`).
    next_position: u64,
    /// Latest synced block height.
    last_synced_height: Option<u32>,
    /// Positions registered for witness generation.
    ///
    /// During [`Self::sync`], positions in this set are inserted with
    /// `Retention::Marked`; all other positions use `Retention::Ephemeral`.
    /// This gives `ShardTree` the signal to retain the sibling hashes needed
    /// for witnesses at marked positions while pruning everything else.
    ///
    /// Register positions via [`Self::mark_position`] **before** syncing past them.
    marked_positions: BTreeSet<u64>,
}

impl TreeClient {
    /// Create an empty client tree.
    pub fn empty() -> Self {
        Self {
            inner: ShardTree::new(MemoryShardStore::empty(), MAX_CHECKPOINTS),
            next_position: 0,
            last_synced_height: None,
            marked_positions: BTreeSet::new(),
        }
    }

    /// Register a leaf position for witness generation.
    ///
    /// When [`Self::sync`] encounters this position, the leaf is inserted with
    /// `Retention::Marked` so that [`Self::witness`] can later produce a Merkle
    /// authentication path for it. All other positions are inserted as
    /// `Retention::Ephemeral` (prunable).
    ///
    /// Must be called **before** syncing past the position. This matches
    /// the production pattern where the wallet knows its VAN index from
    /// `MsgDelegateVote` before it syncs the block that contains it, and
    /// the helper server knows the delegated VC index from the share
    /// payload before it syncs.
    ///
    /// Calling this for a position that has already been synced has no
    /// effect (the leaf is already in the tree as ephemeral).
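    ///
    /// A sketch of the helper-server flow; `delegated_vc_indices` stands in
    /// for indices recovered from the share payload and is not an API of
    /// this crate:
    ///
    /// ```ignore
    /// for idx in delegated_vc_indices {
    ///     client.mark_position(idx);
    /// }
    /// client.sync(&api)?; // leaves at those indices are retained as Marked
    /// ```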
    pub fn mark_position(&mut self, position: u64) {
        self.marked_positions.insert(position);
    }

    /// Sync the client tree from a [`TreeSyncApi`] source.
    ///
    /// Fetches block commitments from the server (incrementally, from the last
    /// synced height) and inserts them into the local tree. Each block's leaves
    /// are checkpointed at the block's height.
    ///
    /// **Retention**: Positions registered via [`Self::mark_position`] are
    /// inserted with `Retention::Marked` so witnesses can be generated for them.
    /// All other positions use `Retention::Ephemeral`, allowing `ShardTree`
    /// to prune their subtree data once it's no longer needed. This keeps
    /// the client tree sparse — only the subtrees touching marked positions
    /// are fully materialized.
    ///
    /// **Safety checks** (these catch corrupted data or protocol mismatches):
    /// - Each block's `start_index` must match the client's expected next position.
    /// - After checkpointing each block, the client's root is verified against the
    ///   server's root at that height (the consistency check described in the README).
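    ///
    /// A sketch of one possible recovery from [`SyncError::RootMismatch`]:
    /// once the local tree has diverged, the simplest safe option is to
    /// rebuild from scratch.
    ///
    /// ```ignore
    /// if let Err(SyncError::RootMismatch { height, .. }) = client.sync(&api) {
    ///     eprintln!("diverged from server at height {height}; rebuilding");
    ///     client = TreeClient::empty();
    ///     // Re-register positions of interest before syncing again.
    /// }
    /// ```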
    pub fn sync<A: TreeSyncApi>(&mut self, api: &A) -> Result<(), SyncError<A::Error>> {
        let state = api.get_tree_state()?;
        let from_height = self.last_synced_height.map(|h| h + 1).unwrap_or(1);
        let to_height = state.height;

        if from_height > to_height {
            return Ok(()); // Already up to date.
        }

        // Fetch in bounded windows so long catch-up syncs never exceed the
        // chain API range cap.
        let mut chunk_from = from_height;
        while chunk_from <= to_height {
            let chunk_to = chunk_from
                .saturating_add(MAX_SYNC_HEIGHT_SPAN)
                .min(to_height);
            let blocks = api.get_block_commitments(chunk_from, chunk_to)?;

            for block in &blocks {
                // Validate start_index continuity: the block's first leaf index must
                // match exactly where the client expects the next leaf. A mismatch
                // means missed blocks, duplicates, or wrong ordering.
                if !block.leaves.is_empty() && block.start_index != self.next_position {
                    return Err(SyncError::StartIndexMismatch {
                        height: block.height,
                        expected: self.next_position,
                        got: block.start_index,
                    });
                }

                for leaf in &block.leaves {
                    // Use Marked retention for positions the client registered
                    // interest in; Ephemeral for everything else. This gives
                    // ShardTree the signal to retain witness data only where needed.
                    let retention = if self.marked_positions.contains(&self.next_position) {
                        Retention::Marked
                    } else {
                        Retention::Ephemeral
                    };
                    self.inner
                        .append(*leaf, retention)
                        .expect("append must succeed (tree not full)");
                    self.next_position += 1;
                }

                // Checkpoint after each block's leaves, mirroring the server's
                // EndBlocker snapshots.
                self.inner
                    .checkpoint(block.height)
                    .expect("checkpoint must succeed");
                self.last_synced_height = Some(block.height);

                // Root consistency check: verify the client's computed root matches
                // the server's root at this height. This catches corrupted leaf data,
                // hash mismatches, or tree implementation differences.
                let server_root = api.get_root_at_height(block.height)?;
                if let Some(expected) = server_root {
                    let local = self.root_at_height(block.height);
                    if local != Some(expected) {
                        return Err(SyncError::RootMismatch {
                            height: block.height,
                            local,
                            server: expected,
                        });
                    }
                }
            }

            chunk_from = match chunk_to.checked_add(1) {
                Some(next) => next,
                None => break,
            };
        }

        Ok(())
    }

    /// Generate a Merkle authentication path for the leaf at `position`,
    /// valid at the given checkpoint `anchor_height`.
    ///
    /// Returns `None` if the position or checkpoint is invalid, or if the
    /// position was not marked.
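    ///
    /// A sketch of witnessing at the latest checkpoint (assumes `position`
    /// was marked before it was synced):
    ///
    /// ```ignore
    /// let anchor = client.last_synced_height().expect("synced at least one block");
    /// let path = client.witness(position, anchor).expect("marked position");
    /// let root = client.root_at_height(anchor).expect("checkpointed");
    /// // `path` authenticates the leaf at `position` against `root`.
    /// ```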
    pub fn witness(&self, position: u64, anchor_height: u32) -> Option<MerklePath> {
        let pos = incrementalmerkletree::Position::from(position);
        self.inner
            .witness_at_checkpoint_id(pos, &anchor_height)
            .ok()
            .flatten()
            .map(MerklePath::from)
    }

    /// Root at a specific checkpoint height (for anchor verification).
    ///
    /// After sync, this should match the server's root at the same height.
    pub fn root_at_height(&self, height: u32) -> Option<Fp> {
        self.inner
            .root_at_checkpoint_id(&height)
            .ok()
            .flatten()
            .map(|h| h.0)
    }

    /// Current root (at the latest synced checkpoint), or the empty-tree root
    /// if nothing has been synced yet.
    pub fn root(&self) -> Fp {
        self.last_synced_height
            .and_then(|id| self.root_at_height(id))
            .unwrap_or_else(|| MerkleHashVote::empty_root(Level::from(TREE_DEPTH as u8)).0)
    }

    /// Number of leaves synced.
    pub fn size(&self) -> u64 {
        self.next_position
    }

    /// Latest synced block height.
    pub fn last_synced_height(&self) -> Option<u32> {
        self.last_synced_height
    }
}