// aion_context/transparency_log.rs

1// SPDX-License-Identifier: MIT OR Apache-2.0
2//! Aion-native transparency log — RFC-0025.
3//!
4//! Append-only Merkle log over BLAKE3, RFC-6962-compatible in
5//! structure (split-point MTH, audit-path inclusion proofs) and
6//! domain-separated from every other aion signed object.
7//!
8//! Phase A, this module: in-memory log + inclusion proofs +
9//! operator-signed tree heads, all offline. Phase B adds frontier
10//! caching, consistency proofs, and persistence. Phase C adds a
11//! Rekor adapter for wire interop.
12//!
13//! # Example
14//!
15//! ```
16//! use aion_context::transparency_log::{TransparencyLog, LogEntryKind, verify_inclusion_proof};
17//! use aion_context::crypto::SigningKey;
18//!
19//! let mut log = TransparencyLog::new();
20//! let payload = b"attestation bytes";
21//! let seq = log.append(LogEntryKind::VersionAttestation, payload, 42).unwrap();
22//!
23//! // Self-contained verification: a verifier holding the log and a
24//! // pinned root needs no access to the original payload.
25//! let proof = log.inclusion_proof(seq).unwrap();
26//! let leaf = log.leaf_hash_at(seq).unwrap();
27//! verify_inclusion_proof(
28//!     leaf,
29//!     proof.leaf_index,
30//!     proof.tree_size,
31//!     &proof.audit_path,
32//!     log.root_hash(),
33//! ).unwrap();
34//!
35//! let operator = SigningKey::generate();
36//! log.set_operator(operator.verifying_key());
37//! let sth = log.sign_tree_head(&operator);
38//! assert!(log.verify_tree_head(&sth).is_ok());
39//! ```
40
41use crate::crypto::{SigningKey, VerifyingKey};
42use crate::{AionError, Result};
43
/// Domain separator for leaf-data hashing.
///
/// All four tags below are distinct, versioned, and NUL-terminated,
/// so a hash computed in one role cannot be confused with a hash
/// computed in another.
pub const LOG_LEAF_DOMAIN: &[u8] = b"AION_V2_LOG_LEAF_V1\0";

/// Domain separator for internal-node hashing.
pub const LOG_NODE_DOMAIN: &[u8] = b"AION_V2_LOG_NODE_V1\0";

/// Domain separator for signed tree heads.
pub const LOG_STH_DOMAIN: &[u8] = b"AION_V2_LOG_STH_V1\0";

/// Domain separator for the empty-tree sentinel root.
pub const LOG_EMPTY_DOMAIN: &[u8] = b"AION_V2_LOG_EMPTY_V1\0";
55
/// What kind of object is recorded in a log leaf.
///
/// The `u16` discriminants are serialized into the canonical leaf
/// bytes (see `canonical_leaf_bytes`), so they are stable wire
/// values — never renumber existing variants; only append new ones.
#[repr(u16)]
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum LogEntryKind {
    /// Multi-party attestation over a version (RFC-0021).
    VersionAttestation = 1,
    /// Signature over an external-artifact manifest (RFC-0022).
    ManifestSignature = 2,
    /// Key rotation record (RFC-0028).
    KeyRotation = 3,
    /// Key revocation record (RFC-0028).
    KeyRevocation = 4,
    /// SLSA v1.1 provenance statement (RFC-0024).
    SlsaStatement = 5,
    /// Generic DSSE envelope (RFC-0023).
    DsseEnvelope = 6,
}
73
74impl LogEntryKind {
75    /// Convert a raw `u16` to a known kind.
76    ///
77    /// # Errors
78    ///
79    /// Returns `Err` for discriminants not defined by this enum.
80    pub fn from_u16(value: u16) -> Result<Self> {
81        match value {
82            1 => Ok(Self::VersionAttestation),
83            2 => Ok(Self::ManifestSignature),
84            3 => Ok(Self::KeyRotation),
85            4 => Ok(Self::KeyRevocation),
86            5 => Ok(Self::SlsaStatement),
87            6 => Ok(Self::DsseEnvelope),
88            other => Err(AionError::InvalidFormat {
89                reason: format!("Unknown log entry kind: {other}"),
90            }),
91        }
92    }
93}
94
/// One leaf in the transparency log.
///
/// The raw payload bytes are not retained; only `payload_hash` is
/// stored (see `TransparencyLog::append`).
#[derive(Debug, Clone)]
pub struct LogEntry {
    /// Which kind of object this leaf carries.
    pub kind: LogEntryKind,
    /// 0-indexed position in the log.
    pub seq: u64,
    /// aion version number at submission time.
    pub timestamp_version: u64,
    /// BLAKE3 hash of the preceding leaf (`[0u8; 32]` for `seq == 0`),
    /// forming a linear hash chain on top of the Merkle structure.
    pub prev_leaf_hash: [u8; 32],
    /// BLAKE3 hash of the raw payload bytes.
    pub payload_hash: [u8; 32],
}
109
/// An inclusion proof: the siblings along the path from a leaf to
/// the Merkle root, innermost first.
///
/// Produced by [`TransparencyLog::inclusion_proof`]; checked by
/// [`verify_inclusion_proof`].
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct InclusionProof {
    /// Leaf index the proof refers to.
    pub leaf_index: u64,
    /// Tree size at the time the proof was generated.
    pub tree_size: u64,
    /// Merkle audit path (siblings, innermost first).
    pub audit_path: Vec<[u8; 32]>,
}
121
/// A tree head signed by the log operator.
///
/// The signed message is the canonical STH encoding: the STH domain
/// tag, then `tree_size` (u64 LE), then `root_hash` (see
/// `canonical_sth_bytes`).
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct SignedTreeHead {
    /// Number of leaves in the tree at signing time.
    pub tree_size: u64,
    /// Merkle root hash at that tree size.
    pub root_hash: [u8; 32],
    /// Ed25519 signature by the operator master key over the
    /// canonical STH bytes.
    pub operator_signature: [u8; 64],
}
133
/// Append-only Merkle log.
///
/// Maintains an internal subtree-roots cache keyed by `(level,
/// index)` so [`Self::inclusion_proof`] runs in O(log n) instead of
/// O(n) (issue #36). The cache is in-memory only — no on-disk
/// format change.
///
/// `subtree_roots[level][j]` is the hash of the COMPLETE 2^level
/// subtree covering leaves `[j*2^level, (j+1)*2^level)`. Only
/// fully-populated subtrees are stored; partial right-edge subtrees
/// are recomputed on demand from the cached children (still
/// O(log n) total).
#[derive(Debug, Default)]
pub struct TransparencyLog {
    /// Leaf records in append order; vector index equals `seq`.
    entries: Vec<LogEntry>,
    /// Domain-tagged leaf hashes, parallel to `entries`.
    leaf_hashes: Vec<[u8; 32]>,
    /// `subtree_roots[level][j]` covers leaves `[j*2^level, (j+1)*2^level)`.
    /// `subtree_roots[0]` mirrors `leaf_hashes`.
    subtree_roots: Vec<Vec<[u8; 32]>>,
    /// Operator master key; `None` until [`Self::set_operator`] is called.
    operator_master: Option<VerifyingKey>,
}
155
156/// Compute the canonical leaf-data bytes and return their
157/// domain-tagged BLAKE3 hash.
158#[must_use]
159pub fn leaf_hash(
160    kind: LogEntryKind,
161    seq: u64,
162    timestamp_version: u64,
163    prev_leaf_hash: &[u8; 32],
164    payload: &[u8],
165) -> [u8; 32] {
166    let payload_digest = crate::crypto::hash(payload);
167    let canonical = canonical_leaf_bytes(
168        kind,
169        seq,
170        timestamp_version,
171        prev_leaf_hash,
172        &payload_digest,
173    );
174    let mut hasher = blake3::Hasher::new();
175    hasher.update(LOG_LEAF_DOMAIN);
176    hasher.update(&canonical);
177    *hasher.finalize().as_bytes()
178}
179
180fn canonical_leaf_bytes(
181    kind: LogEntryKind,
182    seq: u64,
183    timestamp_version: u64,
184    prev_leaf_hash: &[u8; 32],
185    payload_hash: &[u8; 32],
186) -> Vec<u8> {
187    let mut buf = Vec::with_capacity(2 + 8 + 8 + 32 + 32);
188    buf.extend_from_slice(&(kind as u16).to_le_bytes());
189    buf.extend_from_slice(&seq.to_le_bytes());
190    buf.extend_from_slice(&timestamp_version.to_le_bytes());
191    buf.extend_from_slice(prev_leaf_hash);
192    buf.extend_from_slice(payload_hash);
193    buf
194}
195
196fn node_hash(left: &[u8; 32], right: &[u8; 32]) -> [u8; 32] {
197    let mut hasher = blake3::Hasher::new();
198    hasher.update(LOG_NODE_DOMAIN);
199    hasher.update(left);
200    hasher.update(right);
201    *hasher.finalize().as_bytes()
202}
203
204fn empty_root() -> [u8; 32] {
205    let mut hasher = blake3::Hasher::new();
206    hasher.update(LOG_EMPTY_DOMAIN);
207    *hasher.finalize().as_bytes()
208}
209
/// Largest power of two strictly less than `n` (the RFC 6962 split
/// point).
///
/// Callers only invoke this with `n >= 2`; for smaller `n` the loop
/// never advances and `1` is returned (no panic path).
const fn split_point(n: usize) -> usize {
    let mut half = 1usize;
    loop {
        let doubled = half.saturating_mul(2);
        if doubled >= n {
            return half;
        }
        half = doubled;
    }
}
219
/// Recompute a Merkle root from a leaf + audit path, mirroring the
/// construction used by [`TransparencyLog::inclusion_proof`].
/// Returns `Err` if the path is the wrong length for
/// `(leaf_index, tree_size)`.
fn compute_root_from_proof(
    leaf: [u8; 32],
    leaf_index: usize,
    tree_size: usize,
    proof: &[[u8; 32]],
) -> Result<[u8; 32]> {
    if tree_size == 0 {
        return Err(AionError::InvalidFormat {
            reason: "tree_size == 0 in inclusion proof".to_string(),
        });
    }
    if leaf_index >= tree_size {
        return Err(AionError::InvalidFormat {
            reason: "leaf_index >= tree_size".to_string(),
        });
    }
    // Base case: a single-leaf tree's root IS the leaf, and a valid
    // proof for it must be empty — this is also what rejects
    // over-long proofs at the bottom of the recursion.
    if tree_size == 1 {
        if !proof.is_empty() {
            return Err(AionError::InvalidFormat {
                reason: "proof is longer than expected for tree_size=1".to_string(),
            });
        }
        return Ok(leaf);
    }
    let k = split_point(tree_size);
    if proof.is_empty() {
        return Err(AionError::InvalidFormat {
            reason: "proof is shorter than expected".to_string(),
        });
    }
    // The LAST path element is the sibling at the outermost split;
    // the remaining prefix proves the half containing the leaf.
    let outer_sibling_index = proof.len().saturating_sub(1);
    let outer_sibling =
        *proof
            .get(outer_sibling_index)
            .ok_or_else(|| AionError::InvalidFormat {
                reason: "proof index underflow".to_string(),
            })?;
    let inner_proof = proof.get(..outer_sibling_index).unwrap_or(&[]);
    if leaf_index < k {
        // Leaf lies in the left subtree of size k; sibling is the
        // hash of the right subtree.
        let left = compute_root_from_proof(leaf, leaf_index, k, inner_proof)?;
        Ok(node_hash(&left, &outer_sibling))
    } else {
        // Leaf lies in the right subtree; re-base the index and size
        // into that subtree (saturating per file-wide lint policy;
        // both subtractions are guarded by `leaf_index >= k`).
        let right_index = leaf_index.saturating_sub(k);
        let right_size = tree_size.saturating_sub(k);
        let right = compute_root_from_proof(leaf, right_index, right_size, inner_proof)?;
        Ok(node_hash(&outer_sibling, &right))
    }
}
271
272/// Verify an inclusion proof: given a leaf hash, the leaf's index,
273/// the tree size at proof-generation time, the audit path, and the
274/// pinned root hash, check that the leaf is in the tree.
275///
276/// # Errors
277///
278/// Returns `Err` if the proof is malformed or the recomputed root
279/// differs from `expected_root`.
280pub fn verify_inclusion_proof(
281    leaf_hash: [u8; 32],
282    leaf_index: u64,
283    tree_size: u64,
284    proof: &[[u8; 32]],
285    expected_root: [u8; 32],
286) -> Result<()> {
287    let leaf_index_usize = usize::try_from(leaf_index).map_err(|_| AionError::InvalidFormat {
288        reason: "leaf_index exceeds usize".to_string(),
289    })?;
290    let tree_size_usize = usize::try_from(tree_size).map_err(|_| AionError::InvalidFormat {
291        reason: "tree_size exceeds usize".to_string(),
292    })?;
293    let computed = compute_root_from_proof(leaf_hash, leaf_index_usize, tree_size_usize, proof)?;
294    if computed != expected_root {
295        return Err(AionError::InvalidFormat {
296            reason: "inclusion proof does not recompute to expected root".to_string(),
297        });
298    }
299    Ok(())
300}
301
impl TransparencyLog {
    /// Construct an empty log with no operator master key set.
    #[must_use]
    pub fn new() -> Self {
        Self::default()
    }

    /// Register the operator master key used to verify STHs.
    ///
    /// Replaces any previously registered key.
    pub fn set_operator(&mut self, master_key: VerifyingKey) {
        self.operator_master = Some(master_key);
    }

    /// Number of leaves currently in the log.
    #[must_use]
    pub fn tree_size(&self) -> u64 {
        self.entries.len() as u64
    }

    /// Current Merkle root hash. Returns the empty-tree sentinel
    /// when the log has no entries.
    ///
    /// Uses the subtree-roots cache (issue #36): O(log n) for any
    /// tree size, vs O(n) before the cache.
    #[must_use]
    pub fn root_hash(&self) -> [u8; 32] {
        if self.leaf_hashes.is_empty() {
            return empty_root();
        }
        self.cached_subtree_root(0, self.leaf_hashes.len())
    }

    /// Look up the entry at `index`, if any.
    ///
    /// Returns `None` when `index >= tree_size()` or when `index`
    /// does not fit in `usize`.
    #[must_use]
    pub fn entry(&self, index: u64) -> Option<&LogEntry> {
        let idx = usize::try_from(index).ok()?;
        self.entries.get(idx)
    }

    /// All entries in log order.
    #[must_use]
    pub fn entries(&self) -> &[LogEntry] {
        &self.entries
    }

    /// Return the stored leaf hash for the entry at `index`, if any.
    ///
    /// Pairs with [`Self::inclusion_proof`] and
    /// [`verify_inclusion_proof`]: a verifier holding only the log
    /// and a pinned root hash can verify any entry's inclusion
    /// without needing the original payload bytes.
    ///
    /// Returns `None` when `index >= tree_size()` or when `index`
    /// does not fit in `usize`.
    #[must_use]
    pub fn leaf_hash_at(&self, index: u64) -> Option<[u8; 32]> {
        let idx = usize::try_from(index).ok()?;
        self.leaf_hashes.get(idx).copied()
    }

    /// Append a new leaf and return its sequence number.
    ///
    /// The raw payload is not stored — only its digest (in the
    /// entry) and the chained, domain-tagged leaf hash.
    ///
    /// # Errors
    ///
    /// Always `Ok` in the current in-memory implementation; the
    /// `Result` return is kept so later phases (e.g. Phase B
    /// persistence) can surface failures without an API break.
    pub fn append(
        &mut self,
        kind: LogEntryKind,
        payload: &[u8],
        timestamp_version: u64,
    ) -> Result<u64> {
        let seq = self.entries.len() as u64;
        // Genesis leaf links to the all-zero sentinel.
        let prev_leaf_hash = self.leaf_hashes.last().copied().unwrap_or([0u8; 32]);
        let hash = leaf_hash(kind, seq, timestamp_version, &prev_leaf_hash, payload);
        let payload_digest = crate::crypto::hash(payload);
        let entry = LogEntry {
            kind,
            seq,
            timestamp_version,
            prev_leaf_hash,
            payload_hash: payload_digest,
        };
        self.entries.push(entry);
        self.leaf_hashes.push(hash);
        self.cascade_subtree_roots(hash);
        Ok(seq)
    }

    /// Issue #36 — incremental cache update.
    ///
    /// After pushing a new leaf hash, walk up the tree completing
    /// every newly-finished `2^k` subtree. A subtree at level `k`
    /// completes whenever `leaf_hashes.len()` becomes a multiple of
    /// `2^k`. At each level, the new subtree root is
    /// `node_hash(left_sibling, right_just_completed)` where the
    /// right side is the entry we just pushed at level `k-1` and
    /// the left sibling is the previous entry at the same level.
    ///
    /// Total work per append: O(log n) amortized — each leaf rises
    /// at most log2(n) levels over the life of the log.
    #[allow(clippy::arithmetic_side_effects)] // bounded by log2(usize::MAX) iterations
    #[allow(clippy::indexing_slicing)] // indices are bounded by lower_len above
    fn cascade_subtree_roots(&mut self, leaf: [u8; 32]) {
        // Level 0 mirrors the leaf vector; create it lazily on the
        // very first append.
        if self.subtree_roots.is_empty() {
            self.subtree_roots.push(Vec::new());
        }
        self.subtree_roots[0].push(leaf);

        let mut level: usize = 0;
        loop {
            let lower_len = self.subtree_roots[level].len();
            // A pair just completed at `level` iff the lower vec's
            // length is even — the rightmost two entries combine
            // into a new entry one level up.
            if lower_len % 2 != 0 {
                break;
            }
            let right_idx = lower_len - 1;
            let left_idx = lower_len - 2;
            let combined = node_hash(
                &self.subtree_roots[level][left_idx],
                &self.subtree_roots[level][right_idx],
            );
            level += 1;
            // First time a subtree completes at this height: grow
            // the cache by one level.
            if self.subtree_roots.len() <= level {
                self.subtree_roots.push(Vec::new());
            }
            self.subtree_roots[level].push(combined);
        }
    }

    /// Compute the Merkle Tree Hash of `leaves[start..start+len]`
    /// using the subtree cache where possible.
    ///
    /// Falls back to recursive combination of cached sub-peaks for
    /// partial right-edge subtrees. Pure (does not mutate the
    /// cache).
    #[allow(clippy::arithmetic_side_effects)] // bounded by tree depth
    #[allow(clippy::indexing_slicing)] // start < leaf_hashes.len() by debug_assert
    fn cached_subtree_root(&self, start: usize, len: usize) -> [u8; 32] {
        debug_assert!(start + len <= self.leaf_hashes.len());
        if len == 0 {
            return empty_root();
        }
        if len == 1 {
            return self.leaf_hashes[start];
        }
        // If (start, len) is a complete cached subtree, return the
        // cached hash directly.
        if len.is_power_of_two() && start % len == 0 {
            let level = len.trailing_zeros() as usize;
            let j = start / len;
            if let Some(level_vec) = self.subtree_roots.get(level) {
                if let Some(h) = level_vec.get(j) {
                    return *h;
                }
            }
        }
        // Fall back to RFC 6962 recursive split. The cached path
        // gets short-circuited at the first complete sub-peak; the
        // remainder (right-edge partial) recurses but each step
        // halves at most O(log n) times.
        let k = split_point(len);
        let left = self.cached_subtree_root(start, k);
        let right = self.cached_subtree_root(start + k, len - k);
        node_hash(&left, &right)
    }

    /// Cache-aware audit-path construction. RFC 6962 split-and-recurse,
    /// but every sibling subtree is resolved via [`Self::cached_subtree_root`].
    ///
    /// `m` is the ABSOLUTE leaf index; `range_start`/`range_len`
    /// delimit the current subtree. Siblings are pushed on the way
    /// back up, so the returned path is innermost-first.
    #[allow(clippy::arithmetic_side_effects)] // bounded by tree depth
    fn cached_audit_path(&self, m: usize, range_start: usize, range_len: usize) -> Vec<[u8; 32]> {
        if range_len <= 1 {
            return Vec::new();
        }
        let k = split_point(range_len);
        if m < range_start + k {
            let mut path = self.cached_audit_path(m, range_start, k);
            let sib = self.cached_subtree_root(range_start + k, range_len - k);
            path.push(sib);
            path
        } else {
            let mut path = self.cached_audit_path(m, range_start + k, range_len - k);
            let sib = self.cached_subtree_root(range_start, k);
            path.push(sib);
            path
        }
    }

    /// Generate an inclusion proof for the leaf at `leaf_index`.
    ///
    /// # Errors
    ///
    /// Returns `Err` if `leaf_index >= tree_size`.
    pub fn inclusion_proof(&self, leaf_index: u64) -> Result<InclusionProof> {
        let idx = usize::try_from(leaf_index).map_err(|_| AionError::InvalidFormat {
            reason: "leaf_index exceeds usize".to_string(),
        })?;
        if idx >= self.leaf_hashes.len() {
            return Err(AionError::InvalidFormat {
                reason: format!(
                    "leaf_index {idx} out of range (tree_size {})",
                    self.leaf_hashes.len()
                ),
            });
        }
        let path = self.cached_audit_path(idx, 0, self.leaf_hashes.len());
        Ok(InclusionProof {
            leaf_index,
            tree_size: self.tree_size(),
            audit_path: path,
        })
    }

    /// Canonical bytes of the current tree head, used as the
    /// message the operator signs.
    #[must_use]
    pub fn canonical_tree_head(&self) -> Vec<u8> {
        canonical_sth_bytes(self.tree_size(), &self.root_hash())
    }

    /// Produce a [`SignedTreeHead`] for the current state.
    ///
    /// Note: signing does not require a registered operator key —
    /// [`Self::set_operator`] only gates verification.
    #[must_use]
    pub fn sign_tree_head(&self, operator_key: &SigningKey) -> SignedTreeHead {
        let tree_size = self.tree_size();
        let root_hash = self.root_hash();
        let message = canonical_sth_bytes(tree_size, &root_hash);
        let operator_signature = operator_key.sign(&message);
        SignedTreeHead {
            tree_size,
            root_hash,
            operator_signature,
        }
    }

    /// Verify a [`SignedTreeHead`] against the registered operator
    /// master key **and** against the log's current root.
    ///
    /// # Errors
    ///
    /// Returns `Err` if no operator is registered, if the signature
    /// does not verify, or if the STH's `root_hash` does not match
    /// the log's current root.
    pub fn verify_tree_head(&self, sth: &SignedTreeHead) -> Result<()> {
        let master = self
            .operator_master
            .as_ref()
            .ok_or_else(|| AionError::InvalidFormat {
                reason: "no operator master key registered".to_string(),
            })?;
        // Signature is checked over the STH's OWN (size, root) pair
        // first; only then is it compared to the live state.
        let message = canonical_sth_bytes(sth.tree_size, &sth.root_hash);
        master.verify(&message, &sth.operator_signature)?;
        if sth.tree_size != self.tree_size() || sth.root_hash != self.root_hash() {
            return Err(AionError::InvalidFormat {
                reason: "STH does not match current log state".to_string(),
            });
        }
        Ok(())
    }
}
562
563fn canonical_sth_bytes(tree_size: u64, root_hash: &[u8; 32]) -> Vec<u8> {
564    let capacity = LOG_STH_DOMAIN.len().saturating_add(8).saturating_add(32);
565    let mut buf = Vec::with_capacity(capacity);
566    buf.extend_from_slice(LOG_STH_DOMAIN);
567    buf.extend_from_slice(&tree_size.to_le_bytes());
568    buf.extend_from_slice(root_hash);
569    buf
570}
571
572#[cfg(test)]
573#[allow(
574    clippy::unwrap_used,
575    clippy::indexing_slicing,
576    clippy::arithmetic_side_effects
577)]
578mod tests {
579    use super::*;
580
581    #[test]
582    fn empty_log_has_empty_root_sentinel() {
583        let log = TransparencyLog::new();
584        assert_eq!(log.tree_size(), 0);
585        assert_eq!(log.root_hash(), empty_root());
586    }
587
588    #[test]
589    fn append_increments_tree_size() {
590        let mut log = TransparencyLog::new();
591        log.append(LogEntryKind::VersionAttestation, b"a", 1)
592            .unwrap();
593        log.append(LogEntryKind::ManifestSignature, b"b", 2)
594            .unwrap();
595        log.append(LogEntryKind::KeyRotation, b"c", 3).unwrap();
596        assert_eq!(log.tree_size(), 3);
597        assert_eq!(log.entries().len(), 3);
598    }
599
600    #[test]
601    fn leaf_chain_links_prev_hashes() {
602        let mut log = TransparencyLog::new();
603        log.append(LogEntryKind::VersionAttestation, b"a", 1)
604            .unwrap();
605        log.append(LogEntryKind::ManifestSignature, b"b", 2)
606            .unwrap();
607        let e0 = log.entry(0).unwrap();
608        let e1 = log.entry(1).unwrap();
609        let expected_prev = leaf_hash(
610            e0.kind,
611            e0.seq,
612            e0.timestamp_version,
613            &e0.prev_leaf_hash,
614            b"a",
615        );
616        assert_eq!(e1.prev_leaf_hash, expected_prev);
617        assert_eq!(e0.prev_leaf_hash, [0u8; 32]);
618    }
619
620    #[test]
621    fn inclusion_proof_verifies_for_every_leaf() {
622        let mut log = TransparencyLog::new();
623        let payloads: Vec<&[u8]> = vec![b"one", b"two", b"three", b"four", b"five"];
624        let kinds = [
625            LogEntryKind::VersionAttestation,
626            LogEntryKind::ManifestSignature,
627            LogEntryKind::KeyRotation,
628            LogEntryKind::SlsaStatement,
629            LogEntryKind::DsseEnvelope,
630        ];
631        for (i, p) in payloads.iter().enumerate() {
632            log.append(kinds[i], p, (i as u64) + 1).unwrap();
633        }
634        let root = log.root_hash();
635        for (i, p) in payloads.iter().enumerate() {
636            let entry = log.entry(i as u64).unwrap();
637            let proof = log.inclusion_proof(i as u64).unwrap();
638            let leaf = leaf_hash(
639                kinds[i],
640                entry.seq,
641                entry.timestamp_version,
642                &entry.prev_leaf_hash,
643                p,
644            );
645            verify_inclusion_proof(
646                leaf,
647                proof.leaf_index,
648                proof.tree_size,
649                &proof.audit_path,
650                root,
651            )
652            .unwrap();
653        }
654    }
655
656    #[test]
657    fn inclusion_proof_rejects_out_of_range_index() {
658        let mut log = TransparencyLog::new();
659        log.append(LogEntryKind::VersionAttestation, b"a", 1)
660            .unwrap();
661        assert!(log.inclusion_proof(5).is_err());
662    }
663
664    #[test]
665    fn sth_round_trip_verifies() {
666        let mut log = TransparencyLog::new();
667        let operator = SigningKey::generate();
668        log.set_operator(operator.verifying_key());
669        log.append(LogEntryKind::VersionAttestation, b"x", 1)
670            .unwrap();
671        let sth = log.sign_tree_head(&operator);
672        assert!(log.verify_tree_head(&sth).is_ok());
673    }
674
675    #[test]
676    fn sth_with_tampered_root_rejects() {
677        let mut log = TransparencyLog::new();
678        let operator = SigningKey::generate();
679        log.set_operator(operator.verifying_key());
680        log.append(LogEntryKind::VersionAttestation, b"x", 1)
681            .unwrap();
682        let mut sth = log.sign_tree_head(&operator);
683        sth.root_hash[0] ^= 0x01;
684        assert!(log.verify_tree_head(&sth).is_err());
685    }
686
687    #[test]
688    fn sth_without_operator_rejects() {
689        let mut log = TransparencyLog::new();
690        log.append(LogEntryKind::VersionAttestation, b"x", 1)
691            .unwrap();
692        let operator = SigningKey::generate();
693        let sth = log.sign_tree_head(&operator);
694        assert!(log.verify_tree_head(&sth).is_err());
695    }
696
697    #[test]
698    fn kind_round_trips() {
699        for kind in [
700            LogEntryKind::VersionAttestation,
701            LogEntryKind::ManifestSignature,
702            LogEntryKind::KeyRotation,
703            LogEntryKind::KeyRevocation,
704            LogEntryKind::SlsaStatement,
705            LogEntryKind::DsseEnvelope,
706        ] {
707            let raw = kind as u16;
708            assert_eq!(LogEntryKind::from_u16(raw).unwrap(), kind);
709        }
710        assert!(LogEntryKind::from_u16(999).is_err());
711    }
712
713    mod properties {
714        use super::*;
715        use hegel::generators as gs;
716
        // Cap raised from 16 to 256 (audit pass note): 16 only
        // exercised power-of-2 boundaries up to 2^3=8; larger
        // ranges drive the cascade past the n=17, 33, 65, 129
        // off-by-one boundaries and the n=32, 64, 128, 256
        // power-of-2 cases. Cost stays well under a second per
        // trial because the subtree cache makes append O(log n).
        /// Draw 1..=256 payloads of up to 64 bytes each.
        fn draw_payloads(tc: &hegel::TestCase) -> Vec<Vec<u8>> {
            let n = tc.draw(gs::integers::<usize>().min_value(1).max_value(256));
            let mut out: Vec<Vec<u8>> = Vec::with_capacity(n);
            for _ in 0..n {
                out.push(tc.draw(gs::binary().max_size(64)));
            }
            out
        }
731
        /// Append every payload as a `DsseEnvelope` leaf with
        /// timestamp `i + 1`; aborts the process if an append fails
        /// (it is infallible today).
        fn build_log(payloads: &[Vec<u8>]) -> TransparencyLog {
            let mut log = TransparencyLog::new();
            for (i, p) in payloads.iter().enumerate() {
                log.append(LogEntryKind::DsseEnvelope, p, (i as u64) + 1)
                    .unwrap_or_else(|_| std::process::abort());
            }
            log
        }
740
        /// Size accounting matches the number of appended payloads
        /// for arbitrary log sizes in 1..=256.
        #[hegel::test]
        fn prop_tree_size_matches_entries(tc: hegel::TestCase) {
            let payloads = draw_payloads(&tc);
            let log = build_log(&payloads);
            assert_eq!(log.tree_size() as usize, payloads.len());
            assert_eq!(log.entries().len(), payloads.len());
        }
748
        /// Every leaf of every generated log produces an inclusion
        /// proof that verifies against the current root.
        #[hegel::test]
        fn prop_inclusion_proof_roundtrip_for_any_n(tc: hegel::TestCase) {
            let payloads = draw_payloads(&tc);
            let log = build_log(&payloads);
            let root = log.root_hash();
            for (i, p) in payloads.iter().enumerate() {
                let entry = log.entry(i as u64).unwrap_or_else(|| std::process::abort());
                let proof = log
                    .inclusion_proof(i as u64)
                    .unwrap_or_else(|_| std::process::abort());
                // Recompute the leaf hash from the entry metadata
                // plus the original payload bytes.
                let leaf = leaf_hash(
                    entry.kind,
                    entry.seq,
                    entry.timestamp_version,
                    &entry.prev_leaf_hash,
                    p,
                );
                verify_inclusion_proof(
                    leaf,
                    proof.leaf_index,
                    proof.tree_size,
                    &proof.audit_path,
                    root,
                )
                .unwrap_or_else(|_| std::process::abort());
            }
        }
776
        /// Extending a payload by one byte changes its leaf hash, so
        /// the (otherwise valid) proof must stop verifying.
        #[hegel::test]
        fn prop_tampered_payload_rejects(tc: hegel::TestCase) {
            let payloads = draw_payloads(&tc);
            let log = build_log(&payloads);
            let root = log.root_hash();
            let idx = tc.draw(gs::integers::<usize>().max_value(payloads.len().saturating_sub(1)));
            let entry = log
                .entry(idx as u64)
                .unwrap_or_else(|| std::process::abort());
            let original = payloads
                .get(idx)
                .unwrap_or_else(|| std::process::abort())
                .clone();
            let mut tampered = original;
            tampered.push(0xFF);
            let proof = log
                .inclusion_proof(idx as u64)
                .unwrap_or_else(|_| std::process::abort());
            // Leaf hash recomputed over the TAMPERED payload.
            let leaf = leaf_hash(
                entry.kind,
                entry.seq,
                entry.timestamp_version,
                &entry.prev_leaf_hash,
                &tampered,
            );
            assert!(verify_inclusion_proof(
                leaf,
                proof.leaf_index,
                proof.tree_size,
                &proof.audit_path,
                root
            )
            .is_err());
        }
811
        /// A valid proof presented for a DIFFERENT leaf index must
        /// fail — the audit path is position-dependent.
        #[hegel::test]
        fn prop_wrong_index_rejects(tc: hegel::TestCase) {
            // Need n >= 2 so a distinct wrong index exists.
            let n = tc.draw(gs::integers::<usize>().min_value(2).max_value(256));
            let mut payloads: Vec<Vec<u8>> = Vec::with_capacity(n);
            for _ in 0..n {
                payloads.push(tc.draw(gs::binary().max_size(64)));
            }
            let log = build_log(&payloads);
            let root = log.root_hash();
            let real = tc.draw(gs::integers::<usize>().max_value(n - 1));
            let wrong_candidate = tc.draw(gs::integers::<usize>().max_value(n - 1));
            // Choose a different index; fall back if the draw collided.
            let wrong = if wrong_candidate == real {
                (real + 1) % n
            } else {
                wrong_candidate
            };
            let entry = log
                .entry(real as u64)
                .unwrap_or_else(|| std::process::abort());
            let payload = payloads.get(real).unwrap_or_else(|| std::process::abort());
            let proof = log
                .inclusion_proof(real as u64)
                .unwrap_or_else(|_| std::process::abort());
            let leaf = leaf_hash(
                entry.kind,
                entry.seq,
                entry.timestamp_version,
                &entry.prev_leaf_hash,
                payload,
            );
            // Same leaf and path, wrong claimed position.
            let result = verify_inclusion_proof(
                leaf,
                wrong as u64,
                proof.tree_size,
                &proof.audit_path,
                root,
            );
            assert!(result.is_err());
        }
852
853        #[hegel::test]
854        fn prop_tampered_proof_sibling_rejects(tc: hegel::TestCase) {
855            // Need at least 2 leaves so audit_path is non-empty.
856            let n = tc.draw(gs::integers::<usize>().min_value(2).max_value(256));
857            let mut payloads: Vec<Vec<u8>> = Vec::with_capacity(n);
858            for _ in 0..n {
859                payloads.push(tc.draw(gs::binary().max_size(64)));
860            }
861            let log = build_log(&payloads);
862            let root = log.root_hash();
863            let idx = tc.draw(gs::integers::<usize>().max_value(n - 1));
864            let entry = log
865                .entry(idx as u64)
866                .unwrap_or_else(|| std::process::abort());
867            let payload = payloads.get(idx).unwrap_or_else(|| std::process::abort());
868            let mut proof = log
869                .inclusion_proof(idx as u64)
870                .unwrap_or_else(|_| std::process::abort());
871            if proof.audit_path.is_empty() {
872                // Single-leaf tree: nothing to tamper with.
873                return;
874            }
875            let sibling_index =
876                tc.draw(gs::integers::<usize>().max_value(proof.audit_path.len() - 1));
877            if let Some(sibling) = proof.audit_path.get_mut(sibling_index) {
878                sibling[0] ^= 0x01;
879            }
880            let leaf = leaf_hash(
881                entry.kind,
882                entry.seq,
883                entry.timestamp_version,
884                &entry.prev_leaf_hash,
885                payload,
886            );
887            assert!(verify_inclusion_proof(
888                leaf,
889                proof.leaf_index,
890                proof.tree_size,
891                &proof.audit_path,
892                root
893            )
894            .is_err());
895        }
896
897        #[hegel::test]
898        fn prop_leaf_chain_is_monotonic(tc: hegel::TestCase) {
899            let payloads = draw_payloads(&tc);
900            let log = build_log(&payloads);
901            let entries = log.entries();
902            for pair in entries.windows(2) {
903                let prev = &pair[0];
904                let curr = &pair[1];
905                assert_eq!(curr.seq, prev.seq.saturating_add(1));
906                let expected_prev_hash = leaf_hash(
907                    prev.kind,
908                    prev.seq,
909                    prev.timestamp_version,
910                    &prev.prev_leaf_hash,
911                    payloads
912                        .get(prev.seq as usize)
913                        .unwrap_or_else(|| std::process::abort()),
914                );
915                assert_eq!(curr.prev_leaf_hash, expected_prev_hash);
916            }
917        }
918
919        #[hegel::test]
920        fn prop_sth_sign_verify_roundtrip(tc: hegel::TestCase) {
921            let payloads = draw_payloads(&tc);
922            let mut log = build_log(&payloads);
923            let operator = SigningKey::generate();
924            log.set_operator(operator.verifying_key());
925            let sth = log.sign_tree_head(&operator);
926            assert!(log.verify_tree_head(&sth).is_ok());
927        }
928
929        #[hegel::test]
930        fn prop_forged_sth_rejects(tc: hegel::TestCase) {
931            let payloads = draw_payloads(&tc);
932            let mut log = build_log(&payloads);
933            let operator = SigningKey::generate();
934            log.set_operator(operator.verifying_key());
935            let mut sth = log.sign_tree_head(&operator);
936            // Mutate one byte of the signed root after signing.
937            sth.root_hash[0] ^= 0x01;
938            assert!(log.verify_tree_head(&sth).is_err());
939        }
940
941        /// Issue #36 — the incremental subtree-roots cache must
942        /// agree with from-scratch RFC 6962 MTH for every prefix.
943        ///
944        /// Builds a log of N appends, then for each prefix [0..i]
945        /// computes the root via the cache (incremental) and via a
946        /// from-scratch recursion over the leaf slice. They must
947        /// agree at every i.
948        #[hegel::test]
949        fn prop_subtree_cache_matches_from_scratch(tc: hegel::TestCase) {
950            let payloads = draw_payloads(&tc);
951            // Build the log incrementally, snapshotting root_hash at every step.
952            let mut log = TransparencyLog::new();
953            let mut incremental_roots = Vec::with_capacity(payloads.len());
954            for (i, p) in payloads.iter().enumerate() {
955                log.append(LogEntryKind::DsseEnvelope, p, (i as u64) + 1)
956                    .unwrap_or_else(|_| std::process::abort());
957                incremental_roots.push(log.root_hash());
958            }
959            // From-scratch: recompute the MTH for every prefix using
960            // a fresh recursion over the leaf hashes.
961            let leaves: Vec<[u8; 32]> = (0..log.tree_size())
962                .map(|i| log.leaf_hash_at(i).unwrap_or_else(|| std::process::abort()))
963                .collect();
964            for (i, expected) in incremental_roots.iter().enumerate() {
965                let from_scratch = mth_from_scratch(&leaves[..=i]);
966                if from_scratch != *expected {
967                    std::process::abort();
968                }
969            }
970        }
971
972        /// From-scratch RFC 6962 MTH for cross-checking the cache.
973        /// Mirrors the recursion the cache replaced.
974        fn mth_from_scratch(leaves: &[[u8; 32]]) -> [u8; 32] {
975            match leaves.len() {
976                0 => empty_root_for_test(),
977                1 => leaves[0],
978                n => {
979                    let k = split_point_for_test(n);
980                    let left = mth_from_scratch(&leaves[..k]);
981                    let right = mth_from_scratch(&leaves[k..]);
982                    node_hash_for_test(&left, &right)
983                }
984            }
985        }
986
987        const fn split_point_for_test(n: usize) -> usize {
988            let mut k = 1usize;
989            while k.saturating_mul(2) < n {
990                k = k.saturating_mul(2);
991            }
992            k
993        }
994
995        fn empty_root_for_test() -> [u8; 32] {
996            let mut h = blake3::Hasher::new();
997            h.update(LOG_EMPTY_DOMAIN);
998            *h.finalize().as_bytes()
999        }
1000
1001        fn node_hash_for_test(left: &[u8; 32], right: &[u8; 32]) -> [u8; 32] {
1002            let mut h = blake3::Hasher::new();
1003            h.update(LOG_NODE_DOMAIN);
1004            h.update(left);
1005            h.update(right);
1006            *h.finalize().as_bytes()
1007        }
1008
1009        /// Issue #29: a verifier holding only the log + its root can
1010        /// verify every entry's inclusion without the original payload.
1011        #[hegel::test]
1012        fn prop_self_contained_inclusion_proof_verification(tc: hegel::TestCase) {
1013            let payloads = draw_payloads(&tc);
1014            let log = build_log(&payloads);
1015            let root = log.root_hash();
1016
1017            // Deliberately DROP payloads before verification — the
1018            // whole point of this property is that the log is
1019            // self-sufficient for proof verification.
1020            drop(payloads);
1021
1022            for i in 0..log.tree_size() {
1023                let leaf = log.leaf_hash_at(i).unwrap_or_else(|| std::process::abort());
1024                let proof = log
1025                    .inclusion_proof(i)
1026                    .unwrap_or_else(|_| std::process::abort());
1027                verify_inclusion_proof(
1028                    leaf,
1029                    proof.leaf_index,
1030                    proof.tree_size,
1031                    &proof.audit_path,
1032                    root,
1033                )
1034                .unwrap_or_else(|_| std::process::abort());
1035            }
1036        }
1037    }
1038
1039    mod leaf_hash_at_tests {
1040        use super::*;
1041
1042        #[test]
1043        fn leaf_hash_at_matches_leaf_hash_for_every_entry() {
1044            let mut log = TransparencyLog::new();
1045            let payloads: Vec<&[u8]> = vec![b"alpha", b"beta", b"gamma", b"delta", b"epsilon"];
1046            for (i, p) in payloads.iter().enumerate() {
1047                log.append(LogEntryKind::DsseEnvelope, p, (i as u64) + 1)
1048                    .unwrap();
1049            }
1050            for (i, p) in payloads.iter().enumerate() {
1051                let entry = log.entry(i as u64).unwrap();
1052                let expected = leaf_hash(
1053                    entry.kind,
1054                    entry.seq,
1055                    entry.timestamp_version,
1056                    &entry.prev_leaf_hash,
1057                    p,
1058                );
1059                let stored = log.leaf_hash_at(i as u64).unwrap();
1060                assert_eq!(stored, expected);
1061            }
1062        }
1063
1064        #[test]
1065        fn leaf_hash_at_out_of_range_returns_none() {
1066            let mut log = TransparencyLog::new();
1067            log.append(LogEntryKind::VersionAttestation, b"only", 1)
1068                .unwrap();
1069            assert!(log.leaf_hash_at(0).is_some());
1070            assert!(log.leaf_hash_at(1).is_none());
1071            assert!(log.leaf_hash_at(u64::MAX).is_none());
1072        }
1073
1074        #[test]
1075        fn leaf_hash_at_on_empty_log_returns_none() {
1076            let log = TransparencyLog::new();
1077            assert!(log.leaf_hash_at(0).is_none());
1078        }
1079    }
1080}