kaspa_consensus/processes/reachability/tests/
mod.rs

1//!
2//! Test utils for reachability
3//!
4use super::{inquirer::*, tree::*};
5use crate::{
6    model::stores::{
7        children::ChildrenStore,
8        reachability::{ReachabilityStore, ReachabilityStoreReader},
9        relations::{RelationsStore, RelationsStoreReader},
10    },
11    processes::{
12        ghostdag::mergeset::unordered_mergeset_without_selected_parent,
13        reachability::interval::Interval,
14        relations::{delete_reachability_relations, init as relations_init, RelationsStoreExtensions},
15    },
16};
17use itertools::Itertools;
18use kaspa_consensus_core::{
19    blockhash::{BlockHashExtensions, BlockHashes, ORIGIN},
20    BlockHashMap, BlockHashSet,
21};
22use kaspa_database::prelude::{DirectWriter, StoreError};
23use kaspa_hashes::Hash;
24use std::collections::{
25    hash_map::Entry::{Occupied, Vacant},
26    VecDeque,
27};
28use thiserror::Error;
29
30#[cfg(test)]
31pub mod gen;
32
/// A struct with fluent API to streamline reachability store building
pub struct StoreBuilder<'a, T: ReachabilityStore + ?Sized> {
    // Exclusive borrow of the reachability store being populated by the fluent calls
    store: &'a mut T,
}
37
38impl<'a, T: ReachabilityStore + ?Sized> StoreBuilder<'a, T> {
39    pub fn new(store: &'a mut T) -> Self {
40        Self { store }
41    }
42
43    pub fn add_block(&mut self, hash: Hash, parent: Hash) -> &mut Self {
44        let parent_height = if !parent.is_none() {
45            self.store.append_child(parent, hash).unwrap();
46            self.store.get_height(parent).unwrap()
47        } else {
48            0
49        };
50        self.store.insert(hash, parent, Interval::empty(), parent_height + 1).unwrap();
51        self
52    }
53}
54
/// A struct with fluent API to streamline tree building
pub struct TreeBuilder<'a, T: ReachabilityStore + ?Sized> {
    // Exclusive borrow of the reachability store being populated
    store: &'a mut T,
    // Depth parameter forwarded to the reindex algorithm (defaults to perf constants)
    reindex_depth: u64,
    // Slack parameter forwarded to the reindex algorithm (defaults to perf constants)
    reindex_slack: u64,
}
61
62impl<'a, T: ReachabilityStore + ?Sized> TreeBuilder<'a, T> {
63    pub fn new(store: &'a mut T) -> Self {
64        Self {
65            store,
66            reindex_depth: crate::constants::perf::DEFAULT_REINDEX_DEPTH,
67            reindex_slack: crate::constants::perf::DEFAULT_REINDEX_SLACK,
68        }
69    }
70
71    pub fn new_with_params(store: &'a mut T, reindex_depth: u64, reindex_slack: u64) -> Self {
72        Self { store, reindex_depth, reindex_slack }
73    }
74
75    pub fn init(&mut self) -> &mut Self {
76        init(self.store).unwrap();
77        self
78    }
79
80    pub fn init_with_params(&mut self, origin: Hash, capacity: Interval) -> &mut Self {
81        init_with_params(self.store, origin, capacity).unwrap();
82        self
83    }
84
85    pub fn add_block(&mut self, hash: Hash, parent: Hash) -> &mut Self {
86        add_tree_block(self.store, hash, parent, self.reindex_depth, self.reindex_slack).unwrap();
87        try_advancing_reindex_root(self.store, hash, self.reindex_depth, self.reindex_slack).unwrap();
88        self
89    }
90
91    pub fn store(&self) -> &&'a mut T {
92        &self.store
93    }
94}
95
/// A test DAG block: a block hash along with the hashes of its DAG parents
#[derive(Clone)]
pub struct DagBlock {
    pub hash: Hash,
    pub parents: Vec<Hash>,
}
101
102impl DagBlock {
103    pub fn new(hash: Hash, parents: Vec<Hash>) -> Self {
104        Self { hash, parents }
105    }
106}
107
108impl From<(u64, &[u64])> for DagBlock {
109    fn from(value: (u64, &[u64])) -> Self {
110        Self::new(value.0.into(), value.1.iter().map(|&i| i.into()).collect())
111    }
112}
113
/// A struct with fluent API to streamline DAG building
pub struct DagBuilder<'a, T: ReachabilityStore + ?Sized, S: RelationsStore + ChildrenStore + ?Sized> {
    // Reachability store, kept in sync with the relations store below
    reachability: &'a mut T,
    // Parent/child relations store
    relations: &'a mut S,
}
119
120impl<'a, T: ReachabilityStore + ?Sized, S: RelationsStore + ChildrenStore + ?Sized> DagBuilder<'a, T, S> {
121    pub fn new(reachability: &'a mut T, relations: &'a mut S) -> Self {
122        Self { reachability, relations }
123    }
124
125    pub fn init(&mut self) -> &mut Self {
126        init(self.reachability).unwrap();
127        relations_init(self.relations);
128        self
129    }
130
131    pub fn delete_block(&mut self, hash: Hash) -> &mut Self {
132        self.delete_block_with_writer(self.relations.default_writer(), hash)
133    }
134
135    pub fn delete_block_with_writer(&mut self, writer: impl DirectWriter, hash: Hash) -> &mut Self {
136        let mergeset = delete_reachability_relations(writer, self.relations, self.reachability, hash);
137        delete_block(self.reachability, hash, &mut mergeset.iter().cloned()).unwrap();
138        self
139    }
140
141    pub fn add_block(&mut self, block: DagBlock) -> &mut Self {
142        // Select by height (longest chain) just for the sake of internal isolated tests
143        let selected_parent = block.parents.iter().cloned().max_by_key(|p| self.reachability.get_height(*p).unwrap()).unwrap();
144        let mergeset = unordered_mergeset_without_selected_parent(self.relations, self.reachability, selected_parent, &block.parents);
145        add_block(self.reachability, block.hash, selected_parent, &mut mergeset.iter().cloned()).unwrap();
146        hint_virtual_selected_parent(self.reachability, block.hash).unwrap();
147        self.relations.insert(block.hash, BlockHashes::new(block.parents)).unwrap();
148        self
149    }
150
151    pub fn store(&self) -> &&'a mut T {
152        &self.reachability
153    }
154}
155
156/// Validates that relations are consistent and do not contain any dangling hash etc
157pub fn validate_relations<S: RelationsStoreReader + ?Sized>(relations: &S) -> std::result::Result<(), TestError> {
158    let mut queue = VecDeque::<Hash>::from([ORIGIN]);
159    let mut visited: BlockHashSet = queue.iter().copied().collect();
160    while let Some(current) = queue.pop_front() {
161        let parents = relations.get_parents(current)?;
162        assert_eq!(parents.len(), parents.iter().copied().unique_by(|&h| h).count(), "duplicate hashes in parents array");
163        for parent in parents.iter().copied() {
164            let parent_children = relations.get_children(parent)?.read().iter().copied().collect_vec();
165            assert!(parent_children.contains(&current), "missing child entry");
166        }
167        let children = relations.get_children(current)?.read().iter().copied().collect_vec();
168        assert_eq!(children.len(), children.iter().copied().unique_by(|&h| h).count(), "duplicate hashes in children array");
169        for child in children.iter().copied() {
170            if visited.insert(child) {
171                queue.push_back(child);
172            }
173        }
174    }
175    let expected_counts = (visited.len(), visited.len());
176    let actual_counts = relations.counts().unwrap();
177    if actual_counts != expected_counts {
178        return Err(TestError::WrongCounts(expected_counts, actual_counts));
179    }
180    Ok(())
181}
182
183/// Returns the reachability subtree of `root`, i.e., all blocks B ∈ G s.t. `root` ∈ `chain(B)`
184pub fn subtree<S: ReachabilityStoreReader + ?Sized>(reachability: &S, root: Hash) -> BlockHashSet {
185    let mut queue = VecDeque::<Hash>::from([root]);
186    let mut vec = Vec::new();
187    while let Some(parent) = queue.pop_front() {
188        let children = reachability.get_children(parent).unwrap();
189        queue.extend(children.iter());
190        vec.extend(children.iter());
191    }
192    let len = vec.len();
193    let set: BlockHashSet = vec.into_iter().collect();
194    assert_eq!(len, set.len());
195    set
196}
197
198/// Returns the inclusive DAG past of `hash`, i.e., all blocks which are reachable from `hash` via some parent path.
199/// Note that the `past` is built using a BFS traversal so it can be used as reference for testing the reachability
200/// oracle   
201pub fn inclusive_past<S: RelationsStoreReader + ?Sized>(relations: &S, hash: Hash) -> BlockHashSet {
202    let mut queue = VecDeque::<Hash>::from([hash]);
203    let mut visited: BlockHashSet = queue.iter().copied().collect();
204    while let Some(current) = queue.pop_front() {
205        let parents = relations.get_parents(current).unwrap();
206        for parent in parents.iter().copied() {
207            if parent != ORIGIN && visited.insert(parent) {
208                queue.push_back(parent);
209            }
210        }
211    }
212    visited
213}
214
215/// Builds a full DAG reachability matrix of all block pairs (B, C) ∈ G x G. The returned matrix is built
216/// using explicit past traversals so it can be used as reference for testing the reachability oracle
217pub fn build_transitive_closure_ref<S: RelationsStoreReader + ?Sized>(relations: &S, hashes: &[Hash]) -> TransitiveClosure {
218    let mut closure = TransitiveClosure::new();
219    for x in hashes.iter().copied() {
220        let past = inclusive_past(relations, x);
221        for y in hashes.iter().copied() {
222            closure.set(x, y, past.contains(&y));
223        }
224    }
225    closure
226}
227
228/// Builds a full DAG reachability matrix of all block pairs (B, C) ∈ G x G by querying the reachability oracle.
229/// The function also asserts this matrix against a closure reference obtained by explicit past traversals
230pub fn build_transitive_closure<S: RelationsStoreReader + ?Sized, V: ReachabilityStoreReader + ?Sized>(
231    relations: &S,
232    reachability: &V,
233    hashes: &[Hash],
234) -> TransitiveClosure {
235    let mut closure = TransitiveClosure::new();
236    for x in hashes.iter().copied() {
237        for y in hashes.iter().copied() {
238            closure.set(x, y, is_dag_ancestor_of(reachability, y, x).unwrap());
239        }
240    }
241    let expected_closure = build_transitive_closure_ref(relations, hashes);
242    assert_eq!(expected_closure, closure);
243    closure
244}
245
246/// Builds a full chain reachability matrix of all block pairs (B, C) ∈ G x G. The returned matrix is built
247/// using explicit subtree traversals so it can be used as reference for testing the reachability oracle
248pub fn build_chain_closure_ref<S: ReachabilityStoreReader + ?Sized>(reachability: &S, hashes: &[Hash]) -> TransitiveClosure {
249    let mut closure = TransitiveClosure::new();
250    for x in hashes.iter().copied() {
251        let subtree = subtree(reachability, x);
252        for y in hashes.iter().copied() {
253            closure.set(x, y, x == y || subtree.contains(&y));
254        }
255    }
256    closure
257}
258
259/// Builds a full chain reachability matrix of all block pairs (B, C) ∈ G x G by querying the reachability oracle.
260/// The function also asserts this matrix against a chain closure reference obtained by explicit subtree traversals
261pub fn build_chain_closure<V: ReachabilityStoreReader + ?Sized>(reachability: &V, hashes: &[Hash]) -> TransitiveClosure {
262    let mut closure = TransitiveClosure::new();
263    for x in hashes.iter().copied() {
264        for y in hashes.iter().copied() {
265            closure.set(x, y, is_chain_ancestor_of(reachability, x, y).unwrap());
266        }
267    }
268    let expected_closure = build_chain_closure_ref(reachability, hashes);
269    assert_eq!(expected_closure, closure);
270    closure
271}
272
273/// Builds full chain and DAG closures for all block pairs (B, C) ∈ G x G and asserts them against
274/// the provided references. The provided references might contain more information (of blocks already
275/// deleted), hence we only verify a subset relation   
276pub fn validate_closures<S: RelationsStoreReader + ?Sized, V: ReachabilityStoreReader + ?Sized>(
277    relations: &S,
278    reachability: &V,
279    chain_closure_ref: &TransitiveClosure,
280    dag_closure_ref: &TransitiveClosure,
281    hashes_ref: &BlockHashSet,
282) {
283    let hashes = subtree(reachability, ORIGIN).into_iter().collect_vec();
284    assert_eq!(hashes_ref, &hashes.iter().copied().collect::<BlockHashSet>());
285    let chain_closure = build_chain_closure(reachability, &hashes);
286    let dag_closure = build_transitive_closure(relations, reachability, &hashes);
287    assert!(chain_closure.subset_of(chain_closure_ref));
288    assert!(dag_closure.subset_of(dag_closure_ref));
289    assert_eq!(reachability.count().unwrap(), hashes.len() + 1);
290}
291
/// A struct for holding full quadratic reachability information. Can be used for chain or DAG
/// reachability closures. Note this should only be used for relatively small DAGs due to its
/// quadratic space requirement
#[derive(PartialEq, Eq, Debug, Default)]
pub struct TransitiveClosure {
    // matrix[x][y] holds the boolean answer for the pair (x, y); the exact relation and its
    // direction depend on the builder used (see the build_* helpers in this module)
    matrix: BlockHashMap<BlockHashMap<bool>>,
}
299
300impl TransitiveClosure {
301    pub fn new() -> Self {
302        Self { matrix: Default::default() }
303    }
304
305    pub fn set(&mut self, x: Hash, y: Hash, b: bool) {
306        let row = match self.matrix.entry(x) {
307            Occupied(e) => e.into_mut(),
308            Vacant(e) => e.insert(Default::default()),
309        };
310
311        if let Vacant(e) = row.entry(y) {
312            e.insert(b);
313        } else {
314            panic!()
315        }
316    }
317
318    pub fn get(&self, x: Hash, y: Hash) -> Option<bool> {
319        Some(*self.matrix.get(&x)?.get(&y)?)
320    }
321
322    /// Checks if this matrix is a subset of `other`
323    pub fn subset_of(&self, other: &TransitiveClosure) -> bool {
324        for (x, row) in self.matrix.iter() {
325            for (y, val) in row.iter() {
326                if let Some(other_val) = other.get(*x, *y) {
327                    if other_val != *val {
328                        return false;
329                    }
330                } else {
331                    return false;
332                }
333            }
334        }
335        true
336    }
337}
338
/// Errors produced by the validation helpers in this module
#[derive(Error, Debug)]
pub enum TestError {
    #[error("data store error")]
    StoreError(#[from] StoreError),

    #[error("empty interval")]
    EmptyInterval(Hash, Interval),

    #[error("sibling intervals are expected to be consecutive")]
    NonConsecutiveSiblingIntervals(Interval, Interval),

    #[error("future covering set intervals are expected to be ordered")]
    NonOrderedFutureCoveringItems(Interval, Interval),

    #[error("child interval out of parent bounds")]
    IntervalOutOfParentBounds { parent: Hash, child: Hash, parent_interval: Interval, child_interval: Interval },

    #[error("expected store counts: {0:?}, but got: {1:?}")]
    WrongCounts((usize, usize), (usize, usize)),
}
359
/// Handy validation queries implemented for any reachability store reader
pub trait StoreValidationExtensions {
    /// Checks if `block` is in the past of `other` (creates hashes from the u64 numbers)
    fn in_past_of(&self, block: u64, other: u64) -> bool;

    /// Checks if `block` and `other` are in the anticone of each other
    /// (creates hashes from the u64 numbers)
    fn are_anticone(&self, block: u64, other: u64) -> bool;

    /// Validates that all tree intervals match the expected interval relations
    fn validate_intervals(&self, root: Hash) -> std::result::Result<(), TestError>;
}
371
372impl<T: ReachabilityStoreReader + ?Sized> StoreValidationExtensions for T {
373    fn in_past_of(&self, block: u64, other: u64) -> bool {
374        if block == other {
375            return false;
376        }
377        let res = is_dag_ancestor_of(self, block.into(), other.into()).unwrap();
378        if res {
379            // Assert that the `future` relation is indeed asymmetric
380            assert!(!is_dag_ancestor_of(self, other.into(), block.into()).unwrap())
381        }
382        res
383    }
384
385    fn are_anticone(&self, block: u64, other: u64) -> bool {
386        !is_dag_ancestor_of(self, block.into(), other.into()).unwrap()
387            && !is_dag_ancestor_of(self, other.into(), block.into()).unwrap()
388    }
389
390    fn validate_intervals(&self, root: Hash) -> std::result::Result<(), TestError> {
391        let mut queue = VecDeque::<Hash>::from([root]);
392        while let Some(parent) = queue.pop_front() {
393            let children = self.get_children(parent)?;
394            queue.extend(children.iter());
395
396            let parent_interval = self.get_interval(parent)?;
397            if parent_interval.is_empty() {
398                return Err(TestError::EmptyInterval(parent, parent_interval));
399            }
400
401            // Verify parent-child strict relation
402            for child in children.iter().cloned() {
403                let child_interval = self.get_interval(child)?;
404                if !parent_interval.strictly_contains(child_interval) {
405                    return Err(TestError::IntervalOutOfParentBounds { parent, child, parent_interval, child_interval });
406                }
407            }
408
409            // Iterate over consecutive siblings
410            for siblings in children.windows(2) {
411                let sibling_interval = self.get_interval(siblings[0])?;
412                let current_interval = self.get_interval(siblings[1])?;
413                if sibling_interval.end + 1 != current_interval.start {
414                    return Err(TestError::NonConsecutiveSiblingIntervals(sibling_interval, current_interval));
415                }
416            }
417
418            // Assert future covering set exists and is ordered correctly
419            let future_covering_set = self.get_future_covering_set(parent)?;
420            for neighbors in future_covering_set.windows(2) {
421                let left_interval = self.get_interval(neighbors[0])?;
422                let right_interval = self.get_interval(neighbors[1])?;
423                if left_interval.is_empty() {
424                    return Err(TestError::EmptyInterval(neighbors[0], left_interval));
425                }
426                if right_interval.is_empty() {
427                    return Err(TestError::EmptyInterval(neighbors[1], right_interval));
428                }
429                if left_interval.end >= right_interval.start {
430                    return Err(TestError::NonOrderedFutureCoveringItems(left_interval, right_interval));
431                }
432            }
433        }
434        Ok(())
435    }
436}