// ant_node/replication/types.rs

//! Core types for the replication subsystem.
//!
//! These types represent the state machine states, queue entries, and domain
//! concepts from the Kademlia-style replication design (see
//! `docs/REPLICATION_DESIGN.md`).

use std::cmp::Ordering;
use std::collections::{HashMap, HashSet};
use std::time::Instant;

use serde::{Deserialize, Serialize};

use crate::ant_protocol::XorName;
use saorsa_core::identity::PeerId;

// ---------------------------------------------------------------------------
// Verification state machine (Section 8 of REPLICATION_DESIGN.md)
// ---------------------------------------------------------------------------

/// Verification state machine.
///
/// Each unknown key transitions through these states exactly once per offer
/// lifecycle.  See Section 8 of `REPLICATION_DESIGN.md` for the full
/// state-transition diagram.
///
/// `Copy` and `Hash` are derived: the enum is fieldless, and the other small
/// fieldless enums in this module (e.g. `HintPipeline`) already derive
/// `Copy`, so states can be passed by value and used as set/map keys.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum VerificationState {
    /// Offer received, not yet processed.
    OfferReceived,
    /// Passed admission filter, awaiting quorum / paid-list verification.
    PendingVerify,
    /// Presence quorum passed (>= `QuorumNeeded` positives from
    /// `QuorumTargets`).
    QuorumVerified,
    /// Paid-list authorisation succeeded (>= `ConfirmNeeded` confirmations or
    /// derived from replica majority).
    PaidListVerified,
    /// Queued for record fetch.
    QueuedForFetch,
    /// Actively fetching from a verified source.
    Fetching,
    /// Successfully stored locally.
    Stored,
    /// Fetch failed but retryable (alternate sources remain).
    FetchRetryable,
    /// Fetch permanently abandoned (terminal failure or no alternate sources).
    FetchAbandoned,
    /// Quorum failed definitively (both paid-list and presence impossible this
    /// round).
    QuorumFailed,
    /// Quorum inconclusive (timeout with neither success nor fail-fast).
    QuorumInconclusive,
    /// Terminal: quorum abandoned, key forgotten.
    QuorumAbandoned,
    /// Terminal: key returned to idle (forgotten, requires new offer to
    /// re-enter).
    Idle,
}

// ---------------------------------------------------------------------------
// Hint pipeline classification
// ---------------------------------------------------------------------------

/// Whether a key was admitted via replica hints or paid hints only.
///
/// Stored on each `VerificationEntry` so downstream stages know whether the
/// key is fetch-eligible or only updates the paid-for list.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum HintPipeline {
    /// Key is in the admitted replica-hint pipeline (fetch-eligible).
    Replica,
    /// Key is in the paid-hint-only pipeline (`PaidForList` update only, no
    /// fetch).
    PaidOnly,
}

// ---------------------------------------------------------------------------
// Pending-verification table entry
// ---------------------------------------------------------------------------

/// Entry in the pending-verification table.
///
/// Tracks a single key through the verification FSM, recording which peers
/// responded and which have been tried for fetch.
#[derive(Debug, Clone)]
pub struct VerificationEntry {
    /// Current state in the verification FSM.
    pub state: VerificationState,
    /// Which pipeline admitted this key.
    pub pipeline: HintPipeline,
    /// Peers that responded `Present` during verification (verified fetch
    /// sources).
    pub verified_sources: Vec<PeerId>,
    /// Peers already tried for fetch (to avoid retrying the same source).
    pub tried_sources: HashSet<PeerId>,
    /// When this entry was created.
    // Monotonic `Instant`, so this type is deliberately not serde-serializable.
    pub created_at: Instant,
    /// The peer that originally hinted this key (for source tracking).
    pub hint_sender: PeerId,
}

// ---------------------------------------------------------------------------
// Fetch queue candidate
// ---------------------------------------------------------------------------

/// A candidate queued for fetch, ordered by relevance (nearest-first).
///
/// Implements [`Ord`] with *reversed* distance comparison so that a
/// [`BinaryHeap`](std::collections::BinaryHeap) (max-heap) dequeues the
/// nearest key first.
///
/// Note: equality and ordering consider only `key` and `distance`;
/// `sources` is deliberately ignored by the trait impls below.
#[derive(Debug, Clone)]
pub struct FetchCandidate {
    /// The key to fetch.
    pub key: XorName,
    /// XOR distance from self to key (for priority ordering).
    pub distance: XorName,
    /// Verified source peers that responded `Present`.
    pub sources: Vec<PeerId>,
}
116
117impl Eq for FetchCandidate {}
118
119impl PartialEq for FetchCandidate {
120    fn eq(&self, other: &Self) -> bool {
121        self.distance == other.distance && self.key == other.key
122    }
123}
124
125impl Ord for FetchCandidate {
126    fn cmp(&self, other: &Self) -> Ordering {
127        // Reverse ordering: smaller distance = higher priority (BinaryHeap is
128        // max-heap).  Tie-break on key for consistency with PartialEq.
129        other
130            .distance
131            .cmp(&self.distance)
132            .then_with(|| self.key.cmp(&other.key))
133    }
134}
135
136impl PartialOrd for FetchCandidate {
137    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
138        Some(self.cmp(other))
139    }
140}

// ---------------------------------------------------------------------------
// Verification evidence types
// ---------------------------------------------------------------------------

/// Per-key presence evidence from a verification round.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum PresenceEvidence {
    /// Peer holds the record.
    Present,
    /// Peer does not hold the record.
    Absent,
    /// Peer did not respond in time (neutral, not negative).
    Unresolved,
}

/// Per-key paid-list evidence from a verification round.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum PaidListEvidence {
    /// Peer confirms key is in its `PaidForList`.
    Confirmed,
    /// Peer says key is NOT in its `PaidForList`.
    NotFound,
    /// Peer did not respond in time (neutral).
    Unresolved,
}

/// Aggregated verification evidence for a single key from one verification
/// round.
#[derive(Debug, Clone)]
pub struct KeyVerificationEvidence {
    /// Presence evidence per peer (from `QuorumTargets`).
    pub presence: HashMap<PeerId, PresenceEvidence>,
    /// Paid-list evidence per peer (from `PaidTargets`).
    pub paid_list: HashMap<PeerId, PaidListEvidence>,
}

// ---------------------------------------------------------------------------
// Failure evidence (Section 14 — TrustEngine integration)
// ---------------------------------------------------------------------------

/// Failure evidence types emitted to `TrustEngine` (Section 14).
// Unlike the evidence enums above, this type carries an `Instant` and so
// does not derive the serde traits.
#[derive(Debug, Clone)]
pub enum FailureEvidence {
    /// Failed fetch attempt from a source peer.
    ReplicationFailure {
        /// The peer that failed to serve the record.
        peer: PeerId,
        /// The key that could not be fetched.
        key: XorName,
    },
    /// Audit failure with confirmed responsible keys.
    AuditFailure {
        /// Unique identifier for the audit challenge.
        challenge_id: u64,
        /// The peer that was challenged.
        challenged_peer: PeerId,
        /// Keys confirmed as failed.
        confirmed_failed_keys: Vec<XorName>,
        /// Why the audit failed.
        reason: AuditFailureReason,
    },
    /// Peer claiming bootstrap past grace period.
    BootstrapClaimAbuse {
        /// The offending peer.
        peer: PeerId,
        /// When this peer was first seen.
        first_seen: Instant,
    },
}
211
212/// Reason for audit failure.
213#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
214pub enum AuditFailureReason {
215    /// Peer timed out (no response within deadline).
216    Timeout,
217    /// Response was malformed.
218    MalformedResponse,
219    /// One or more per-key digest mismatches.
220    DigestMismatch,
221    /// Key was absent (signalled by sentinel digest).
222    KeyAbsent,
223    /// Peer explicitly rejected the audit challenge.
224    Rejected,
225}

// ---------------------------------------------------------------------------
// Peer sync tracking
// ---------------------------------------------------------------------------

/// Record of sync history with a peer, for `RepairOpportunity` tracking.
#[derive(Debug, Clone)]
pub struct PeerSyncRecord {
    /// Last time we successfully synced with this peer.
    pub last_sync: Option<Instant>,
    /// Number of full neighbor-sync cycles completed since last sync with this
    /// peer.
    pub cycles_since_sync: u32,
}

impl PeerSyncRecord {
    /// Whether this peer has had a repair opportunity (synced at least once
    /// and at least one subsequent cycle has completed).
    ///
    /// A never-synced peer has no opportunity regardless of cycle count.
    #[must_use]
    pub fn has_repair_opportunity(&self) -> bool {
        match self.last_sync {
            // Synced before: an opportunity exists once at least one full
            // cycle has completed since that sync.
            Some(_) => self.cycles_since_sync > 0,
            // Never synced: no opportunity, whatever the cycle count says.
            None => false,
        }
    }
}

// ---------------------------------------------------------------------------
// Neighbor sync cycle state
// ---------------------------------------------------------------------------

/// Neighbor sync cycle state.
///
/// Tracks a deterministic walk through the current close-group snapshot,
/// per-peer cooldown times, and bootstrap claim first-seen timestamps.
#[derive(Debug)]
pub struct NeighborSyncState {
    /// Deterministic ordering of peers for the current cycle (snapshot).
    pub order: Vec<PeerId>,
    /// Current cursor position into `order`.
    pub cursor: usize,
    /// Per-peer last successful sync time (for cooldown).
    pub last_sync_times: HashMap<PeerId, Instant>,
    /// Bootstrap claim first-seen timestamps per peer.
    ///
    /// Entries are removed when a peer passes or fails audit (i.e. stops
    /// claiming bootstrap). Under Sybil attack with many distinct peer IDs
    /// perpetually claiming bootstrap, this map grows unboundedly. In practice
    /// the trust engine limits Sybil impact before this becomes a memory issue.
    pub bootstrap_claims: HashMap<PeerId, Instant>,
}
274
275impl NeighborSyncState {
276    /// Create a new cycle from the given close neighbors.
277    #[must_use]
278    pub fn new_cycle(close_neighbors: Vec<PeerId>) -> Self {
279        Self {
280            order: close_neighbors,
281            cursor: 0,
282            last_sync_times: HashMap::new(),
283            bootstrap_claims: HashMap::new(),
284        }
285    }
286
287    /// Whether the current cycle is complete.
288    #[must_use]
289    pub fn is_cycle_complete(&self) -> bool {
290        self.cursor >= self.order.len()
291    }
292}

// ---------------------------------------------------------------------------
// Bootstrap drain state (Section 16)
// ---------------------------------------------------------------------------

298/// Bootstrap drain state tracking (Section 16).
299#[derive(Debug)]
300pub struct BootstrapState {
301    /// Whether bootstrap is complete (all peer requests done, queues empty).
302    pub drained: bool,
303    /// Number of bootstrap peer requests still pending.
304    pub pending_peer_requests: usize,
305    /// Keys discovered during bootstrap that are still in the verification /
306    /// fetch pipeline.
307    pub pending_keys: HashSet<XorName>,
308}
309
310impl BootstrapState {
311    /// Create initial bootstrap state.
312    #[must_use]
313    pub fn new() -> Self {
314        Self {
315            drained: false,
316            pending_peer_requests: 0,
317            pending_keys: HashSet::new(),
318        }
319    }
320
321    /// Check if bootstrap is drained.
322    ///
323    /// Only returns `true` after [`super::bootstrap::check_bootstrap_drained`] or
324    /// [`super::bootstrap::mark_bootstrap_drained`] has explicitly set the flag. A fresh
325    /// `BootstrapState` is NOT drained — the audit loop must wait until
326    /// bootstrap work has actually completed (Invariant 19).
327    #[must_use]
328    pub fn is_drained(&self) -> bool {
329        self.drained
330    }
331
332    /// Remove a key from the bootstrap pending set.
333    ///
334    /// Called when a key terminally leaves the verification/fetch pipeline
335    /// (stored, abandoned, quorum failed, etc.) so the drain check set
336    /// shrinks incrementally rather than being re-scanned in full.
337    pub fn remove_key(&mut self, key: &XorName) {
338        self.pending_keys.remove(key);
339    }
340}
341
342impl Default for BootstrapState {
343    fn default() -> Self {
344        Self::new()
345    }
346}

// ---------------------------------------------------------------------------
// Tests
// ---------------------------------------------------------------------------

// Tests use only stdlib-constructible values: `PeerId` is built via the
// zero-padded helper below and `XorName` literals are plain `[u8; 32]` arrays.
#[cfg(test)]
mod tests {
    use std::collections::BinaryHeap;

    use super::*;

    /// Helper: build a `PeerId` from a single byte (zero-padded to 32 bytes).
    fn peer_id_from_byte(b: u8) -> PeerId {
        let mut bytes = [0u8; 32];
        bytes[0] = b;
        PeerId::from_bytes(bytes)
    }

    // -- FetchCandidate ordering -------------------------------------------

    #[test]
    fn fetch_candidate_nearest_key_has_highest_priority() {
        // Distance differs in a high-order byte: `near` is much closer.
        let near = FetchCandidate {
            key: [1u8; 32],
            distance: [
                0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                0, 0, 0, 0,
            ],
            sources: vec![peer_id_from_byte(1)],
        };

        let far = FetchCandidate {
            key: [2u8; 32],
            distance: [
                0xFF, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                0, 0, 0, 0, 0,
            ],
            sources: vec![peer_id_from_byte(2)],
        };

        // In a max-heap the "greatest" element pops first.
        // Our reversed Ord makes smaller-distance candidates greater.
        assert!(near > far, "nearer candidate should compare greater");

        let mut heap = BinaryHeap::new();
        heap.push(far.clone());
        heap.push(near.clone());

        assert_eq!(heap.len(), 2, "heap should contain both candidates");

        let first = heap.pop();
        assert!(first.is_some(), "first pop should succeed");
        assert_eq!(
            first.map(|c| c.key),
            Some(near.key),
            "nearest key should pop first"
        );

        let second = heap.pop();
        assert!(second.is_some(), "second pop should succeed");
        assert_eq!(
            second.map(|c| c.key),
            Some(far.key),
            "farthest key should pop second"
        );
    }

    #[test]
    fn fetch_candidate_same_distance_and_key_is_equal() {
        let a = FetchCandidate {
            key: [1u8; 32],
            distance: [5u8; 32],
            sources: vec![],
        };

        let b = FetchCandidate {
            key: [1u8; 32],
            distance: [5u8; 32],
            sources: vec![],
        };

        assert_eq!(
            a.cmp(&b),
            Ordering::Equal,
            "same distance + same key should yield Equal"
        );
        assert_eq!(a, b, "PartialEq must agree with Ord");
    }

    #[test]
    fn fetch_candidate_same_distance_different_key_is_deterministic() {
        // The key tie-break in `Ord::cmp` must keep distinct keys distinct.
        let a = FetchCandidate {
            key: [1u8; 32],
            distance: [5u8; 32],
            sources: vec![],
        };

        let b = FetchCandidate {
            key: [2u8; 32],
            distance: [5u8; 32],
            sources: vec![],
        };

        assert_ne!(
            a.cmp(&b),
            Ordering::Equal,
            "same distance + different key must not be Equal"
        );
        assert_ne!(a, b, "PartialEq must agree with Ord");
    }

    // -- PeerSyncRecord ----------------------------------------------------

    #[test]
    fn peer_sync_record_no_sync_yet() {
        let record = PeerSyncRecord {
            last_sync: None,
            cycles_since_sync: 0,
        };
        assert!(
            !record.has_repair_opportunity(),
            "never-synced peer has no repair opportunity"
        );
    }

    #[test]
    fn peer_sync_record_synced_but_no_cycle() {
        let record = PeerSyncRecord {
            last_sync: Some(Instant::now()),
            cycles_since_sync: 0,
        };
        assert!(
            !record.has_repair_opportunity(),
            "synced peer with zero subsequent cycles has no repair opportunity"
        );
    }

    #[test]
    fn peer_sync_record_synced_with_cycle() {
        let record = PeerSyncRecord {
            last_sync: Some(Instant::now()),
            cycles_since_sync: 1,
        };
        assert!(
            record.has_repair_opportunity(),
            "synced peer with >= 1 cycle should have repair opportunity"
        );
    }

    #[test]
    fn peer_sync_record_no_sync_many_cycles() {
        let record = PeerSyncRecord {
            last_sync: None,
            cycles_since_sync: 10,
        };
        assert!(
            !record.has_repair_opportunity(),
            "never-synced peer has no repair opportunity regardless of cycle count"
        );
    }

    // -- NeighborSyncState -------------------------------------------------

    #[test]
    fn neighbor_sync_empty_cycle_is_immediately_complete() {
        let state = NeighborSyncState::new_cycle(vec![]);
        assert!(
            state.is_cycle_complete(),
            "empty neighbor list means cycle is complete"
        );
    }

    #[test]
    fn neighbor_sync_new_cycle_not_complete() {
        let peers = vec![peer_id_from_byte(1), peer_id_from_byte(2)];
        let state = NeighborSyncState::new_cycle(peers);
        assert!(
            !state.is_cycle_complete(),
            "fresh cycle with peers should not be complete"
        );
    }

    #[test]
    fn neighbor_sync_cycle_completes_when_cursor_reaches_end() {
        let peers = vec![
            peer_id_from_byte(1),
            peer_id_from_byte(2),
            peer_id_from_byte(3),
        ];
        let mut state = NeighborSyncState::new_cycle(peers);

        // Simulate stepping through the cycle.
        state.cursor = 2;
        assert!(
            !state.is_cycle_complete(),
            "cursor at len-1 should not be complete"
        );

        state.cursor = 3;
        assert!(
            state.is_cycle_complete(),
            "cursor at len should be complete"
        );
    }

    #[test]
    fn neighbor_sync_cursor_past_end_is_still_complete() {
        let peers = vec![peer_id_from_byte(1)];
        let mut state = NeighborSyncState::new_cycle(peers);
        state.cursor = 5;
        assert!(
            state.is_cycle_complete(),
            "cursor past end should still report complete"
        );
    }

    // -- BootstrapState ----------------------------------------------------

    #[test]
    fn bootstrap_state_initial_not_drained() {
        // A freshly created state must NOT report drained — the bootstrap
        // sync task has not started yet (Invariant 19 race prevention).
        let state = BootstrapState::new();
        assert!(
            !state.is_drained(),
            "initial state must not be drained before bootstrap begins"
        );
    }

    #[test]
    fn bootstrap_state_pending_requests_block_drain() {
        // `is_drained` reads only the flag; this documents that pending work
        // never flips the flag on its own.
        let mut state = BootstrapState::new();
        state.pending_peer_requests = 3;
        assert!(
            !state.is_drained(),
            "pending peer requests should block drain"
        );
    }

    #[test]
    fn bootstrap_state_pending_keys_block_drain() {
        let mut state = BootstrapState::new();
        state.pending_keys.insert([42u8; 32]);
        assert!(!state.is_drained(), "pending keys should block drain");
    }

    #[test]
    fn bootstrap_state_explicit_drained_overrides() {
        let mut state = BootstrapState::new();
        state.pending_peer_requests = 5;
        state.pending_keys.insert([99u8; 32]);
        state.drained = true;
        assert!(
            state.is_drained(),
            "explicit drained flag should override pending counts"
        );
    }

    #[test]
    fn bootstrap_state_requires_explicit_drain() {
        let mut state = BootstrapState::new();
        state.pending_peer_requests = 2;
        state.pending_keys.insert([1u8; 32]);

        // Simulate completing work — but without explicit drain flag.
        state.pending_peer_requests = 0;
        state.pending_keys.clear();

        assert!(
            !state.is_drained(),
            "clearing counters alone must not drain — requires check_bootstrap_drained"
        );

        // Explicit drain (set by check_bootstrap_drained or mark_bootstrap_drained).
        state.drained = true;
        assert!(state.is_drained(), "explicit flag should drain");
    }

    #[test]
    fn bootstrap_state_default_matches_new() {
        let from_new = BootstrapState::new();
        let from_default = BootstrapState::default();

        assert_eq!(from_new.drained, from_default.drained);
        assert_eq!(
            from_new.pending_peer_requests,
            from_default.pending_peer_requests
        );
        assert_eq!(from_new.pending_keys, from_default.pending_keys);
    }

    // -- Scenario tests -------------------------------------------------------

    /// #13: Bootstrap not drained while `pending_keys` overlap with the
    /// pipeline. Keys must be removed from `pending_keys` for drain to occur.
    #[test]
    fn bootstrap_drain_requires_empty_pending_keys() {
        let key_a: XorName = [0xA0; 32];
        let key_b: XorName = [0xB0; 32];
        let key_c: XorName = [0xC0; 32];

        let mut state = BootstrapState::new();
        state.pending_peer_requests = 0; // requests already done
        state.pending_keys = std::iter::once(key_a)
            .chain(std::iter::once(key_b))
            .chain(std::iter::once(key_c))
            .collect();

        assert!(
            !state.is_drained(),
            "should NOT be drained while pending_keys still has entries"
        );

        // Simulate pipeline processing — remove one key at a time.
        state.pending_keys.remove(&key_a);
        assert!(!state.is_drained(), "still not drained with 2 pending keys");

        state.pending_keys.remove(&key_b);
        assert!(!state.is_drained(), "still not drained with 1 pending key");

        state.pending_keys.remove(&key_c);
        assert!(
            !state.is_drained(),
            "removing all keys is necessary but not sufficient — needs explicit drain"
        );

        // Simulate check_bootstrap_drained setting the flag.
        state.drained = true;
        assert!(state.is_drained(), "explicit drain flag should finalize");
    }

    /// Verify that the FSM terminal states are distinguishable and document
    /// which variants are logically terminal (no outgoing transitions).
    #[test]
    fn verification_state_terminal_variants() {
        let terminal_states = [
            VerificationState::QuorumAbandoned,
            VerificationState::FetchAbandoned,
            VerificationState::Stored,
            VerificationState::Idle,
        ];

        // All terminal states must be distinct from each other.
        for (i, a) in terminal_states.iter().enumerate() {
            for (j, b) in terminal_states.iter().enumerate() {
                if i != j {
                    assert_ne!(
                        a, b,
                        "terminal states at indices {i} and {j} must be distinct"
                    );
                }
            }
        }

        // Terminal states must be distinct from all non-terminal states.
        let non_terminal_states = [
            VerificationState::OfferReceived,
            VerificationState::PendingVerify,
            VerificationState::QuorumVerified,
            VerificationState::PaidListVerified,
            VerificationState::QueuedForFetch,
            VerificationState::Fetching,
            VerificationState::FetchRetryable,
            VerificationState::QuorumFailed,
            VerificationState::QuorumInconclusive,
        ];

        for terminal in &terminal_states {
            for non_terminal in &non_terminal_states {
                assert_ne!(
                    terminal, non_terminal,
                    "terminal state {terminal:?} must not equal non-terminal state {non_terminal:?}"
                );
            }
        }
    }

    /// `has_repair_opportunity` requires BOTH a previous sync AND at least
    /// one subsequent cycle.
    #[test]
    fn repair_opportunity_requires_both_sync_and_cycle() {
        // last_sync = Some, cycles_since_sync = 0 → false (synced but no cycle yet)
        let synced_no_cycle = PeerSyncRecord {
            last_sync: Some(
                Instant::now()
                    .checked_sub(std::time::Duration::from_secs(2))
                    .unwrap_or_else(Instant::now),
            ),
            cycles_since_sync: 0,
        };
        assert!(
            !synced_no_cycle.has_repair_opportunity(),
            "synced with zero subsequent cycles should NOT have repair opportunity"
        );

        // last_sync = None, cycles_since_sync = 5 → false (never synced)
        let never_synced = PeerSyncRecord {
            last_sync: None,
            cycles_since_sync: 5,
        };
        assert!(
            !never_synced.has_repair_opportunity(),
            "never-synced peer should NOT have repair opportunity regardless of cycles"
        );

        // last_sync = Some, cycles_since_sync = 1 → true
        let ready = PeerSyncRecord {
            last_sync: Some(
                Instant::now()
                    .checked_sub(std::time::Duration::from_secs(5))
                    .unwrap_or_else(Instant::now),
            ),
            cycles_since_sync: 1,
        };
        assert!(
            ready.has_repair_opportunity(),
            "synced peer with >= 1 cycle SHOULD have repair opportunity"
        );
    }
}