use std::cmp::Ordering;
use std::collections::{HashMap, HashSet};
use std::time::Instant;
use serde::{Deserialize, Serialize};
use crate::ant_protocol::XorName;
use saorsa_core::identity::PeerId;
/// Lifecycle state of a hinted key as it moves through verification,
/// fetching, and storage.
///
/// NOTE(review): variant meanings are inferred from names — the state machine
/// that drives transitions is not in this file; confirm against its driver.
/// The test module below treats `QuorumAbandoned`, `FetchAbandoned`, `Stored`,
/// and `Idle` as terminal states and all others as non-terminal.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum VerificationState {
    /// A hint/offer for the key was received but not yet acted on.
    OfferReceived,
    /// Queued for quorum verification.
    PendingVerify,
    /// Verified by quorum of peers.
    QuorumVerified,
    /// Verified against a paid list (paid-only pipeline).
    PaidListVerified,
    /// Verification passed; waiting in the fetch queue.
    QueuedForFetch,
    /// Fetch in progress.
    Fetching,
    /// Fetch succeeded and the value is stored (terminal).
    Stored,
    /// Fetch failed but may be retried.
    FetchRetryable,
    /// Fetch permanently given up (terminal).
    FetchAbandoned,
    /// Quorum verification failed.
    QuorumFailed,
    /// Quorum verification produced no decisive answer.
    QuorumInconclusive,
    /// Quorum verification permanently given up (terminal).
    QuorumAbandoned,
    /// No activity for this key (terminal).
    Idle,
}
/// Which verification pipeline a hint belongs to.
///
/// NOTE(review): semantics inferred from names — `Replica` presumably covers
/// replication hints and `PaidOnly` hints verified solely against a paid
/// list; confirm with the code that assigns this value.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum HintPipeline {
    /// Hint originating from replication.
    Replica,
    /// Hint verified only via the paid list.
    PaidOnly,
}
/// Per-key bookkeeping while a hinted key is verified and fetched.
#[derive(Debug, Clone)]
pub struct VerificationEntry {
    /// Current position in the verification/fetch lifecycle.
    pub state: VerificationState,
    /// Pipeline (replica vs. paid-only) that produced this entry.
    pub pipeline: HintPipeline,
    /// Peers that verified the key — presumably usable as fetch sources;
    /// confirm with the consumer of this field.
    pub verified_sources: Vec<PeerId>,
    /// Sources already attempted (a set, so duplicates are not re-recorded) —
    /// presumably to avoid retrying the same peer.
    pub tried_sources: HashSet<PeerId>,
    /// Creation time on the monotonic clock (`Instant`), not wall-clock.
    pub created_at: Instant,
    /// Peer that sent the original hint.
    pub hint_sender: PeerId,
}
/// A key queued for fetching, prioritized by distance.
///
/// `Ord` is reversed on `distance` (see the `Ord` impl below) so that a
/// max-`BinaryHeap` of candidates pops the *nearest* key first — the test
/// module exercises exactly this.
#[derive(Debug, Clone)]
pub struct FetchCandidate {
    /// The key to fetch.
    pub key: XorName,
    /// Distance value used for priority — smaller compares "greater" under
    /// the reversed `Ord`, so it pops first from a `BinaryHeap`.
    pub distance: XorName,
    /// Peers believed to hold the key. Deliberately excluded from
    /// `PartialEq`/`Ord` (see impls below).
    pub sources: Vec<PeerId>,
}
impl Eq for FetchCandidate {}

impl PartialEq for FetchCandidate {
    /// Equality mirrors the `Ord` impl: it considers `key` and `distance`
    /// only — `sources` is deliberately left out so heap ordering and
    /// equality agree.
    fn eq(&self, other: &Self) -> bool {
        self.key == other.key && self.distance == other.distance
    }
}
impl Ord for FetchCandidate {
    /// Reversed distance ordering: a *smaller* distance compares `Greater`,
    /// so a max-`BinaryHeap` pops the nearest candidate first. Ties on
    /// distance fall back to the key for a deterministic total order.
    fn cmp(&self, other: &Self) -> Ordering {
        self.distance
            .cmp(&other.distance)
            .reverse()
            .then_with(|| self.key.cmp(&other.key))
    }
}
impl PartialOrd for FetchCandidate {
    /// Canonical delegation to `Ord` — keeps `partial_cmp` and `cmp`
    /// consistent, as clippy's `derive_ord_xor_partial_ord` guidance requires.
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}
/// Per-peer evidence about whether a key is present on that peer.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum PresenceEvidence {
    /// Peer reported the key as present.
    Present,
    /// Peer reported the key as absent.
    Absent,
    /// No usable answer was obtained (e.g. no response) — presumed; confirm
    /// with the code that records this value.
    Unresolved,
}
/// Per-peer evidence from a paid-list check for a key.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum PaidListEvidence {
    /// The key was confirmed on the paid list.
    Confirmed,
    /// The key was not found on the paid list.
    NotFound,
    /// No usable answer was obtained — presumed; confirm with the recorder.
    Unresolved,
}
/// Accumulated verification evidence for one key, keyed by responding peer.
#[derive(Debug, Clone)]
pub struct KeyVerificationEvidence {
    /// Presence answers gathered per peer.
    pub presence: HashMap<PeerId, PresenceEvidence>,
    /// Paid-list answers gathered per peer.
    pub paid_list: HashMap<PeerId, PaidListEvidence>,
}
/// Evidence of peer misbehavior or failure, presumably fed into scoring or
/// reputation logic elsewhere — confirm with the consumer.
#[derive(Debug, Clone)]
pub enum FailureEvidence {
    /// A peer failed to replicate a specific key.
    ReplicationFailure {
        /// The failing peer.
        peer: PeerId,
        /// The key that failed replication.
        key: XorName,
    },
    /// A peer failed an audit challenge.
    AuditFailure {
        /// Identifier of the audit challenge this evidence belongs to.
        challenge_id: u64,
        /// The peer that was challenged.
        challenged_peer: PeerId,
        /// Keys the audit confirmed the peer does not correctly hold.
        confirmed_failed_keys: Vec<XorName>,
        /// Why the audit was judged a failure.
        reason: AuditFailureReason,
    },
    /// A peer abused bootstrap claims.
    BootstrapClaimAbuse {
        /// The abusing peer.
        peer: PeerId,
        /// When the peer was first seen (monotonic clock).
        first_seen: Instant,
    },
}
/// Reason a peer failed an audit challenge.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub enum AuditFailureReason {
    /// No response within the allowed window.
    Timeout,
    /// Response could not be parsed / was structurally invalid.
    MalformedResponse,
    /// Returned digest did not match the expected value.
    DigestMismatch,
    /// The audited key was absent on the peer.
    KeyAbsent,
    /// The peer explicitly rejected the challenge.
    Rejected,
}
/// Sync history for a single peer, used to decide when a repair pass is
/// worthwhile (see `has_repair_opportunity`).
#[derive(Debug, Clone)]
pub struct PeerSyncRecord {
    /// Last successful sync, or `None` if the peer has never synced.
    pub last_sync: Option<Instant>,
    /// Number of cycles completed since `last_sync`.
    pub cycles_since_sync: u32,
}
impl PeerSyncRecord {
    /// A repair pass is worthwhile only for a peer that has synced at least
    /// once AND has seen at least one full cycle since that sync. A
    /// never-synced peer (`last_sync == None`) never qualifies, regardless
    /// of how many cycles have elapsed.
    #[must_use]
    pub fn has_repair_opportunity(&self) -> bool {
        match self.last_sync {
            None => false,
            Some(_) => self.cycles_since_sync >= 1,
        }
    }
}
/// State for one pass of syncing with close neighbors.
#[derive(Debug)]
pub struct NeighborSyncState {
    /// Neighbors to visit this cycle, in visiting order.
    pub order: Vec<PeerId>,
    /// Index of the next neighbor in `order`; the cycle is complete once
    /// `cursor >= order.len()` (see `is_cycle_complete`).
    pub cursor: usize,
    /// Last sync time recorded per peer (monotonic clock).
    pub last_sync_times: HashMap<PeerId, Instant>,
    /// First-seen time of bootstrap claims per peer — presumably used to
    /// detect claim abuse (cf. `FailureEvidence::BootstrapClaimAbuse`).
    pub bootstrap_claims: HashMap<PeerId, Instant>,
}
impl NeighborSyncState {
    /// Starts a fresh sync cycle over `close_neighbors`, with the cursor at
    /// the beginning and no recorded sync times or bootstrap claims.
    #[must_use]
    pub fn new_cycle(close_neighbors: Vec<PeerId>) -> Self {
        Self {
            cursor: 0,
            last_sync_times: HashMap::new(),
            bootstrap_claims: HashMap::new(),
            order: close_neighbors,
        }
    }

    /// True once the cursor has walked past every peer in `order`.
    /// An empty `order` is trivially complete.
    #[must_use]
    pub fn is_cycle_complete(&self) -> bool {
        self.order.len() <= self.cursor
    }
}
/// Progress of the bootstrap phase.
///
/// Note: `is_drained` reports the explicit `drained` flag ONLY — pending
/// requests/keys do not implicitly drain it (the tests below pin this).
#[derive(Debug)]
pub struct BootstrapState {
    /// Explicit "bootstrap finished" flag; must be set by the owner.
    pub drained: bool,
    /// Outstanding peer requests.
    pub pending_peer_requests: usize,
    /// Keys still awaiting bootstrap processing.
    pub pending_keys: HashSet<XorName>,
}
impl BootstrapState {
    /// Fresh, not-yet-drained state with no pending requests or keys.
    #[must_use]
    pub fn new() -> Self {
        Self {
            pending_keys: HashSet::new(),
            pending_peer_requests: 0,
            drained: false,
        }
    }

    /// Whether bootstrap has been explicitly marked drained. Pending
    /// requests/keys are intentionally NOT consulted here — draining
    /// requires the owner to set the flag.
    #[must_use]
    pub fn is_drained(&self) -> bool {
        self.drained
    }

    /// Drops `key` from the pending set; a no-op if it was not pending.
    pub fn remove_key(&mut self, key: &XorName) {
        // The bool result (was-present) is intentionally discarded.
        let _ = self.pending_keys.remove(key);
    }
}
impl Default for BootstrapState {
    /// Delegates to `new()` so the two constructors can never diverge
    /// (the tests pin this equivalence).
    fn default() -> Self {
        Self::new()
    }
}
#[cfg(test)]
mod tests {
    //! Unit tests for the verification/sync bookkeeping types.
    //! Notable invariants pinned here:
    //! - `FetchCandidate`'s reversed `Ord` makes a max-`BinaryHeap` pop the
    //!   nearest key first.
    //! - `BootstrapState::is_drained` follows the explicit flag only.

    use std::collections::BinaryHeap;

    use super::*;

    /// Deterministic test `PeerId`: all zeros except the first byte.
    fn peer_id_from_byte(b: u8) -> PeerId {
        let mut bytes = [0u8; 32];
        bytes[0] = b;
        PeerId::from_bytes(bytes)
    }

    #[test]
    fn fetch_candidate_nearest_key_has_highest_priority() {
        // Only a single byte differs in each distance; `near` is strictly
        // smaller than `far` under big-endian array comparison.
        let mut near_distance = [0u8; 32];
        near_distance[3] = 1;
        let mut far_distance = [0u8; 32];
        far_distance[0] = 0xFF;

        let near = FetchCandidate {
            key: [1u8; 32],
            distance: near_distance,
            sources: vec![peer_id_from_byte(1)],
        };
        let far = FetchCandidate {
            key: [2u8; 32],
            distance: far_distance,
            sources: vec![peer_id_from_byte(2)],
        };
        assert!(near > far, "nearer candidate should compare greater");

        // BinaryHeap is a max-heap, so the "greater" (nearer) candidate
        // must pop first.
        let mut heap = BinaryHeap::new();
        heap.push(far.clone());
        heap.push(near.clone());
        assert_eq!(heap.len(), 2, "heap should contain both candidates");
        let first = heap.pop();
        assert!(first.is_some(), "first pop should succeed");
        assert_eq!(
            first.map(|c| c.key),
            Some(near.key),
            "nearest key should pop first"
        );
        let second = heap.pop();
        assert!(second.is_some(), "second pop should succeed");
        assert_eq!(
            second.map(|c| c.key),
            Some(far.key),
            "farthest key should pop second"
        );
    }

    #[test]
    fn fetch_candidate_same_distance_and_key_is_equal() {
        // `sources` intentionally plays no part in Eq/Ord.
        let a = FetchCandidate {
            key: [1u8; 32],
            distance: [5u8; 32],
            sources: vec![],
        };
        let b = FetchCandidate {
            key: [1u8; 32],
            distance: [5u8; 32],
            sources: vec![],
        };
        assert_eq!(
            a.cmp(&b),
            Ordering::Equal,
            "same distance + same key should yield Equal"
        );
        assert_eq!(a, b, "PartialEq must agree with Ord");
    }

    #[test]
    fn fetch_candidate_same_distance_different_key_is_deterministic() {
        // The key tie-breaker must make equal-distance candidates ordered.
        let a = FetchCandidate {
            key: [1u8; 32],
            distance: [5u8; 32],
            sources: vec![],
        };
        let b = FetchCandidate {
            key: [2u8; 32],
            distance: [5u8; 32],
            sources: vec![],
        };
        assert_ne!(
            a.cmp(&b),
            Ordering::Equal,
            "same distance + different key must not be Equal"
        );
        assert_ne!(a, b, "PartialEq must agree with Ord");
    }

    #[test]
    fn peer_sync_record_no_sync_yet() {
        let record = PeerSyncRecord {
            last_sync: None,
            cycles_since_sync: 0,
        };
        assert!(
            !record.has_repair_opportunity(),
            "never-synced peer has no repair opportunity"
        );
    }

    #[test]
    fn peer_sync_record_synced_but_no_cycle() {
        let record = PeerSyncRecord {
            last_sync: Some(Instant::now()),
            cycles_since_sync: 0,
        };
        assert!(
            !record.has_repair_opportunity(),
            "synced peer with zero subsequent cycles has no repair opportunity"
        );
    }

    #[test]
    fn peer_sync_record_synced_with_cycle() {
        let record = PeerSyncRecord {
            last_sync: Some(Instant::now()),
            cycles_since_sync: 1,
        };
        assert!(
            record.has_repair_opportunity(),
            "synced peer with >= 1 cycle should have repair opportunity"
        );
    }

    #[test]
    fn peer_sync_record_no_sync_many_cycles() {
        let record = PeerSyncRecord {
            last_sync: None,
            cycles_since_sync: 10,
        };
        assert!(
            !record.has_repair_opportunity(),
            "never-synced peer has no repair opportunity regardless of cycle count"
        );
    }

    #[test]
    fn neighbor_sync_empty_cycle_is_immediately_complete() {
        let state = NeighborSyncState::new_cycle(vec![]);
        assert!(
            state.is_cycle_complete(),
            "empty neighbor list means cycle is complete"
        );
    }

    #[test]
    fn neighbor_sync_new_cycle_not_complete() {
        let peers = vec![peer_id_from_byte(1), peer_id_from_byte(2)];
        let state = NeighborSyncState::new_cycle(peers);
        assert!(
            !state.is_cycle_complete(),
            "fresh cycle with peers should not be complete"
        );
    }

    #[test]
    fn neighbor_sync_cycle_completes_when_cursor_reaches_end() {
        let peers = vec![
            peer_id_from_byte(1),
            peer_id_from_byte(2),
            peer_id_from_byte(3),
        ];
        let mut state = NeighborSyncState::new_cycle(peers);
        // Boundary: cursor == len - 1 is still in progress...
        state.cursor = 2;
        assert!(
            !state.is_cycle_complete(),
            "cursor at len-1 should not be complete"
        );
        // ...cursor == len is complete.
        state.cursor = 3;
        assert!(
            state.is_cycle_complete(),
            "cursor at len should be complete"
        );
    }

    #[test]
    fn neighbor_sync_cursor_past_end_is_still_complete() {
        let peers = vec![peer_id_from_byte(1)];
        let mut state = NeighborSyncState::new_cycle(peers);
        state.cursor = 5;
        assert!(
            state.is_cycle_complete(),
            "cursor past end should still report complete"
        );
    }

    #[test]
    fn bootstrap_state_initial_not_drained() {
        let state = BootstrapState::new();
        assert!(
            !state.is_drained(),
            "initial state must not be drained before bootstrap begins"
        );
    }

    #[test]
    fn bootstrap_state_pending_requests_block_drain() {
        let mut state = BootstrapState::new();
        state.pending_peer_requests = 3;
        assert!(
            !state.is_drained(),
            "pending peer requests should block drain"
        );
    }

    #[test]
    fn bootstrap_state_pending_keys_block_drain() {
        let mut state = BootstrapState::new();
        state.pending_keys.insert([42u8; 32]);
        assert!(!state.is_drained(), "pending keys should block drain");
    }

    #[test]
    fn bootstrap_state_explicit_drained_overrides() {
        // `is_drained` reads only the flag, so pending work cannot
        // un-drain an explicitly drained state.
        let mut state = BootstrapState::new();
        state.pending_peer_requests = 5;
        state.pending_keys.insert([99u8; 32]);
        state.drained = true;
        assert!(
            state.is_drained(),
            "explicit drained flag should override pending counts"
        );
    }

    #[test]
    fn bootstrap_state_requires_explicit_drain() {
        let mut state = BootstrapState::new();
        state.pending_peer_requests = 2;
        state.pending_keys.insert([1u8; 32]);
        // Clearing the counters alone must NOT flip the drained flag.
        state.pending_peer_requests = 0;
        state.pending_keys.clear();
        assert!(
            !state.is_drained(),
            "clearing counters alone must not drain — requires check_bootstrap_drained"
        );
        state.drained = true;
        assert!(state.is_drained(), "explicit flag should drain");
    }

    #[test]
    fn bootstrap_state_default_matches_new() {
        let from_new = BootstrapState::new();
        let from_default = BootstrapState::default();
        assert_eq!(from_new.drained, from_default.drained);
        assert_eq!(
            from_new.pending_peer_requests,
            from_default.pending_peer_requests
        );
        assert_eq!(from_new.pending_keys, from_default.pending_keys);
    }

    #[test]
    fn bootstrap_drain_requires_empty_pending_keys() {
        let key_a: XorName = [0xA0; 32];
        let key_b: XorName = [0xB0; 32];
        let key_c: XorName = [0xC0; 32];

        let mut state = BootstrapState::new();
        // `new()` already zeroes pending_peer_requests; seed three keys.
        state.pending_keys.insert(key_a);
        state.pending_keys.insert(key_b);
        state.pending_keys.insert(key_c);
        assert!(
            !state.is_drained(),
            "should NOT be drained while pending_keys still has entries"
        );

        state.pending_keys.remove(&key_a);
        assert!(!state.is_drained(), "still not drained with 2 pending keys");
        state.pending_keys.remove(&key_b);
        assert!(!state.is_drained(), "still not drained with 1 pending key");
        state.pending_keys.remove(&key_c);
        assert!(
            !state.is_drained(),
            "removing all keys is necessary but not sufficient — needs explicit drain"
        );

        state.drained = true;
        assert!(state.is_drained(), "explicit drain flag should finalize");
    }

    #[test]
    fn verification_state_terminal_variants() {
        let terminal_states = [
            VerificationState::QuorumAbandoned,
            VerificationState::FetchAbandoned,
            VerificationState::Stored,
            VerificationState::Idle,
        ];
        // All terminal states must be pairwise distinct.
        for (i, a) in terminal_states.iter().enumerate() {
            for (j, b) in terminal_states.iter().enumerate() {
                if i != j {
                    assert_ne!(
                        a, b,
                        "terminal states at indices {i} and {j} must be distinct"
                    );
                }
            }
        }
        let non_terminal_states = [
            VerificationState::OfferReceived,
            VerificationState::PendingVerify,
            VerificationState::QuorumVerified,
            VerificationState::PaidListVerified,
            VerificationState::QueuedForFetch,
            VerificationState::Fetching,
            VerificationState::FetchRetryable,
            VerificationState::QuorumFailed,
            VerificationState::QuorumInconclusive,
        ];
        // No terminal state may collide with a non-terminal one.
        for terminal in &terminal_states {
            for non_terminal in &non_terminal_states {
                assert_ne!(
                    terminal, non_terminal,
                    "terminal state {terminal:?} must not equal non-terminal state {non_terminal:?}"
                );
            }
        }
    }

    #[test]
    fn repair_opportunity_requires_both_sync_and_cycle() {
        // checked_sub keeps the Instants valid on platforms where the
        // monotonic clock starts near zero.
        let synced_no_cycle = PeerSyncRecord {
            last_sync: Some(
                Instant::now()
                    .checked_sub(std::time::Duration::from_secs(2))
                    .unwrap_or_else(Instant::now),
            ),
            cycles_since_sync: 0,
        };
        assert!(
            !synced_no_cycle.has_repair_opportunity(),
            "synced with zero subsequent cycles should NOT have repair opportunity"
        );

        let never_synced = PeerSyncRecord {
            last_sync: None,
            cycles_since_sync: 5,
        };
        assert!(
            !never_synced.has_repair_opportunity(),
            "never-synced peer should NOT have repair opportunity regardless of cycles"
        );

        let ready = PeerSyncRecord {
            last_sync: Some(
                Instant::now()
                    .checked_sub(std::time::Duration::from_secs(5))
                    .unwrap_or_else(Instant::now),
            ),
            cycles_since_sync: 1,
        };
        assert!(
            ready.has_repair_opportunity(),
            "synced peer with >= 1 cycle SHOULD have repair opportunity"
        );
    }
}