#![allow(clippy::needless_pass_by_value)]
use std::sync::Arc;
use ipld_core::ipld::Ipld;
use proptest::prelude::*;
use mnem_core::id::{ChangeId, Cid, EdgeId, NodeId};
use mnem_core::objects::{Edge, Node};
use mnem_core::repo::{CommitOptions, ReadonlyRepo};
use mnem_core::store::{Blockstore, MemoryBlockstore, MemoryOpHeadsStore, OpHeadsStore};
fn arb_node_id() -> impl Strategy<Value = NodeId> {
any::<[u8; 16]>().prop_map(NodeId::from_bytes_raw)
}
fn arb_edge_id() -> impl Strategy<Value = EdgeId> {
any::<[u8; 16]>().prop_map(EdgeId::from_bytes_raw)
}
/// One of the four node-type labels these tests exercise, chosen uniformly.
fn arb_ntype() -> impl Strategy<Value = String> {
    prop_oneof![
        Just(String::from("Person")),
        Just(String::from("Place")),
        Just(String::from("Event")),
        Just(String::from("Concept")),
    ]
}
/// One of the four relation labels these tests exercise, chosen uniformly.
fn arb_rel() -> impl Strategy<Value = String> {
    prop_oneof![
        Just(String::from("knows")),
        Just(String::from("located_in")),
        Just(String::from("part_of")),
        Just(String::from("caused_by")),
    ]
}
/// Strategy for a `Node` with the given `id`: a random type label, a short
/// lowercase summary, and zero to three string-valued properties.
fn arb_node(id: NodeId) -> impl Strategy<Value = Node> {
    let props = prop::collection::btree_map("[a-z]{1,6}", "[a-z0-9]{0,12}", 0..4);
    (arb_ntype(), "[a-z ]{0,24}", props).prop_map(move |(ntype, summary, props)| {
        // BTreeMap iterates in key order, so property insertion order is
        // deterministic for a given generated map.
        let base = Node::new(id, ntype).with_summary(summary);
        props
            .into_iter()
            .fold(base, |node, (key, value)| node.with_prop(key, Ipld::String(value)))
    })
}
/// A single mutation applied inside one repository transaction.
#[derive(Clone, Debug)]
enum TxOp {
    /// Add this node to the transaction.
    AddNode(Node),
    /// Add this edge (its endpoints reference generated node ids).
    AddEdge(Edge),
    /// Tombstone (soft-delete) the node with this id.
    Tombstone(NodeId),
}
/// Strategy for one commit's worth of operations: up to `n_nodes` distinct
/// nodes, up to `n_edges` edges between them (deduplicated by `EdgeId`), and
/// tombstones for up to a quarter of the nodes (deduplicated by index).
/// Output order is always nodes, then edges, then tombstones.
fn arb_commit_sequence(n_nodes: usize, n_edges: usize) -> impl Strategy<Value = Vec<TxOp>> {
    prop::collection::vec(arb_node_id(), 1..=n_nodes)
        .prop_map(|ids| {
            // Deduplicate ids, keeping first-occurrence order.
            let mut seen = std::collections::HashSet::new();
            ids.into_iter()
                .filter(|id| seen.insert(*id))
                .collect::<Vec<_>>()
        })
        .prop_flat_map(move |ids: Vec<NodeId>| {
            let node_count = ids.len();
            let ids_for_edges = ids.clone();
            let ids_for_tombs = ids.clone();
            // One boxed strategy per id. A `Vec` of strategies is itself a
            // strategy producing a `Vec` of values, so the previous layer
            // that generated (and then discarded) a vector of `u64` seeds
            // via `prop_flat_map(|_seeds| …)` was pure overhead and has
            // been removed.
            let nodes: Vec<_> = ids
                .iter()
                .copied()
                .map(|id| arb_node(id).prop_map(TxOp::AddNode).boxed())
                .collect();
            let edges = prop::collection::vec(
                (0..node_count, 0..node_count, arb_rel(), arb_edge_id()),
                0..=n_edges,
            )
            .prop_map(move |es| {
                // Drop duplicate edge ids, keeping the first occurrence.
                let mut seen = std::collections::HashSet::new();
                es.into_iter()
                    .filter(|(_, _, _, eid)| seen.insert(*eid))
                    .map(|(s, d, rel, eid)| {
                        TxOp::AddEdge(Edge::new(eid, rel, ids_for_edges[s], ids_for_edges[d]))
                    })
                    .collect::<Vec<_>>()
            });
            let tombstone_count = node_count / 4;
            let tombs = prop::collection::vec(0..node_count, 0..=tombstone_count).prop_map(
                move |idxs| {
                    // Deduplicate tombstone targets by index.
                    let mut seen = std::collections::HashSet::new();
                    idxs.into_iter()
                        .filter(|i| seen.insert(*i))
                        .map(|i| TxOp::Tombstone(ids_for_tombs[i]))
                        .collect::<Vec<_>>()
                },
            );
            (nodes, edges, tombs).prop_map(|(n, e, t)| {
                let mut out = Vec::with_capacity(n.len() + e.len() + t.len());
                out.extend(n);
                out.extend(e);
                out.extend(t);
                out
            })
        })
}
/// Build a fresh in-memory blockstore / op-heads-store pair for one test run.
fn fresh_stores() -> (Arc<dyn Blockstore>, Arc<dyn OpHeadsStore>) {
    let blocks: Arc<dyn Blockstore> = Arc::new(MemoryBlockstore::new());
    let op_heads: Arc<dyn OpHeadsStore> = Arc::new(MemoryOpHeadsStore::new());
    (blocks, op_heads)
}
/// Commit options with a pinned timestamp and change id so commit CIDs are
/// reproducible across runs and across independently built repos.
fn pinned_opts<'a>(author: &'a str, message: &'a str) -> CommitOptions<'a> {
    let base = CommitOptions::new(author, message);
    base.with_time_micros(1_700_000_000_000_000)
        .with_change_id(ChangeId::from_bytes_raw([0x42; 16]))
}
/// Apply every op in `ops` inside a single transaction, commit with pinned
/// options, and return the resulting head CID. Any failure yields `None`.
fn apply_single_commit(ops: &[TxOp]) -> Option<Cid> {
    let (blocks, op_heads) = fresh_stores();
    let repo = ReadonlyRepo::init(blocks, op_heads).ok()?;
    let mut tx = repo.start_transaction();
    for op in ops {
        match op {
            TxOp::AddNode(node) => {
                tx.add_node(node).ok()?;
            }
            TxOp::AddEdge(edge) => {
                tx.add_edge(edge).ok()?;
            }
            TxOp::Tombstone(node_id) => {
                tx.tombstone_node(*node_id, "proptest").ok()?;
            }
        }
    }
    let committed = tx.commit_opts(pinned_opts("proptest", "single")).ok()?;
    committed.view().heads.first().cloned()
}
/// Apply `ops` split across (at most) `k` transactions, committing each batch
/// with a distinct pinned change id and timestamp. Returns the final head CID
/// together with the head commit's IndexSet CID, or `None` on any failure.
fn apply_batched(ops: &[TxOp], k: usize) -> Option<(Cid, Cid)> {
    // At least one batch, and never more batches than there are ops.
    let k = k.clamp(1, ops.len().max(1));
    let (bs, ohs) = fresh_stores();
    let mut repo = ReadonlyRepo::init(bs, ohs).ok()?;
    // `.max(1)` guards `chunks(0)`, which panics; with empty `ops` the loop
    // simply runs zero times. Non-empty input is unaffected.
    let chunk_size = ops.len().div_ceil(k).max(1);
    for (batch_idx, chunk) in ops.chunks(chunk_size).enumerate() {
        let mut tx = repo.start_transaction();
        for op in chunk {
            match op {
                TxOp::AddNode(n) => {
                    tx.add_node(n).ok()?;
                }
                TxOp::AddEdge(e) => {
                    tx.add_edge(e).ok()?;
                }
                TxOp::Tombstone(id) => {
                    tx.tombstone_node(*id, "proptest").ok()?;
                }
            }
        }
        // Unique change id per batch. NOTE(review): `as u8` wraps past 255
        // batches; harmless today because callers pass k <= 8.
        let mut cid_bytes = [0u8; 16];
        cid_bytes[0] = batch_idx as u8;
        let opts = CommitOptions::new("proptest", "batch")
            .with_time_micros(1_700_000_000_000_000 + batch_idx as u64)
            .with_change_id(ChangeId::from_bytes_raw(cid_bytes));
        repo = tx.commit_opts(opts).ok()?;
    }
    let head = repo.view().heads.first().cloned()?;
    let commit = repo.head_commit()?;
    let index_set = commit.indexes.clone()?;
    Some((head, index_set))
}
proptest! {
    #![proptest_config(ProptestConfig {
        cases: 64,
        max_shrink_iters: 1000,
        .. ProptestConfig::default()
    })]

    /// P1: a commit's CID must be invariant under permutation of the order
    /// ops were added within the transaction.
    #[test]
    fn p1_add_order_permutation(
        ops in arb_commit_sequence(16, 24),
        shuffle_seed in any::<u64>(),
    ) {
        let cid_a = apply_single_commit(&ops);
        prop_assume!(cid_a.is_some());
        let mut shuffled = ops.clone();
        // Fisher-Yates shuffle driven by xorshift64; `| 1` keeps the state
        // nonzero (xorshift is a fixed point at zero).
        let mut state = shuffle_seed | 1;
        for i in (1..shuffled.len()).rev() {
            state ^= state << 13;
            state ^= state >> 7;
            state ^= state << 17;
            let j = (state as usize) % (i + 1);
            shuffled.swap(i, j);
        }
        let cid_b = apply_single_commit(&shuffled);
        prop_assert_eq!(
            cid_a, cid_b,
            "commit CID must be invariant under op-order permutation"
        );
    }

    /// P2: committing append-only ops in k incremental batches must produce
    /// the same IndexSet CID as a single full commit.
    #[test]
    fn p2_incremental_vs_full(
        ops in arb_commit_sequence(16, 16),
        k in 2usize..=8,
    ) {
        // Tombstones are filtered out: this property compares append-only
        // sequences only.
        let append_only: Vec<TxOp> = ops
            .into_iter()
            .filter(|op| !matches!(op, TxOp::Tombstone(_)))
            .collect();
        prop_assume!(!append_only.is_empty());
        let full = apply_batched(&append_only, 1);
        let incr = apply_batched(&append_only, k);
        prop_assume!(full.is_some() && incr.is_some());
        let (_full_head, full_idx) = full.unwrap();
        let (_incr_head, incr_idx) = incr.unwrap();
        prop_assert_eq!(
            full_idx, incr_idx,
            "IndexSet CID must match between full rebuild and k={} incremental batches",
            k
        );
    }

    /// P3: loading the same node set into two independent repos must yield
    /// identical label-query hit sequences (order included).
    #[test]
    fn p3_retrieve_determinism(
        ops in arb_commit_sequence(12, 8),
    ) {
        let nodes_only: Vec<TxOp> = ops
            .into_iter()
            .filter(|op| matches!(op, TxOp::AddNode(_)))
            .collect();
        prop_assume!(!nodes_only.is_empty());
        let (bs1, ohs1) = fresh_stores();
        let repo1 = ReadonlyRepo::init(bs1, ohs1).unwrap();
        let mut tx = repo1.start_transaction();
        for op in &nodes_only {
            if let TxOp::AddNode(n) = op {
                tx.add_node(n).unwrap();
            }
        }
        let repo1 = tx.commit_opts(pinned_opts("proptest", "p3")).unwrap();
        let (bs2, ohs2) = fresh_stores();
        let repo2 = ReadonlyRepo::init(bs2, ohs2).unwrap();
        let mut tx = repo2.start_transaction();
        for op in &nodes_only {
            if let TxOp::AddNode(n) = op {
                tx.add_node(n).unwrap();
            }
        }
        let repo2 = tx.commit_opts(pinned_opts("proptest", "p3")).unwrap();
        // These labels cover every variant `arb_ntype` can produce.
        for label in ["Person", "Place", "Event", "Concept"] {
            let hits1 = repo1
                .query()
                .label(label)
                .execute()
                .unwrap()
                .into_iter()
                .map(|h| h.node.id)
                .collect::<Vec<_>>();
            let hits2 = repo2
                .query()
                .label(label)
                .execute()
                .unwrap()
                .into_iter()
                .map(|h| h.node.id)
                .collect::<Vec<_>>();
            prop_assert_eq!(
                hits1, hits2,
                "query(label={}) must return byte-identical hit sequence",
                label
            );
        }
    }

    /// P5: tombstoning the same node twice in one commit must produce the
    /// same commit CID as tombstoning it once.
    #[test]
    fn p5_tombstone_idempotent(
        id in arb_node_id(),
        extra in arb_node_id(),
    ) {
        // NOTE(review): `extra` is only used in this assumption and never in
        // the ops below — presumably a distinctness guard left over from an
        // earlier version; confirm whether it can be dropped.
        prop_assume!(id != extra);
        // Sanity check: a plain add of this node commits successfully.
        let seed_ops = vec![TxOp::AddNode(Node::new(id, "Person"))];
        let cid_seed = apply_single_commit(&seed_ops);
        prop_assume!(cid_seed.is_some());
        let once_ops = vec![
            TxOp::AddNode(Node::new(id, "Person")),
            TxOp::Tombstone(id),
        ];
        let twice_ops = vec![
            TxOp::AddNode(Node::new(id, "Person")),
            TxOp::Tombstone(id),
            TxOp::Tombstone(id),
        ];
        let cid_once = apply_single_commit(&once_ops);
        let cid_twice = apply_single_commit(&twice_ops);
        prop_assert_eq!(
            cid_once, cid_twice,
            "tombstone_node must be idempotent within one commit"
        );
    }
}
#[cfg(feature = "ci-slow")]
proptest! {
    #![proptest_config(ProptestConfig {
        cases: 256,
        max_shrink_iters: 2000,
        .. ProptestConfig::default()
    })]

    /// Larger-input variant of `p1_add_order_permutation`, gated behind the
    /// `ci-slow` feature: commit CID must survive op-order permutation.
    #[test]
    fn p1_add_order_permutation_slow(
        ops in arb_commit_sequence(64, 128),
        shuffle_seed in any::<u64>(),
    ) {
        let baseline = apply_single_commit(&ops);
        prop_assume!(baseline.is_some());
        let mut permuted = ops.clone();
        // Fisher-Yates shuffle driven by xorshift64; `| 1` keeps the PRNG
        // state nonzero.
        let mut rng = shuffle_seed | 1;
        for i in (1..permuted.len()).rev() {
            rng ^= rng << 13;
            rng ^= rng >> 7;
            rng ^= rng << 17;
            permuted.swap(i, (rng as usize) % (i + 1));
        }
        prop_assert_eq!(baseline, apply_single_commit(&permuted));
    }
}