use ff::PrimeField;
use kontor_crypto::{
api::{self, Challenge, FieldElement, PorSystem},
merkle,
poseidon::{domain_tags, poseidon_hash_tagged},
FileLedger,
};
use std::collections::BTreeMap;
mod common;
use common::{
create_multi_file_ledger, create_single_file_ledger,
fixtures::{create_test_data, setup_test_scenario, TestConfig},
};
#[test]
fn test_endianness_sanity_after_fix() {
    println!("Testing endianness sanity after fix...");

    // A lone 0x01 byte must decode to the field element 1 — the most
    // direct probe that leaf hashing reads bytes with the intended
    // endianness.
    let probe: Vec<u8> = vec![0x01];
    assert_eq!(
        merkle::get_leaf_hash(&probe).unwrap(),
        FieldElement::from(1u64),
        "Single byte 0x01 should map to field element 1"
    );

    // Every small byte value should land on the matching field element.
    for value in 0u8..10 {
        let payload = vec![value];
        let leaf = merkle::get_leaf_hash(&payload).unwrap();
        assert_eq!(
            leaf,
            FieldElement::from(value as u64),
            "Byte {} should map to field element {}",
            value,
            value
        );
    }

    // Round-trip: serialize a known field element, feed its low 8 bytes
    // back through the leaf hash, and expect the original element.
    let expected = FieldElement::from(42u64);
    let repr = expected.to_repr();
    let low_bytes = repr[..8].to_vec();
    let round_tripped = merkle::get_leaf_hash(&low_bytes).unwrap();
    assert_eq!(
        round_tripped, expected,
        "Round-trip through bytes should preserve field element value"
    );
    println!("✓ Endianness is correctly implemented");
}
#[test]
fn test_meta_commitment_binding() {
    println!("Testing meta-commitment binding...");

    // Build a two-file scenario and produce an honest proof that verifies.
    let setup = setup_test_scenario(&TestConfig::multi_file(2)).unwrap();
    let ledger = setup.ledger.as_ref().unwrap();
    let file_refs = setup.file_refs();
    let honest_files: Vec<&_> = file_refs.values().copied().collect();

    let system = PorSystem::new(ledger);
    let proof = system
        .prove(honest_files, &setup.challenges)
        .expect("Should generate valid proof");
    assert!(
        system
            .verify(&proof, &setup.challenges)
            .expect("Verification should complete"),
        "Original proof should verify"
    );

    // Forge the first file's committed root and rebuild its challenge
    // around the forged metadata.
    let mut forged_challenges = setup.challenges.clone();
    let mut forged_metadata = forged_challenges[0].file_metadata.clone();
    forged_metadata.root = FieldElement::from(999999u64);
    forged_challenges[0] = Challenge::new_test(
        forged_metadata.clone(),
        forged_challenges[0].block_height,
        forged_challenges[0].num_challenges,
        forged_challenges[0].seed,
    );

    // A ledger committing to the forged root next to the untouched
    // second file's metadata.
    let metadata_refs: Vec<&_> = vec![&forged_metadata, &setup.challenges[1].file_metadata];
    let forged_ledger = create_multi_file_ledger(&metadata_refs);
    let forged_system = api::PorSystem::new(&forged_ledger);

    // The honest proof must not verify under the forged commitment,
    // whether that surfaces as Ok(false) or as a hard error.
    match forged_system.verify(&proof, &forged_challenges) {
        Ok(false) => {
            println!("✓ Meta-commitment correctly rejected tampered root");
        }
        Err(_) => {
            println!("✓ Meta-commitment validation caused verification error");
        }
        Ok(true) => {
            panic!("Meta-commitment should prevent verification with tampered root!");
        }
    }
}
#[test]
fn test_multi_file_challenge_separation() {
    println!("Testing multi-file challenge separation...");

    let seed = FieldElement::from(42u64);
    let state = FieldElement::from(100u64);
    let base = poseidon_hash_tagged(domain_tags::challenge(), seed, state);

    // Derive the domain-separated challenge for a given file index.
    let per_file = |index: u64| {
        poseidon_hash_tagged(
            domain_tags::challenge_per_file(),
            base,
            FieldElement::from(index),
        )
    };

    let first = per_file(0);
    let second = per_file(1);
    assert_ne!(
        first, second,
        "Challenges for different files must differ"
    );

    // Guard against a trivially linear derivation (c_1 = c_0 + 1).
    assert_ne!(
        second - first,
        FieldElement::from(1u64),
        "Challenges should not have linear relationship"
    );

    // Pairwise distinctness over a handful of consecutive indices.
    let derived: Vec<_> = (0..5u64).map(per_file).collect();
    for (i, left) in derived.iter().enumerate() {
        for right in &derived[i + 1..] {
            assert_ne!(
                left, right,
                "All file challenges must be unique"
            );
        }
    }
    println!("✓ Multi-file challenges are properly separated");
}
#[test]
fn test_single_vs_multi_file_equivalence() {
    println!("Testing single-file vs multi-file equivalence...");

    // --- Single-file path ---
    let data = create_test_data(100, Some(42));
    let (prepared, metadata) =
        api::prepare_file(&data, "test_file.dat").expect("Failed to prepare file");
    let challenge = Challenge::new_test(metadata.clone(), 1000, 3, FieldElement::from(123u64));

    // BTreeMap keeps file ordering deterministic for prove().
    let mut single_files = BTreeMap::new();
    single_files.insert(metadata.file_id.clone(), &prepared);

    let single_ledger = create_single_file_ledger(&metadata);
    let single_system = api::PorSystem::new(&single_ledger);
    let single_inputs: Vec<&_> = single_files.values().copied().collect();
    let single_proof = single_system
        .prove(single_inputs, std::slice::from_ref(&challenge))
        .expect("Should generate single-file proof");
    assert!(
        single_system
            .verify(&single_proof, std::slice::from_ref(&challenge))
            .expect("Single-file verification should complete"),
        "Single-file proof should verify"
    );

    // --- Multi-file path: same first file plus a second one ---
    let data2 = create_test_data(50, Some(99));
    let (prepared2, metadata2) =
        api::prepare_file(&data2, "test_file.dat").expect("Failed to prepare file 2");
    let challenge2 = Challenge::new_test(metadata2.clone(), 1000, 3, FieldElement::from(123u64));

    // Register both files in a fresh ledger.
    let mut ledger = FileLedger::new();
    for meta in [&metadata, &metadata2] {
        ledger
            .add_file(
                meta.file_id.clone(),
                meta.root,
                kontor_crypto::api::tree_depth_from_metadata(meta),
            )
            .unwrap();
    }

    let mut multi_files = BTreeMap::new();
    multi_files.insert(metadata.file_id.clone(), &prepared);
    multi_files.insert(metadata2.file_id.clone(), &prepared2);

    let multi_system = api::PorSystem::new(&ledger);
    let multi_inputs: Vec<&_> = multi_files.values().copied().collect();
    let multi_proof = multi_system
        .prove(multi_inputs, &[challenge.clone(), challenge2.clone()])
        .expect("Should generate multi-file proof");
    assert!(
        multi_system
            .verify(&multi_proof, &[challenge, challenge2])
            .expect("Multi-file verification should complete"),
        "Multi-file proof should verify"
    );
    println!("✓ Single-file and multi-file proofs both work correctly");
}
#[test]
fn test_gating_uniformity() {
    println!("Testing gating uniformity across different file depths...");

    // Prepare one small and one large file so their Merkle depths differ.
    let data_small = create_test_data(1_000, Some(1));
    let (prepared_small, metadata_small) =
        api::prepare_file(&data_small, "test_file.dat").expect("Failed to prepare small file");
    let data_large = create_test_data(50_000, Some(2));
    let (prepared_large, metadata_large) =
        api::prepare_file(&data_large, "test_file.dat").expect("Failed to prepare large file");

    let depth_small = kontor_crypto::api::tree_depth_from_metadata(&metadata_small);
    let depth_large = kontor_crypto::api::tree_depth_from_metadata(&metadata_large);
    assert_ne!(
        depth_small, depth_large,
        "Test files should have different depths"
    );
    println!(
        "  Small file depth: {}, Large file depth: {}",
        depth_small, depth_large
    );

    // One challenge per file, sharing block height and seed.
    let challenge_small =
        Challenge::new_test(metadata_small.clone(), 1000, 2, FieldElement::from(456u64));
    let challenge_large =
        Challenge::new_test(metadata_large.clone(), 1000, 2, FieldElement::from(456u64));

    // Register both files — their differing depths — in a single ledger.
    let mut ledger = FileLedger::new();
    ledger
        .add_file(
            metadata_small.file_id.clone(),
            metadata_small.root,
            depth_small,
        )
        .unwrap();
    ledger
        .add_file(
            metadata_large.file_id.clone(),
            metadata_large.root,
            depth_large,
        )
        .unwrap();

    // BTreeMap keeps prove() input ordering deterministic.
    let mut files = BTreeMap::new();
    files.insert(metadata_small.file_id.clone(), &prepared_small);
    files.insert(metadata_large.file_id.clone(), &prepared_large);

    let system = api::PorSystem::new(&ledger);
    let inputs: Vec<&_> = files.values().copied().collect();
    let proof = system
        .prove(inputs, &[challenge_small.clone(), challenge_large.clone()])
        .expect("Should generate proof with mixed depths");
    assert!(
        system
            .verify(&proof, &[challenge_small, challenge_large])
            .expect("Verification should complete"),
        "Proof with mixed depths should verify"
    );
    println!("✓ Gating allows uniform parameters across different depths");
}