use hopper_core::account::segment_role::{
SegmentRole, SEG_ROLE_AUDIT, SEG_ROLE_CACHE, SEG_ROLE_CORE, SEG_ROLE_EXTENSION, SEG_ROLE_INDEX,
SEG_ROLE_JOURNAL, SEG_ROLE_SHARD,
};
use hopper_core::account::*;
use hopper_core::check::graph::{ValidationContext, ValidationGraph};
use hopper_core::check::{
check_no_subsequent_invocation, current_instruction_index, detect_flash_loan_bracket,
instruction_count, read_program_id_at, require_top_level,
};
use hopper_core::collections::{
bitmap_bytes, Journal, PackedMap, Slab, SortedVec, JOURNAL_HEADER_SIZE, SLAB_HEADER_SIZE,
};
use hopper_core::receipt::{StateReceipt, RECEIPT_SIZE};
/// Build a mock "instructions sysvar" byte buffer in the wire format the
/// `hopper_core::check` parsers consume:
///
///   [num_ix: u16 LE]
///   [offset table: num_ix x u16 LE]   -- byte offset of each instruction
///   per instruction:
///     [num account metas: u16 LE]     -- always 0 in this helper
///     [program id: 32 bytes]
///     [instruction data len: u16 LE]  -- always 0 in this helper
///   [current instruction index: u16 LE] -- trailing
///
/// `instructions` supplies one program id per instruction; `current_idx`
/// becomes the trailing "currently executing instruction" index.
fn build_ix_sysvar(instructions: &[&[u8; 32]], current_idx: u16) -> Vec<u8> {
    let num_ix = instructions.len() as u16;
    let mut buf = Vec::new();
    buf.extend_from_slice(&num_ix.to_le_bytes());
    // Reserve the offset table with zero placeholders; the real offsets are
    // only known after the instruction bodies are serialized, so they are
    // backpatched below.
    let offset_table_start = buf.len();
    for _ in 0..num_ix {
        buf.extend_from_slice(&0u16.to_le_bytes());
    }
    let mut offsets = Vec::new();
    for &program_id in instructions {
        offsets.push(buf.len() as u16);
        buf.extend_from_slice(&0u16.to_le_bytes()); // zero account metas
        buf.extend_from_slice(program_id);
        buf.extend_from_slice(&0u16.to_le_bytes()); // zero-length ix data
    }
    // Backpatch the offset table entries in place.
    for (i, offset) in offsets.iter().enumerate() {
        let pos = offset_table_start + i * 2;
        buf[pos..pos + 2].copy_from_slice(&offset.to_le_bytes());
    }
    // Trailing current-instruction index.
    // BUGFIX: the original line read `¤t_idx` — an HTML-entity-corrupted
    // `&current_idx` (`&curren` -> `¤`) that does not compile.
    buf.extend_from_slice(&current_idx.to_le_bytes());
    buf
}
// Current instruction's program id equals ours => top-level check passes.
// The unsafe cast reinterprets the raw 32-byte array as a
// hopper_runtime::Address (assumed repr-compatible wrapper — TODO confirm).
#[test]
fn cpi_guard_require_top_level_passes_when_current_matches() {
let our_program = [1u8; 32];
let sysvar = build_ix_sysvar(&[&our_program], 0);
assert!(require_top_level(&sysvar, unsafe {
&*(&our_program as *const [u8; 32] as *const hopper_runtime::Address)
})
.is_ok());
}
// Current instruction is a different program => we were invoked via CPI,
// so the top-level check must fail.
#[test]
fn cpi_guard_require_top_level_fails_when_current_is_different() {
let our_program = [1u8; 32];
let other_program = [2u8; 32];
let sysvar = build_ix_sysvar(&[&other_program], 0);
assert!(require_top_level(&sysvar, unsafe {
&*(&our_program as *const [u8; 32] as *const hopper_runtime::Address)
})
.is_err());
}
// The check must key off the *current* index, not index 0: ours is in the
// middle of three instructions and current_idx points at it.
#[test]
fn cpi_guard_require_top_level_multi_instruction() {
let our_program = [1u8; 32];
let other = [2u8; 32];
let sysvar = build_ix_sysvar(&[&other, &our_program, &other], 1);
assert!(require_top_level(&sysvar, unsafe {
&*(&our_program as *const [u8; 32] as *const hopper_runtime::Address)
})
.is_ok());
}
// A [ours, other, ours] sandwich around the current instruction: detection
// succeeds when scanning for `other` (no bracket by `other`) but errors when
// the bracketing program id itself (`our_program`) is the one being checked.
#[test]
fn cpi_guard_flash_loan_bracket_detected() {
let our_program = [1u8; 32];
let other = [2u8; 32];
let sysvar = build_ix_sysvar(&[&our_program, &other, &our_program], 1);
assert!(detect_flash_loan_bracket(&sysvar, unsafe {
&*(&other as *const [u8; 32] as *const hopper_runtime::Address)
})
.is_ok());
assert!(detect_flash_loan_bracket(&sysvar, unsafe {
&*(&our_program as *const [u8; 32] as *const hopper_runtime::Address)
})
.is_err());
}
// Our program appears only before the current instruction — no closing side,
// so no bracket is reported.
#[test]
fn cpi_guard_flash_loan_no_bracket() {
let our_program = [1u8; 32];
let other = [2u8; 32];
let sysvar = build_ix_sysvar(&[&our_program, &other], 1);
assert!(detect_flash_loan_bracket(&sysvar, unsafe {
&*(&our_program as *const [u8; 32] as *const hopper_runtime::Address)
})
.is_ok());
}
// Two occurrences of our program, both strictly before the current index:
// a bracket needs occurrences on *both* sides, so this passes.
#[test]
fn cpi_guard_flash_loan_only_before() {
let our_program = [1u8; 32];
let other = [2u8; 32];
let sysvar = build_ix_sysvar(&[&our_program, &our_program, &other], 2);
assert!(detect_flash_loan_bracket(&sysvar, unsafe {
&*(&our_program as *const [u8; 32] as *const hopper_runtime::Address)
})
.is_ok());
}
// Only a foreign program follows the current instruction => ok.
#[test]
fn cpi_guard_no_subsequent_invocation_pass() {
let our_program = [1u8; 32];
let other = [2u8; 32];
let sysvar = build_ix_sysvar(&[&our_program, &other], 0);
assert!(check_no_subsequent_invocation(&sysvar, unsafe {
&*(&our_program as *const [u8; 32] as *const hopper_runtime::Address)
})
.is_ok());
}
// Our program is scheduled again after the current instruction => rejected.
#[test]
fn cpi_guard_no_subsequent_invocation_fail() {
let our_program = [1u8; 32];
let other = [2u8; 32];
let sysvar = build_ix_sysvar(&[&other, &our_program], 0);
assert!(
check_no_subsequent_invocation(&sysvar, unsafe {
&*(&our_program as *const [u8; 32] as *const hopper_runtime::Address)
})
.is_err(),
"Expected Err: our program appears after current instruction"
);
}
// Current instruction is the last one — nothing follows, so the check passes
// even though our program *is* the current instruction.
#[test]
fn cpi_guard_no_subsequent_invocation_at_last() {
let our_program = [1u8; 32];
let other = [2u8; 32];
let sysvar = build_ix_sysvar(&[&other, &other, &our_program], 2);
assert!(check_no_subsequent_invocation(&sysvar, unsafe {
&*(&our_program as *const [u8; 32] as *const hopper_runtime::Address)
})
.is_ok());
}
// instruction_count reads the leading u16; current_instruction_index reads
// the trailing u16 appended by build_ix_sysvar.
#[test]
fn cpi_guard_instruction_count_parsing() {
let our = [1u8; 32];
let sysvar = build_ix_sysvar(&[&our, &our, &our], 1);
assert_eq!(instruction_count(&sysvar).unwrap(), 3);
assert_eq!(current_instruction_index(&sysvar).unwrap(), 1);
}
// Each index resolves through the offset table to the right program id, and
// an out-of-range index (3 of 3) is an error rather than garbage bytes.
#[test]
fn cpi_guard_program_id_at_each_index() {
let p0 = [10u8; 32];
let p1 = [20u8; 32];
let p2 = [30u8; 32];
let sysvar = build_ix_sysvar(&[&p0, &p1, &p2], 0);
assert_eq!(read_program_id_at(&sysvar, 0).unwrap(), p0);
assert_eq!(read_program_id_at(&sysvar, 1).unwrap(), p1);
assert_eq!(read_program_id_at(&sysvar, 2).unwrap(), p2);
assert!(read_program_id_at(&sysvar, 3).is_err());
}
// Degenerate buffers: empty (no count) and a single byte (count truncated,
// no trailing index) must both be rejected, not panic.
#[test]
fn cpi_guard_empty_sysvar_rejects() {
assert!(instruction_count(&[]).is_err());
assert!(current_instruction_index(&[0]).is_err());
}
// 8-byte POD test element used as the entry type for Journal/Slab/SortedVec.
// Stores a u64 as little-endian bytes so the struct has no padding and a
// stable byte layout (#[repr(C)]).
#[repr(C)]
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug)]
struct Entry8([u8; 8]);
// SAFETY: Entry8 is #[repr(C)] over a plain [u8; 8] — all bit patterns are
// valid and there is no padding, satisfying Zeroable/Pod. Two bytemuck impls
// are needed because hopper_runtime re-exports its own bytemuck instance;
// the bare `Pod` below is presumably hopper_core's own marker trait — TODO confirm.
unsafe impl ::hopper_runtime::__hopper_native::bytemuck::Zeroable for Entry8 {}
unsafe impl ::hopper_runtime::__hopper_native::bytemuck::Pod for Entry8 {}
unsafe impl Pod for Entry8 {}
// Fixed on-wire size used by the collections for buffer sizing.
impl FixedLayout for Entry8 {
const SIZE: usize = 8;
}
impl Entry8 {
// Encode a u64 payload as little-endian bytes.
fn new(val: u64) -> Self {
Self(val.to_le_bytes())
}
// Decode the little-endian payload back to a u64.
fn val(&self) -> u64 {
u64::from_le_bytes(self.0)
}
}
// Strict (non-circular) journal: fills to capacity, then append errors and
// the count stays at capacity.
#[test]
fn journal_strict_mode_fills_and_rejects() {
let cap = 4;
let mut buf = vec![0u8; JOURNAL_HEADER_SIZE + cap * Entry8::SIZE];
let mut journal = Journal::<Entry8>::from_bytes_mut(&mut buf).unwrap();
journal.init(false); // false => strict mode (no wrap) — per init(circular) convention below
for i in 0..cap {
journal.append(Entry8::new(i as u64 + 100)).unwrap();
assert_eq!(journal.entry_count(), i + 1);
}
assert!(journal.append(Entry8::new(999)).is_err());
assert_eq!(journal.entry_count(), cap);
}
// Strict journal preserves append order for indexed reads; latest() is the
// last appended value.
#[test]
fn journal_strict_mode_read_ordering() {
let cap = 4;
let mut buf = vec![0u8; JOURNAL_HEADER_SIZE + cap * Entry8::SIZE];
let mut journal = Journal::<Entry8>::from_bytes_mut(&mut buf).unwrap();
journal.init(false);
for i in 0..cap {
journal.append(Entry8::new(i as u64)).unwrap();
}
for i in 0..cap {
assert_eq!(journal.read(i).unwrap().val(), i as u64);
}
assert_eq!(journal.latest().unwrap().val(), 3);
}
// Circular journal of capacity 3 after 5 appends (10..14): the two oldest
// entries are overwritten, read(0..2) yields 12,13,14 oldest-first, and
// total_written keeps the lifetime count.
#[test]
fn journal_circular_mode_wraps() {
let cap = 3;
let mut buf = vec![0u8; JOURNAL_HEADER_SIZE + cap * Entry8::SIZE];
let mut journal = Journal::<Entry8>::from_bytes_mut(&mut buf).unwrap();
journal.init(true); // true => circular mode
for i in 0..5u64 {
journal.append(Entry8::new(i + 10)).unwrap();
}
assert_eq!(journal.entry_count(), cap); assert!(journal.has_wrapped());
assert_eq!(journal.total_written(), 5);
assert_eq!(journal.read(0).unwrap().val(), 12);
assert_eq!(journal.read(1).unwrap().val(), 13);
assert_eq!(journal.read(2).unwrap().val(), 14);
assert_eq!(journal.latest().unwrap().val(), 14);
}
// 100 appends into a 2-slot ring: only the last two survive, still in order.
#[test]
fn journal_circular_wrap_many_times() {
let cap = 2;
let mut buf = vec![0u8; JOURNAL_HEADER_SIZE + cap * Entry8::SIZE];
let mut journal = Journal::<Entry8>::from_bytes_mut(&mut buf).unwrap();
journal.init(true);
for i in 0..100u64 {
journal.append(Entry8::new(i)).unwrap();
}
assert_eq!(journal.total_written(), 100);
assert_eq!(journal.entry_count(), 2);
assert_eq!(journal.read(0).unwrap().val(), 98);
assert_eq!(journal.read(1).unwrap().val(), 99);
}
// Reads on an empty journal are errors, not zeroed entries.
#[test]
fn journal_empty_read_fails() {
let mut buf = vec![0u8; JOURNAL_HEADER_SIZE + 4 * Entry8::SIZE];
let mut journal = Journal::<Entry8>::from_bytes_mut(&mut buf).unwrap();
journal.init(false);
assert!(journal.read(0).is_err());
assert!(journal.latest().is_err());
}
// required_bytes = header + capacity * entry size.
#[test]
fn journal_required_bytes() {
assert_eq!(
Journal::<Entry8>::required_bytes(10),
JOURNAL_HEADER_SIZE + 10 * 8
);
}
// A buffer smaller than the journal header cannot even be opened.
#[test]
fn journal_too_small_buffer_rejects() {
let mut buf = vec![0u8; 4]; assert!(Journal::<Entry8>::from_bytes_mut(&mut buf).is_err());
}
// Full slab lifecycle: alloc every slot, verify contents and allocation bits,
// reject when full, free one slot, and confirm the freed slot is the one
// reused by the next alloc (lowest-free-slot reuse).
#[test]
fn slab_alloc_get_free_cycle() {
let cap = 4;
let bmap = bitmap_bytes(cap); // allocation bitmap size for `cap` slots
let total = SLAB_HEADER_SIZE + bmap + cap * Entry8::SIZE;
let mut buf = vec![0u8; total];
Slab::<Entry8>::init(&mut buf, cap).unwrap();
let mut slab = Slab::<Entry8>::from_bytes_mut(&mut buf).unwrap();
let mut indices = Vec::new();
for i in 0..4u64 {
let idx = slab.alloc(Entry8::new(i + 100)).unwrap();
indices.push(idx);
}
for (i, &idx) in indices.iter().enumerate() {
assert_eq!(slab.get(idx).unwrap().val(), i as u64 + 100);
assert!(slab.is_slot_allocated(idx));
}
assert!(slab.is_full());
assert!(slab.alloc(Entry8::new(999)).is_err());
slab.free(indices[1]).unwrap();
assert!(!slab.is_slot_allocated(indices[1]));
assert_eq!(slab.count(), 3);
let new_idx = slab.alloc(Entry8::new(555)).unwrap();
// Freed slot is recycled for the next allocation.
assert_eq!(new_idx, indices[1]); assert_eq!(slab.get(new_idx).unwrap().val(), 555);
}
// Freeing an already-free slot is an error (bitmap guards double-free).
#[test]
fn slab_double_free_rejected() {
let cap = 2;
let bmap = bitmap_bytes(cap);
let total = SLAB_HEADER_SIZE + bmap + cap * Entry8::SIZE;
let mut buf = vec![0u8; total];
Slab::<Entry8>::init(&mut buf, cap).unwrap();
let mut slab = Slab::<Entry8>::from_bytes_mut(&mut buf).unwrap();
let idx = slab.alloc(Entry8::new(1)).unwrap();
slab.free(idx).unwrap();
assert!(slab.free(idx).is_err(), "Double-free should be rejected");
}
// get() on a slot that was freed must fail rather than return stale data.
#[test]
fn slab_read_freed_slot_rejected() {
let cap = 2;
let bmap = bitmap_bytes(cap);
let total = SLAB_HEADER_SIZE + bmap + cap * Entry8::SIZE;
let mut buf = vec![0u8; total];
Slab::<Entry8>::init(&mut buf, cap).unwrap();
let mut slab = Slab::<Entry8>::from_bytes_mut(&mut buf).unwrap();
let idx = slab.alloc(Entry8::new(42)).unwrap();
slab.free(idx).unwrap();
assert!(slab.get(idx).is_err(), "Read of freed slot should fail");
}
// Index beyond capacity is rejected.
#[test]
fn slab_out_of_bounds_rejected() {
let cap = 2;
let bmap = bitmap_bytes(cap);
let total = SLAB_HEADER_SIZE + bmap + cap * Entry8::SIZE;
let mut buf = vec![0u8; total];
Slab::<Entry8>::init(&mut buf, cap).unwrap();
let slab = Slab::<Entry8>::from_bytes_mut(&mut buf).unwrap();
assert!(slab.get(99).is_err());
}
// Alloc all, free all, realloc all — counts and fullness track correctly
// through a complete drain-and-refill cycle.
#[test]
fn slab_alloc_free_all_then_realloc() {
let cap = 3;
let bmap = bitmap_bytes(cap);
let total = SLAB_HEADER_SIZE + bmap + cap * Entry8::SIZE;
let mut buf = vec![0u8; total];
Slab::<Entry8>::init(&mut buf, cap).unwrap();
let mut slab = Slab::<Entry8>::from_bytes_mut(&mut buf).unwrap();
let i0 = slab.alloc(Entry8::new(10)).unwrap();
let i1 = slab.alloc(Entry8::new(20)).unwrap();
let i2 = slab.alloc(Entry8::new(30)).unwrap();
slab.free(i0).unwrap();
slab.free(i1).unwrap();
slab.free(i2).unwrap();
assert_eq!(slab.count(), 0);
assert!(!slab.is_full());
for i in 0..3u64 {
slab.alloc(Entry8::new(i + 200)).unwrap();
}
assert_eq!(slab.count(), 3);
assert!(slab.is_full());
}
// A buffer smaller than the slab header cannot be opened.
#[test]
fn slab_too_small_buffer_rejects() {
let mut buf = vec![0u8; 4]; assert!(Slab::<Entry8>::from_bytes_mut(&mut buf).is_err());
}
// 4-byte POD key type for PackedMap tests: a u32 stored as LE bytes.
#[repr(C)]
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
struct Key4([u8; 4]);
// SAFETY: #[repr(C)] wrapper over [u8; 4] — no padding, all bit patterns
// valid. Dual bytemuck impls mirror Entry8 (runtime's re-exported bytemuck
// plus the project's own `Pod` marker — presumably from hopper_core; TODO confirm).
unsafe impl ::hopper_runtime::__hopper_native::bytemuck::Zeroable for Key4 {}
unsafe impl ::hopper_runtime::__hopper_native::bytemuck::Pod for Key4 {}
unsafe impl Pod for Key4 {}
impl FixedLayout for Key4 {
const SIZE: usize = 4;
}
impl Key4 {
// Encode a u32 key as little-endian bytes.
fn new(v: u32) -> Self {
Self(v.to_le_bytes())
}
}
// 8-byte POD value type for PackedMap tests: a u64 stored as LE bytes.
#[repr(C)]
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
struct Val8([u8; 8]);
// SAFETY: same argument as Key4 — plain byte array, no padding.
unsafe impl ::hopper_runtime::__hopper_native::bytemuck::Zeroable for Val8 {}
unsafe impl ::hopper_runtime::__hopper_native::bytemuck::Pod for Val8 {}
unsafe impl Pod for Val8 {}
impl FixedLayout for Val8 {
const SIZE: usize = 8;
}
impl Val8 {
// Encode a u64 value as little-endian bytes.
fn new(v: u64) -> Self {
Self(v.to_le_bytes())
}
// Decode back to u64.
fn val(&self) -> u64 {
u64::from_le_bytes(self.0)
}
}
// PackedMap CRUD: insert returns false for new keys and true when it
// overwrites an existing key; remove returns the removed value; removing a
// missing key is an error. Buffer layout assumed: 4-byte header + entries.
#[test]
fn packed_map_insert_get_remove() {
let entry_size = Key4::SIZE + Val8::SIZE; let cap = 4;
let mut buf = vec![0u8; 4 + cap * entry_size];
let mut map = PackedMap::<Key4, Val8>::from_bytes(&mut buf).unwrap();
// insert() -> Ok(false) means "newly inserted" (no previous value).
assert!(!map.insert(Key4::new(1), Val8::new(100)).unwrap()); assert!(!map.insert(Key4::new(2), Val8::new(200)).unwrap());
assert!(!map.insert(Key4::new(3), Val8::new(300)).unwrap());
assert_eq!(map.len(), 3);
assert!(map.contains(&Key4::new(2)));
assert_eq!(map.get(&Key4::new(2)).unwrap().val(), 200);
// insert() -> Ok(true) means an existing key was overwritten; len unchanged.
assert!(map.insert(Key4::new(2), Val8::new(999)).unwrap()); assert_eq!(map.get(&Key4::new(2)).unwrap().val(), 999);
assert_eq!(map.len(), 3);
let removed = map.remove(&Key4::new(1)).unwrap();
assert_eq!(removed.val(), 100);
assert_eq!(map.len(), 2);
assert!(!map.contains(&Key4::new(1)));
assert!(map.remove(&Key4::new(99)).is_err());
}
// Inserting a new key into a full map is an error.
#[test]
fn packed_map_full_rejects() {
let entry_size = Key4::SIZE + Val8::SIZE;
let cap = 2;
let mut buf = vec![0u8; 4 + cap * entry_size];
let mut map = PackedMap::<Key4, Val8>::from_bytes(&mut buf).unwrap();
map.insert(Key4::new(1), Val8::new(10)).unwrap();
map.insert(Key4::new(2), Val8::new(20)).unwrap();
assert!(map.is_full());
assert!(map.insert(Key4::new(3), Val8::new(30)).is_err());
}
// Queries on a freshly-opened empty map: get errors, contains is false.
#[test]
fn packed_map_empty_queries() {
let mut buf = vec![0u8; 4 + 4 * (Key4::SIZE + Val8::SIZE)];
let map = PackedMap::<Key4, Val8>::from_bytes(&mut buf).unwrap();
assert!(map.is_empty());
assert_eq!(map.len(), 0);
assert!(map.get(&Key4::new(0)).is_err());
assert!(!map.contains(&Key4::new(0)));
}
// Out-of-order inserts end up in ascending (byte-wise Ord of Entry8) order.
#[test]
fn sorted_vec_maintains_order() {
let cap = 8;
let mut buf = vec![0u8; 4 + cap * Entry8::SIZE];
let mut sv = SortedVec::<Entry8>::from_bytes(&mut buf).unwrap();
for &v in &[50u64, 10, 40, 20, 30] {
sv.insert(Entry8::new(v)).unwrap();
}
assert_eq!(sv.len(), 5);
let vals: Vec<u64> = (0..sv.len()).map(|i| sv.get(i).unwrap().val()).collect();
assert_eq!(vals, vec![10, 20, 30, 40, 50]);
}
// Membership lookup (presumably binary search, given the name) finds present
// values and rejects absent ones.
#[test]
fn sorted_vec_binary_search() {
let cap = 8;
let mut buf = vec![0u8; 4 + cap * Entry8::SIZE];
let mut sv = SortedVec::<Entry8>::from_bytes(&mut buf).unwrap();
for &v in &[10u64, 20, 30, 40, 50] {
sv.insert(Entry8::new(v)).unwrap();
}
assert!(sv.contains(&Entry8::new(30)));
assert!(!sv.contains(&Entry8::new(25)));
}
// Removing a middle value closes the gap and keeps the rest sorted.
#[test]
fn sorted_vec_remove_maintains_order() {
let cap = 8;
let mut buf = vec![0u8; 4 + cap * Entry8::SIZE];
let mut sv = SortedVec::<Entry8>::from_bytes(&mut buf).unwrap();
for &v in &[10u64, 20, 30, 40, 50] {
sv.insert(Entry8::new(v)).unwrap();
}
sv.remove_value(&Entry8::new(30)).unwrap();
assert_eq!(sv.len(), 4);
let vals: Vec<u64> = (0..sv.len()).map(|i| sv.get(i).unwrap().val()).collect();
assert_eq!(vals, vec![10, 20, 40, 50]);
}
// Duplicate insert: this test deliberately does not pin whether duplicates
// are accepted or rejected (the Result is discarded) — it only asserts the
// sorted invariant survives either way.
#[test]
fn sorted_vec_duplicate_insert() {
let cap = 8;
let mut buf = vec![0u8; 4 + cap * Entry8::SIZE];
let mut sv = SortedVec::<Entry8>::from_bytes(&mut buf).unwrap();
sv.insert(Entry8::new(10)).unwrap();
let result = sv.insert(Entry8::new(10));
let vals: Vec<u64> = (0..sv.len()).map(|i| sv.get(i).unwrap().val()).collect();
let mut sorted = vals.clone();
sorted.sort();
assert_eq!(
vals, sorted,
"SortedVec must stay sorted even with duplicate inserts"
);
let _ = result; }
// Insert into a full vector is an error.
#[test]
fn sorted_vec_capacity_full() {
let cap = 3;
let mut buf = vec![0u8; 4 + cap * Entry8::SIZE];
let mut sv = SortedVec::<Entry8>::from_bytes(&mut buf).unwrap();
sv.insert(Entry8::new(1)).unwrap();
sv.insert(Entry8::new(2)).unwrap();
sv.insert(Entry8::new(3)).unwrap();
assert!(sv.insert(Entry8::new(4)).is_err());
}
// Committing identical before/after data: committed flag set, zero changed
// bytes, no changes, no resize.
#[test]
fn receipt_begin_commit_no_changes() {
let layout_id = [0xAA; 8];
let data = [0u8; 64];
let mut receipt = StateReceipt::<64>::begin(&layout_id, &data);
assert!(!receipt.is_committed());
receipt.commit(&data); assert!(receipt.is_committed());
assert_eq!(receipt.changed_bytes, 0);
assert!(!receipt.has_changes());
assert!(!receipt.was_resized);
}
// Mutating two bytes between begin and commit is reflected in changed_bytes
// (exact count not pinned, only > 0) and has_changes.
#[test]
fn receipt_detects_byte_changes() {
let layout_id = [0xBB; 8];
let before = [0u8; 32];
let mut receipt = StateReceipt::<32>::begin(&layout_id, &before);
let mut after = before;
after[10] = 0xFF;
after[11] = 0xFF;
receipt.commit(&after);
assert!(receipt.is_committed());
assert!(receipt.changed_bytes > 0);
assert!(receipt.has_changes());
}
// Committing a larger buffer than begin saw records a resize with old/new
// sizes; a resize alone counts as a change.
#[test]
fn receipt_detects_resize() {
let layout_id = [0xCC; 8];
let before = [0u8; 16];
let mut receipt = StateReceipt::<32>::begin(&layout_id, &before);
let after = [0u8; 32]; receipt.commit(&after);
assert!(receipt.was_resized);
assert!(receipt.has_changes());
assert_eq!(receipt.old_size, 16);
assert_eq!(receipt.new_size, 32);
}
// commit_with_fields maps byte ranges to named fields and sets one bit per
// changed field in changed_fields (bit index = field position in the list).
// Bytes 32..34 changed => "balance" (index 1) dirty, "authority" (0) clean.
#[test]
fn receipt_field_tracking() {
let layout_id = [0xDD; 8];
let before = [0u8; 48];
let mut receipt = StateReceipt::<48>::begin(&layout_id, &before);
let mut after = before;
after[32] = 0xFF;
after[33] = 0xAA;
let fields: &[(&str, usize, usize)] =
&[("authority", 0, 32), ("balance", 32, 8), ("bump", 40, 1)];
receipt.commit_with_fields(&after, fields);
assert_ne!(
receipt.changed_fields & (1 << 1),
0,
"balance field bit should be set"
);
assert_eq!(
receipt.changed_fields & (1 << 0),
0,
"authority field bit should be clear"
);
}
// Invariant and CPI bookkeeping setters round-trip through the receipt.
#[test]
fn receipt_invariant_tracking() {
let layout_id = [0xEE; 8];
let data = [0u8; 16];
let mut receipt = StateReceipt::<16>::begin(&layout_id, &data);
receipt.commit(&data);
receipt.set_invariants(true, 5);
assert!(receipt.invariants_passed);
assert_eq!(receipt.invariants_checked, 5);
receipt.set_cpi_invoked(true);
assert!(receipt.cpi_invoked);
}
// Wire format spot-checks: layout_id at bytes 0..8, flags byte at 32 with
// bit1=invariants_passed, bit2=cpi_invoked, bit3=committed, and 8-byte
// before/after fingerprints at 33..41 and 41..49.
#[test]
fn receipt_wire_format_roundtrip() {
let layout_id = [0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88];
let before = [0u8; 32];
let mut after = before;
after[16] = 0xFF;
let mut receipt = StateReceipt::<32>::begin(&layout_id, &before);
receipt.commit(&after);
receipt.set_invariants(true, 3);
receipt.set_cpi_invoked(true);
let bytes = receipt.to_bytes();
assert_eq!(bytes.len(), RECEIPT_SIZE);
assert_eq!(&bytes[0..8], &layout_id);
let flags = bytes[32];
assert_ne!(flags & (1 << 1), 0, "invariants_passed flag");
assert_ne!(flags & (1 << 2), 0, "cpi_invoked flag");
assert_ne!(flags & (1 << 3), 0, "committed flag");
assert_ne!(
&bytes[33..41],
&[0u8; 8],
"before fingerprint should be set"
);
assert_ne!(&bytes[41..49], &[0u8; 8], "after fingerprint should be set");
}
// Identical data before and after => equal fingerprints, no change flag.
#[test]
fn receipt_fingerprints_match_when_no_changes() {
let layout_id = [0xAA; 8];
let data = [0x42u8; 32];
let mut receipt = StateReceipt::<32>::begin(&layout_id, &data);
receipt.commit(&data); assert!(!receipt.fingerprint_changed());
assert_eq!(receipt.before_fingerprint, receipt.after_fingerprint);
}
// A single mutated byte flips the fingerprint comparison.
#[test]
fn receipt_fingerprints_differ_on_mutation() {
let layout_id = [0xBB; 8];
let before = [0u8; 32];
let mut after = before;
after[16] = 0xFF;
let mut receipt = StateReceipt::<32>::begin(&layout_id, &before);
receipt.commit(&after);
assert!(receipt.fingerprint_changed());
assert_ne!(receipt.before_fingerprint, receipt.after_fingerprint);
}
// commit_with_segments sets one bit per dirty (offset, len) segment in
// segment_changed_mask. Byte 40 lies in segment 1 (32..48) only.
// Note: begin() takes a borrow of `data`, yet `data` is mutated before
// commit — this compiles only if begin copies/fingerprints the bytes rather
// than holding the borrow (TODO confirm against StateReceipt::begin).
#[test]
fn receipt_segment_tracking() {
let layout_id = [0xCC; 8];
let mut data = [0u8; 64];
let mut receipt = StateReceipt::<64>::begin(&layout_id, &data);
data[40] = 0xFF;
let segments: &[(usize, usize)] = &[
(0, 32), (32, 16), (48, 16), ];
receipt.commit_with_segments(&data, segments);
assert_eq!(
receipt.segment_changed_mask & 0x01,
0,
"segment 0 should be clean"
);
assert_ne!(
receipt.segment_changed_mask & 0x02,
0,
"segment 1 should be dirty"
);
assert_eq!(
receipt.segment_changed_mask & 0x04,
0,
"segment 2 should be clean"
);
}
// Policy flags survive the encode/decode round trip through the wire format.
#[test]
fn receipt_policy_flags_roundtrip() {
let layout_id = [0xDD; 8];
let data = [0u8; 16];
let mut receipt = StateReceipt::<16>::begin(&layout_id, &data);
receipt.commit(&data);
receipt.set_policy_flags(0b0000_0110);
let wire = receipt.to_bytes();
let decoded = hopper_core::receipt::DecodedReceipt::from_bytes(&wire).unwrap();
assert_eq!(decoded.policy_flags, 0b0000_0110);
}
// Journal-append and CPI counters round-trip; setting a nonzero cpi_count
// also auto-sets the cpi_invoked flag.
#[test]
fn receipt_journal_and_cpi_count() {
let layout_id = [0xEE; 8];
let data = [0u8; 16];
let mut receipt = StateReceipt::<16>::begin(&layout_id, &data);
receipt.commit(&data);
receipt.set_journal_appends(3);
receipt.set_cpi_count(2);
assert_eq!(receipt.journal_appends, 3);
assert_eq!(receipt.cpi_count, 2);
assert!(
receipt.cpi_invoked,
"cpi_invoked should auto-set when count > 0"
);
let wire = receipt.to_bytes();
let decoded = hopper_core::receipt::DecodedReceipt::from_bytes(&wire).unwrap();
assert_eq!(decoded.journal_appends, 3);
assert_eq!(decoded.cpi_count, 2);
assert!(decoded.cpi_invoked);
}
// Kitchen-sink round trip: every tracked field (changes, fingerprints,
// invariants, commit/cpi flags, counts, policy flags, segment mask) survives
// encode + decode intact.
#[test]
fn receipt_decoded_roundtrip_full() {
let layout_id = [0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88];
let before = [0u8; 64];
let mut after = before;
after[10] = 0xFF;
after[40] = 0xAA;
let mut receipt = StateReceipt::<64>::begin(&layout_id, &before);
let segments: &[(usize, usize)] = &[(0, 32), (32, 32)];
receipt.commit_with_segments(&after, segments);
receipt.set_invariants(true, 5);
receipt.set_policy_flags(0x13);
receipt.set_journal_appends(7);
receipt.set_cpi_count(1);
let wire = receipt.to_bytes();
let d = hopper_core::receipt::DecodedReceipt::from_bytes(&wire).unwrap();
assert_eq!(d.layout_id, layout_id);
assert!(d.has_changes());
assert!(d.fingerprint_changed());
assert_eq!(d.invariants_checked, 5);
assert!(d.invariants_passed);
assert!(d.committed);
assert!(d.cpi_invoked);
assert_eq!(d.cpi_count, 1);
assert_eq!(d.journal_appends, 7);
assert_eq!(d.policy_flags, 0x13);
assert_ne!(
d.segment_changed_mask & 0x02,
0,
"segment 1 should be dirty"
);
}
// Every SegmentRole variant survives an into_flags/from_flags round trip.
#[test]
fn segment_role_roundtrip_all_roles() {
let roles = [
SegmentRole::Core,
SegmentRole::Extension,
SegmentRole::Journal,
SegmentRole::Index,
SegmentRole::Cache,
SegmentRole::Audit,
SegmentRole::Shard,
SegmentRole::Unclassified,
];
for role in roles {
let flags = role.into_flags(0);
let decoded = SegmentRole::from_flags(flags);
assert_eq!(decoded, role, "Role {:?} roundtrip failed", role);
}
}
// The role occupies the upper nibble of the u16 (per the 0x?000 constants
// below); encoding a role must leave the lower 12 bits untouched.
#[test]
fn segment_role_preserves_lower_bits() {
let lower_flags: u16 = 0x0FFF; let flags = SegmentRole::Journal.into_flags(lower_flags);
assert_eq!(flags & 0x0FFF, 0x0FFF, "Lower bits should be preserved");
assert_eq!(SegmentRole::from_flags(flags), SegmentRole::Journal);
}
// Pins the semantic predicate matrix per role: Core is preserved/copied,
// Journal is append-only and clearable, Audit is immutable append-only and
// copied, Cache is rebuildable and droppable, Index is rebuildable but kept,
// Extension/Shard are kept but not copied.
#[test]
fn segment_role_semantic_methods() {
assert!(SegmentRole::Core.must_preserve());
assert!(!SegmentRole::Core.clearable_on_migration());
assert!(!SegmentRole::Core.rebuildable());
assert!(SegmentRole::Core.requires_migration_copy());
assert!(!SegmentRole::Core.is_safe_to_drop());
assert!(!SegmentRole::Journal.must_preserve());
assert!(SegmentRole::Journal.clearable_on_migration());
assert!(SegmentRole::Journal.is_append_only());
assert!(!SegmentRole::Journal.requires_migration_copy());
assert!(!SegmentRole::Journal.is_safe_to_drop());
assert!(SegmentRole::Audit.must_preserve());
assert!(SegmentRole::Audit.is_immutable_after_init());
assert!(SegmentRole::Audit.is_append_only());
assert!(SegmentRole::Audit.requires_migration_copy());
assert!(!SegmentRole::Audit.is_safe_to_drop());
assert!(SegmentRole::Cache.clearable_on_migration());
assert!(SegmentRole::Cache.rebuildable());
assert!(SegmentRole::Cache.is_safe_to_drop());
assert!(!SegmentRole::Cache.requires_migration_copy());
assert!(SegmentRole::Index.rebuildable());
assert!(!SegmentRole::Index.clearable_on_migration());
assert!(!SegmentRole::Index.is_safe_to_drop());
assert!(!SegmentRole::Index.requires_migration_copy());
assert!(!SegmentRole::Extension.requires_migration_copy());
assert!(!SegmentRole::Extension.is_safe_to_drop());
assert!(!SegmentRole::Shard.requires_migration_copy());
assert!(!SegmentRole::Shard.is_safe_to_drop());
}
// Human-readable names are stable lowercase identifiers.
#[test]
fn segment_role_name_strings() {
assert_eq!(SegmentRole::Core.name(), "core");
assert_eq!(SegmentRole::Extension.name(), "extension");
assert_eq!(SegmentRole::Journal.name(), "journal");
assert_eq!(SegmentRole::Index.name(), "index");
assert_eq!(SegmentRole::Cache.name(), "cache");
assert_eq!(SegmentRole::Audit.name(), "audit");
assert_eq!(SegmentRole::Shard.name(), "shard");
assert_eq!(SegmentRole::Unclassified.name(), "unclassified");
}
// The wire constants are consecutive values in bits 12..15 — part of the
// on-chain format, so they are pinned here against accidental renumbering.
#[test]
fn segment_role_flag_constants() {
assert_eq!(SEG_ROLE_CORE, 0x0000);
assert_eq!(SEG_ROLE_EXTENSION, 0x1000);
assert_eq!(SEG_ROLE_JOURNAL, 0x2000);
assert_eq!(SEG_ROLE_INDEX, 0x3000);
assert_eq!(SEG_ROLE_CACHE, 0x4000);
assert_eq!(SEG_ROLE_AUDIT, 0x5000);
assert_eq!(SEG_ROLE_SHARD, 0x6000);
}
// Trivial validator that accepts any context — used to populate graphs.
fn always_pass(_ctx: &ValidationContext) -> Result<(), hopper_runtime::error::ProgramError> {
Ok(())
}
// Trivial validator that rejects any context with InvalidArgument — used to
// exercise the graph's failure paths.
fn always_fail(_ctx: &ValidationContext) -> Result<(), hopper_runtime::error::ProgramError> {
Err(hopper_runtime::error::ProgramError::InvalidArgument)
}
// A graph with no validators vacuously passes. The unsafe cast reinterprets
// a raw 32-byte array as hopper_runtime::Address (assumed repr-compatible
// wrapper — TODO confirm), same pattern as the cpi_guard tests.
#[test]
fn validation_graph_empty_passes() {
let graph = ValidationGraph::<4>::new();
assert!(graph.is_empty());
let addr = [0u8; 32];
let ctx = ValidationContext::new(
unsafe { &*(&addr as *const [u8; 32] as *const hopper_runtime::Address) },
&[],
&[],
);
assert!(graph.run(&ctx).is_ok());
}
// Two passing validators => run succeeds; len reports both.
#[test]
fn validation_graph_all_pass() {
let mut graph = ValidationGraph::<4>::new();
graph.add(always_pass).unwrap();
graph.add(always_pass).unwrap();
assert_eq!(graph.len(), 2);
let addr = [0u8; 32];
let ctx = ValidationContext::new(
unsafe { &*(&addr as *const [u8; 32] as *const hopper_runtime::Address) },
&[],
&[],
);
assert!(graph.run(&ctx).is_ok());
}
// run() surfaces the failing middle validator as an error (pass, fail, pass).
#[test]
fn validation_graph_fail_fast() {
let mut graph = ValidationGraph::<4>::new();
graph.add(always_pass).unwrap();
graph.add(always_fail).unwrap();
graph.add(always_pass).unwrap();
let addr = [0u8; 32];
let ctx = ValidationContext::new(
unsafe { &*(&addr as *const [u8; 32] as *const hopper_runtime::Address) },
&[],
&[],
);
assert!(graph.run(&ctx).is_err());
}
// run_all() (presumably runs every validator rather than stopping early)
// still reports an error when the first validator failed.
#[test]
fn validation_graph_run_all_returns_first_error() {
let mut graph = ValidationGraph::<4>::new();
graph.add(always_fail).unwrap();
graph.add(always_pass).unwrap();
let addr = [0u8; 32];
let ctx = ValidationContext::new(
unsafe { &*(&addr as *const [u8; 32] as *const hopper_runtime::Address) },
&[],
&[],
);
assert!(graph.run_all(&ctx).is_err());
}
// The const-generic capacity (2 here) bounds add(); the third add errors.
#[test]
fn validation_graph_overflow_rejected() {
let mut graph = ValidationGraph::<2>::new();
graph.add(always_pass).unwrap();
graph.add(always_pass).unwrap();
assert!(graph.add(always_pass).is_err(), "Should reject overflow");
}
// Account header layout: disc at byte 0, version at byte 1, layout_id at
// bytes 4..12 (bytes 2..3 unchecked here — presumably padding/reserved).
#[test]
fn header_format_is_16_bytes_with_expected_fields() {
let mut buf = [0u8; 32];
let disc = 42u8;
let version = 3u8;
let layout_id = [0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88];
write_header(&mut buf, disc, version, &layout_id).unwrap();
assert_eq!(buf[0], disc, "disc at offset 0");
assert_eq!(buf[1], version, "version at offset 1");
assert_eq!(&buf[4..12], &layout_id, "layout_id at offset 4..12");
}
// Readers reject buffers shorter than the field they need (1 byte for
// version, 11 bytes — one short of layout_id's end — for layout_id).
#[test]
fn header_too_short_rejects() {
let buf = [0u8; 1];
assert!(read_version(&buf).is_err());
let short = [0u8; 11];
assert!(read_layout_id(&short).is_err());
}
// A 16-byte buffer is exactly large enough to write and read back.
#[test]
fn header_exact_size_accepts() {
let mut buf = [0u8; 16];
write_header(&mut buf, 1, 1, &[0; 8]).unwrap();
assert_eq!(read_version(&buf).unwrap(), 1);
}
/// Like `build_ix_sysvar`, but each instruction carries `num_metas`
/// synthetic account-meta records so parsers must skip a variable-length
/// meta section to reach the program id:
///
///   [num_ix: u16 LE]
///   [offset table: num_ix x u16 LE]
///   per instruction:
///     [num metas: u16 LE]
///     per meta: [flags: u8][pubkey: 32 bytes]   -- 33 bytes each
///     [program id: 32 bytes]
///     [instruction data len: u16 LE]            -- always 0
///   [current instruction index: u16 LE]
///
/// Meta flags: 0x03 (signer|writable) for the first meta, 0x02 (writable)
/// for the rest; each meta pubkey's first byte is its index.
fn build_ix_sysvar_with_metas(
    instructions: &[(&[u8; 32], usize)], current_idx: u16,
) -> Vec<u8> {
    let num_ix = instructions.len() as u16;
    let mut buf = Vec::new();
    buf.extend_from_slice(&num_ix.to_le_bytes());
    // Reserve the offset table; backpatched after serialization.
    let offset_table_start = buf.len();
    for _ in 0..num_ix {
        buf.extend_from_slice(&0u16.to_le_bytes());
    }
    let mut offsets = Vec::new();
    for &(program_id, num_metas) in instructions {
        offsets.push(buf.len() as u16);
        buf.extend_from_slice(&(num_metas as u16).to_le_bytes());
        for m in 0..num_metas {
            // First meta is signer+writable (0x03), the rest writable (0x02).
            let flags: u8 = if m == 0 { 0x03 } else { 0x02 }; buf.push(flags);
            let mut key = [0u8; 32];
            key[0] = m as u8;
            buf.extend_from_slice(&key);
        }
        buf.extend_from_slice(program_id);
        buf.extend_from_slice(&0u16.to_le_bytes()); // zero-length ix data
    }
    // Backpatch the offset table entries in place.
    for (i, offset) in offsets.iter().enumerate() {
        let pos = offset_table_start + i * 2;
        buf[pos..pos + 2].copy_from_slice(&offset.to_le_bytes());
    }
    // Trailing current-instruction index.
    // BUGFIX: the original line read `¤t_idx` — an HTML-entity-corrupted
    // `&current_idx` (`&curren` -> `¤`) that does not compile.
    buf.extend_from_slice(&current_idx.to_le_bytes());
    buf
}
// Parsers must skip the variable-length account-meta section (3 metas for
// ix0, 1 for ix1) and still land on each program id.
#[test]
fn sysvar_parse_with_account_metas() {
let p0 = [10u8; 32];
let p1 = [20u8; 32];
let sysvar = build_ix_sysvar_with_metas(&[(&p0, 3), (&p1, 1)], 0);
assert_eq!(instruction_count(&sysvar).unwrap(), 2);
assert_eq!(current_instruction_index(&sysvar).unwrap(), 0);
assert_eq!(read_program_id_at(&sysvar, 0).unwrap(), p0);
assert_eq!(read_program_id_at(&sysvar, 1).unwrap(), p1);
}
// Larger meta section (8 metas) — the skip logic is length-driven, not fixed.
#[test]
fn sysvar_parse_with_many_account_metas() {
let p0 = [0xAA; 32];
let sysvar = build_ix_sysvar_with_metas(&[(&p0, 8)], 0);
assert_eq!(instruction_count(&sysvar).unwrap(), 1);
assert_eq!(read_program_id_at(&sysvar, 0).unwrap(), p0);
}
// Zero metas per instruction degenerates to the plain build_ix_sysvar shape.
#[test]
fn sysvar_parse_with_zero_account_metas_three_instructions() {
let p0 = [1u8; 32];
let p1 = [2u8; 32];
let p2 = [3u8; 32];
let sysvar = build_ix_sysvar_with_metas(&[(&p0, 0), (&p1, 0), (&p2, 0)], 2);
assert_eq!(instruction_count(&sysvar).unwrap(), 3);
assert_eq!(current_instruction_index(&sysvar).unwrap(), 2);
assert_eq!(read_program_id_at(&sysvar, 0).unwrap(), p0);
assert_eq!(read_program_id_at(&sysvar, 1).unwrap(), p1);
assert_eq!(read_program_id_at(&sysvar, 2).unwrap(), p2);
}
// One lone byte cannot hold the u16 instruction count.
#[test]
fn sysvar_parse_single_byte_rejects() {
assert!(instruction_count(&[0xFF]).is_err());
}
// Claims 5 instructions but supplies only one offset-table slot (and no
// bodies) — reading any program id must fail rather than read out of bounds.
#[test]
fn sysvar_parse_truncated_offset_table_rejects() {
let mut buf = Vec::new();
buf.extend_from_slice(&5u16.to_le_bytes());
buf.extend_from_slice(&0u16.to_le_bytes()); assert!(read_program_id_at(&buf, 0).is_err());
}
// Current index at the last instruction still resolves, and top-level passes
// when that instruction is ours.
#[test]
fn cpi_guard_current_index_at_boundary() {
let p = [1u8; 32];
let sysvar = build_ix_sysvar(&[&p, &p], 1);
assert_eq!(current_instruction_index(&sysvar).unwrap(), 1);
assert!(require_top_level(&sysvar, unsafe {
&*(&p as *const [u8; 32] as *const hopper_runtime::Address)
})
.is_ok());
}
// A bracket needs occurrences strictly on BOTH sides of the current index:
// at index 1 of [ours, other, ours] the same id brackets us (Err); at index
// 0 nothing precedes us, so no bracket (Ok).
#[test]
fn cpi_guard_flash_loan_requires_both_sides() {
let ours = [1u8; 32];
let other = [2u8; 32];
let sysvar = build_ix_sysvar(&[&ours, &other, &ours], 1);
assert!(detect_flash_loan_bracket(&sysvar, unsafe {
&*(&ours as *const [u8; 32] as *const hopper_runtime::Address)
})
.is_err());
let sysvar2 = build_ix_sysvar(&[&ours, &other, &ours], 0);
assert!(detect_flash_loan_bracket(&sysvar2, unsafe {
&*(&ours as *const [u8; 32] as *const hopper_runtime::Address)
})
.is_ok());
}
// Circular journal, one past capacity: the single oldest entry (100) is
// dropped; reads are oldest-first over the surviving window.
#[test]
fn journal_circular_overwrites_oldest_correctly() {
let entry_size = 8;
let capacity = 3;
let buf_size = JOURNAL_HEADER_SIZE + capacity * entry_size;
let mut buf = vec![0u8; buf_size];
let mut journal = Journal::<Entry8>::from_bytes_mut(&mut buf).unwrap();
journal.init(true);
journal.append(Entry8::new(100)).unwrap();
journal.append(Entry8::new(200)).unwrap();
journal.append(Entry8::new(300)).unwrap();
journal.append(Entry8::new(400)).unwrap();
assert_eq!(journal.entry_count(), 3);
assert_eq!(journal.read(0).unwrap().val(), 200);
assert_eq!(journal.read(1).unwrap().val(), 300);
assert_eq!(journal.read(2).unwrap().val(), 400);
}
// Ten appends into a 2-slot ring: final window is the last two values, in
// append order.
#[test]
fn journal_circular_wrap_many_preserves_order() {
let capacity = 2;
let buf_size = JOURNAL_HEADER_SIZE + capacity * 8;
let mut buf = vec![0u8; buf_size];
let mut journal = Journal::<Entry8>::from_bytes_mut(&mut buf).unwrap();
journal.init(true);
for i in 0..10u64 {
journal.append(Entry8::new(i * 10)).unwrap();
}
assert_eq!(journal.entry_count(), 2);
assert_eq!(journal.read(0).unwrap().val(), 80);
assert_eq!(journal.read(1).unwrap().val(), 90);
}
// Strict mode never wraps: third append into a 2-slot journal errors.
#[test]
fn journal_strict_rejects_when_full() {
let capacity = 2;
let buf_size = JOURNAL_HEADER_SIZE + capacity * 8;
let mut buf = vec![0u8; buf_size];
let mut journal = Journal::<Entry8>::from_bytes_mut(&mut buf).unwrap();
journal.init(false);
journal.append(Entry8::new(1)).unwrap();
journal.append(Entry8::new(2)).unwrap();
assert!(
journal.append(Entry8::new(3)).is_err(),
"strict journal should reject when full"
);
}
// latest() tracks the most recent append, independent of capacity headroom.
#[test]
fn journal_latest_returns_most_recent() {
let capacity = 5;
let buf_size = JOURNAL_HEADER_SIZE + capacity * 8;
let mut buf = vec![0u8; buf_size];
let mut journal = Journal::<Entry8>::from_bytes_mut(&mut buf).unwrap();
journal.init(false);
journal.append(Entry8::new(10)).unwrap();
journal.append(Entry8::new(20)).unwrap();
journal.append(Entry8::new(30)).unwrap();
assert_eq!(journal.latest().unwrap().val(), 30);
}
// Reads are bounded by entries written, not by capacity: with one entry in
// a 3-slot journal, index 1 (and 100) are errors.
#[test]
fn journal_read_out_of_bounds_fails() {
let capacity = 3;
let buf_size = JOURNAL_HEADER_SIZE + capacity * 8;
let mut buf = vec![0u8; buf_size];
let mut journal = Journal::<Entry8>::from_bytes_mut(&mut buf).unwrap();
journal.init(false);
journal.append(Entry8::new(1)).unwrap();
assert!(
journal.read(1).is_err(),
"index 1 is out of bounds when only 1 entry written"
);
assert!(journal.read(100).is_err());
}
use hopper_core::abi::{FingerprintTransition, LayoutFingerprint};
// verify_header accepts a header whose bytes 4..12 equal the fingerprint
// (same layout_id offset the header tests above pin).
#[test]
fn fingerprint_verify_header_correct_data() {
let fp = LayoutFingerprint::from_bytes([0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88]);
let mut header = [0u8; 16];
header[4..12].copy_from_slice(&[0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88]);
assert!(fp.verify_header(&header).is_ok());
}
// A mismatched layout id in the header is rejected.
#[test]
fn fingerprint_verify_header_wrong_id() {
let fp = LayoutFingerprint::from_bytes([0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88]);
let mut header = [0u8; 16];
header[4..12].copy_from_slice(&[0xFF; 8]); assert!(fp.verify_header(&header).is_err());
}
// Headers shorter than the 12 bytes needed to contain the id are rejected.
#[test]
fn fingerprint_verify_header_too_short() {
let fp = LayoutFingerprint::from_bytes([1; 8]);
assert!(
fp.verify_header(&[0u8; 11]).is_err(),
"data shorter than 12 should fail"
);
}
// matches/differs_from are complementary for identical fingerprints.
#[test]
fn fingerprint_matches_identity() {
let a = LayoutFingerprint::from_bytes([1, 2, 3, 4, 5, 6, 7, 8]);
let b = LayoutFingerprint::from_bytes([1, 2, 3, 4, 5, 6, 7, 8]);
assert!(a.matches(&b));
assert!(!a.differs_from(&b));
}
// Flipping any single byte of the 8 is detected by differs_from.
#[test]
fn fingerprint_differs_on_any_byte() {
let base = [1, 2, 3, 4, 5, 6, 7, 8];
let a = LayoutFingerprint::from_bytes(base);
for i in 0..8 {
let mut changed = base;
changed[i] ^= 0xFF;
let b = LayoutFingerprint::from_bytes(changed);
assert!(a.differs_from(&b), "byte {} change should be detected", i);
}
}
// A transition between two distinct fingerprints passes assert_valid
// (i.e. assert_valid does not panic for from != to).
#[test]
fn fingerprint_transition_valid() {
let from = LayoutFingerprint::from_bytes([1; 8]);
let to = LayoutFingerprint::from_bytes([2; 8]);
let t = FingerprintTransition::new(from, to);
t.assert_valid(); }
// DecodedReceipt::from_bytes returns None (not panic) for inputs shorter
// than the 64-byte minimum checked here.
#[test]
fn receipt_decode_from_bytes_rejects_short_data() {
use hopper_core::receipt::DecodedReceipt;
assert!(DecodedReceipt::from_bytes(&[0u8; 63]).is_none());
assert!(DecodedReceipt::from_bytes(&[0u8; 0]).is_none());
}
// An all-zero 64-byte buffer decodes to the neutral receipt: zero id, no
// changes, no flags, zero counters.
#[test]
fn receipt_decode_zeroed_data() {
use hopper_core::receipt::DecodedReceipt;
let data = [0u8; 64];
let r = DecodedReceipt::from_bytes(&data).unwrap();
assert_eq!(r.layout_id, [0; 8]);
assert!(!r.has_changes());
assert!(!r.fingerprint_changed());
assert!(!r.committed);
assert!(!r.cpi_invoked);
assert_eq!(r.cpi_count, 0);
assert_eq!(r.policy_flags, 0);
assert_eq!(r.journal_appends, 0);
}
// Golden-path slab saturation: allocate all 4 slots, reject the 5th, free
// one, and verify a new alloc refills it back to full.
#[test]
fn slab_alloc_all_slots_then_reject_golden() {
let cap = 4;
let bmap = bitmap_bytes(cap);
let total = SLAB_HEADER_SIZE + bmap + cap * Entry8::SIZE;
let mut buf = vec![0u8; total];
Slab::<Entry8>::init(&mut buf, cap).unwrap();
let mut slab = Slab::<Entry8>::from_bytes_mut(&mut buf).unwrap();
let mut slots = Vec::new();
for i in 0..cap {
slots.push(slab.alloc(Entry8::new(i as u64)).unwrap());
}
assert!(slab.is_full());
assert!(
slab.alloc(Entry8::new(999)).is_err(),
"should reject when all slots used"
);
slab.free(slots[0]).unwrap();
assert!(slab.alloc(Entry8::new(777)).is_ok());
assert!(slab.is_full());
}
// Duplicate of slab_double_free_rejected above with a distinct name —
// re-pins that freeing the same slot twice errors.
#[test]
fn slab_double_free_is_rejected() {
let cap = 2;
let bmap = bitmap_bytes(cap);
let total = SLAB_HEADER_SIZE + bmap + cap * Entry8::SIZE;
let mut buf = vec![0u8; total];
Slab::<Entry8>::init(&mut buf, cap).unwrap();
let mut slab = Slab::<Entry8>::from_bytes_mut(&mut buf).unwrap();
let slot = slab.alloc(Entry8::new(1)).unwrap();
slab.free(slot).unwrap();
assert!(slab.free(slot).is_err(), "double free should be rejected");
}