use crate::{SlotBackendKind, SlotStorage};
#[test]
fn test_all_backends_smoke() {
    // Use the shared `all_backends()` helper instead of repeating the
    // backend list, so a new backend kind only needs to be added once.
    for kind in all_backends() {
        test_backend_smoke(kind);
    }
}
/// Smoke-check a single backend: open a group, allocate a value slot,
/// read it back, overwrite it, re-read, then close out and flush.
fn test_backend_smoke(kind: SlotBackendKind) {
    use crate::slot_backend::SlotBackend;
    let mut storage = SlotBackend::new(kind);

    // The very first composition of a group can never come from a gap.
    let first = storage.begin_group(100);
    assert!(
        !first.restored_from_gap,
        "{:?}: First group should not be from gap",
        kind
    );

    // Fresh slot: the initializer supplies the value.
    let slot = storage.alloc_value_slot(|| 42);
    assert_eq!(*storage.read_value::<i32>(slot), 42, "{:?}: Value should match", kind);

    // Overwrite and verify the new value is observed.
    storage.write_value(slot, 100);
    assert_eq!(
        *storage.read_value::<i32>(slot),
        100,
        "{:?}: Updated value should match",
        kind
    );

    storage.end_group();
    storage.flush();
}
#[test]
fn test_backends_recomposition() {
    // Use the shared `all_backends()` helper instead of repeating the
    // backend list, so a new backend kind only needs to be added once.
    for kind in all_backends() {
        test_backend_recomposition(kind);
    }
}
/// Exercise the scope-based recomposition path for one backend: register a
/// scope during composition, then reenter the storage at that scope.
fn test_backend_recomposition(kind: SlotBackendKind) {
    use crate::slot_backend::SlotBackend;
    let mut storage = SlotBackend::new(kind);
    storage.reset();

    // Compose one group and tag it with scope id 1.
    let opened = storage.begin_group(200);
    storage.set_group_scope(opened.group, 1);
    let _slot1 = storage.alloc_value_slot(|| "hello");
    storage.end_group();
    storage.flush();

    // Recompose directly at the scope recorded above.
    // NOTE(review): "recranpose" spelling matches the backend API as used
    // throughout this file.
    storage.reset();
    match storage.begin_recranpose_at_scope(1) {
        Some(_group) => {
            storage.end_group();
            storage.end_recompose();
        }
        None => panic!("{:?}: Should find scope for recomposition", kind),
    }
}
#[test]
fn test_backends_gaps() {
    // Use the shared `all_backends()` helper instead of repeating the
    // backend list, so a new backend kind only needs to be added once.
    for kind in all_backends() {
        test_backend_gaps(kind);
    }
}
/// Compose a scoped parent group containing a nested child with one slot;
/// every backend must handle the nesting and flush without panicking.
fn test_backend_gaps(kind: SlotBackendKind) {
    use crate::slot_backend::SlotBackend;
    let mut storage = SlotBackend::new(kind);
    storage.reset();

    let outer = storage.begin_group(300);
    storage.set_group_scope(outer.group, 2);

    let _inner = storage.begin_group(301);
    let _slot = storage.alloc_value_slot(|| 123);
    storage.end_group();

    storage.end_group();
    storage.flush();
}
#[test]
fn test_backends_allocate_fewer_slots() {
    // Use the shared `all_backends()` helper instead of repeating the
    // backend list, so a new backend kind only needs to be added once.
    for kind in all_backends() {
        test_backend_allocate_fewer_slots(kind);
    }
}
// A later frame that allocates FEWER slots than the previous frame (and then
// finalizes) must not break a third frame that allocates the original count.
fn test_backend_allocate_fewer_slots(kind: SlotBackendKind) {
use crate::slot_backend::SlotBackend;
let mut storage = SlotBackend::new(kind);
// Frame 1: three slots.
storage.reset();
let _group = storage.begin_group(400);
let slot1 = storage.alloc_value_slot(|| 10);
let slot2 = storage.alloc_value_slot(|| 20);
let slot3 = storage.alloc_value_slot(|| 30);
assert_eq!(*storage.read_value::<i32>(slot1), 10);
assert_eq!(*storage.read_value::<i32>(slot2), 20);
assert_eq!(*storage.read_value::<i32>(slot3), 30);
storage.end_group();
storage.flush();
// Frame 2: only two slots, then finalize the shortened group.
storage.reset();
let _group = storage.begin_group(400);
let slot1_v2 = storage.alloc_value_slot(|| 10);
let slot2_v2 = storage.alloc_value_slot(|| 20);
assert_eq!(*storage.read_value::<i32>(slot1_v2), 10);
assert_eq!(*storage.read_value::<i32>(slot2_v2), 20);
storage.finalize_current_group();
storage.end_group();
storage.flush();
// Frame 3: back to three slots; the third slot must be re-created cleanly.
storage.reset();
let _group = storage.begin_group(400);
let slot1_v3 = storage.alloc_value_slot(|| 10);
let slot2_v3 = storage.alloc_value_slot(|| 20);
let slot3_v3 = storage.alloc_value_slot(|| 30);
assert_eq!(*storage.read_value::<i32>(slot1_v3), 10);
assert_eq!(*storage.read_value::<i32>(slot2_v3), 20);
assert_eq!(*storage.read_value::<i32>(slot3_v3), 30);
storage.end_group();
storage.flush();
}
#[test]
fn test_backends_gap_restore() {
    // Use the shared `all_backends()` helper instead of repeating the
    // backend list, so a new backend kind only needs to be added once.
    for kind in all_backends() {
        test_backend_gap_restore(kind);
    }
}
// Verifies the gap-restore lifecycle: a finalized group that is skipped for
// an entire frame must be restorable from its gap on the following frame.
fn test_backend_gap_restore(kind: SlotBackendKind) {
use crate::slot_backend::SlotBackend;
let mut storage = SlotBackend::new(kind);
// Frame 1: compose group 500 and finalize it.
storage.reset();
let result1 = storage.begin_group(500);
assert!(
!result1.restored_from_gap,
"{:?}: First group should not be from gap",
kind
);
let _slot = storage.alloc_value_slot(|| 42);
storage.end_group();
storage.finalize_current_group();
storage.flush();
// Frame 2: skip the group entirely; finalizing the root parks it in a gap.
storage.reset();
storage.finalize_current_group();
storage.flush();
// Frame 3: the group must come back from the gap, not compose fresh.
storage.reset();
let result3 = storage.begin_group(500);
assert!(
result3.restored_from_gap,
"{:?}: Group should be restored from gap",
kind
);
storage.end_group();
storage.flush();
}
#[test]
fn test_backends_insert_middle_after_finalize() {
    // Use the shared `all_backends()` helper instead of repeating the
    // backend list, so a new backend kind only needs to be added once.
    for kind in all_backends() {
        test_backend_insert_middle_after_finalize(kind);
    }
}
// After finalizing a group, re-composing it with a node record inserted
// after the existing value slots must keep those slots readable.
fn test_backend_insert_middle_after_finalize(kind: SlotBackendKind) {
use crate::slot_backend::SlotBackend;
let mut storage = SlotBackend::new(kind);
// Frame 1: three slots, finalized before the group closes.
storage.reset();
let _group = storage.begin_group(700);
let slot1 = storage.alloc_value_slot(|| 10);
let slot2 = storage.alloc_value_slot(|| 20);
let slot3 = storage.alloc_value_slot(|| 30);
storage.finalize_current_group();
storage.end_group();
storage.flush();
// Slot ids from frame 1 stay valid for reads after the flush.
assert_eq!(*storage.read_value::<i32>(slot1), 10);
assert_eq!(*storage.read_value::<i32>(slot2), 20);
assert_eq!(*storage.read_value::<i32>(slot3), 30);
// Frame 2: two slots plus a node record; both slots must still resolve.
storage.reset();
let _group = storage.begin_group(700);
let slot1_v2 = storage.alloc_value_slot(|| 10);
let slot2_v2 = storage.alloc_value_slot(|| 20);
storage.record_node(42);
storage.finalize_current_group();
storage.end_group();
storage.flush();
assert_eq!(*storage.read_value::<i32>(slot1_v2), 10);
assert_eq!(*storage.read_value::<i32>(slot2_v2), 20);
}
#[test]
fn test_backends_gap_restore_frame_end() {
    // Use the shared `all_backends()` helper instead of repeating the
    // backend list, so a new backend kind only needs to be added once.
    for kind in all_backends() {
        test_backend_gap_restore_frame_end(kind);
    }
}
/// After a frame ends with a finalize, re-opening the same group may come
/// back from a gap; when it does, finalizing again must be safe.
fn test_backend_gap_restore_frame_end(kind: SlotBackendKind) {
    use crate::slot_backend::SlotBackend;
    let mut storage = SlotBackend::new(kind);

    // Frame 1: one group with two slots, finalized at frame end.
    storage.reset();
    let _group = storage.begin_group(800);
    let _first = storage.alloc_value_slot(|| 100);
    let _second = storage.alloc_value_slot(|| 200);
    storage.end_group();
    storage.finalize_current_group();
    storage.flush();

    // Frame 2: whether the group comes back from a gap appears to be
    // backend-dependent; only finalize when it does.
    storage.reset();
    let reopened = storage.begin_group(800);
    if reopened.restored_from_gap {
        storage.finalize_current_group();
    }
    storage.end_group();
    storage.flush();
}
#[test]
fn test_backends_scope_recomposition() {
    // Use the shared `all_backends()` helper instead of repeating the
    // backend list, so a new backend kind only needs to be added once.
    for kind in all_backends() {
        test_backend_scope_recomposition(kind);
    }
}
/// Tag nested groups with distinct scope ids, then recompose directly at
/// the inner scope only.
fn test_backend_scope_recomposition(kind: SlotBackendKind) {
    use crate::slot_backend::SlotBackend;
    let mut storage = SlotBackend::new(kind);

    // Compose: outer (scope 10) -> inner (scope 11) -> one value slot.
    storage.reset();
    let outer = storage.begin_group(600);
    storage.set_group_scope(outer.group, 10);
    let inner = storage.begin_group(601);
    storage.set_group_scope(inner.group, 11);
    let _slot = storage.alloc_value_slot(|| "test");
    storage.end_group();
    storage.end_group();
    storage.flush();

    // Recompose at the inner scope. The assert panics on None, so the
    // end_group/end_recompose pair below only runs when a scope was found.
    storage.reset();
    let found = storage.begin_recranpose_at_scope(11);
    assert!(found.is_some(), "{:?}: Should find inner scope", kind);
    storage.end_group();
    storage.end_recompose();
    storage.flush();
}
fn all_backends() -> [SlotBackendKind; 4] {
[
SlotBackendKind::Baseline,
SlotBackendKind::Chunked,
SlotBackendKind::Hierarchical,
SlotBackendKind::Split,
]
}
#[allow(dead_code)]
fn all_but_hierarchical() -> [SlotBackendKind; 3] {
[
SlotBackendKind::Baseline,
SlotBackendKind::Chunked,
SlotBackendKind::Split,
]
}
/// A nested group finalized mid-frame must not prevent the next frame from
/// recomposing the same nesting and writing through a fresh slot.
#[test]
fn test_backends_nested_groups_partial_finalize() {
    use crate::slot_backend::SlotBackend;
    for kind in all_backends() {
        let mut storage = SlotBackend::new(kind);

        // Frame 1: outer -> inner -> slot; finalize right after the inner
        // group closes, while the outer is still open.
        storage.reset();
        let _outer = storage.begin_group(100);
        let _inner = storage.begin_group(101);
        let _slot = storage.alloc_value_slot(|| 1usize);
        storage.end_group();
        storage.finalize_current_group();
        storage.end_group();
        storage.flush();

        // Frame 2: same shape; the slot must be writable and readable.
        storage.reset();
        let _outer2 = storage.begin_group(100);
        let _inner2 = storage.begin_group(101);
        let slot2 = storage.alloc_value_slot(|| 2usize);
        storage.write_value(slot2, 2usize);
        assert_eq!(*storage.read_value::<usize>(slot2), 2, "{:?}", kind);
        storage.end_group();
        storage.end_group();
        storage.flush();
    }
}
/// Finalizing at the root of an otherwise empty frame must be safe (no
/// panic, storage remains usable).
#[test]
fn test_backends_root_finalize_safe() {
    use crate::slot_backend::SlotBackend;
    for kind in all_backends() {
        let mut storage = SlotBackend::new(kind);

        // Frame 1: a normal group with one slot.
        storage.reset();
        let _g = storage.begin_group(2000);
        let _slot = storage.alloc_value_slot(|| 10i32);
        storage.end_group();
        storage.flush();

        // Frame 2: compose nothing, just finalize the root and flush.
        storage.reset();
        let _ = storage.finalize_current_group();
        storage.flush();
    }
}
/// Reallocating a slot position with a different value type in a later
/// frame must yield the new value, not a misread of the old payload.
#[test]
fn test_backends_value_slot_type_mismatch_overwrite() {
    use crate::slot_backend::SlotBackend;
    for kind in all_backends() {
        let mut storage = SlotBackend::new(kind);

        // Frame 1: the slot holds an i32.
        storage.reset();
        let _g = storage.begin_group(3000);
        let slot = storage.alloc_value_slot(|| 123i32);
        assert_eq!(*storage.read_value::<i32>(slot), 123);
        storage.end_group();
        storage.flush();

        // Frame 2: the same position now holds a &str.
        storage.reset();
        let _g = storage.begin_group(3000);
        let slot2 = storage.alloc_value_slot(|| "hello");
        assert_eq!(storage.read_value::<&str>(slot2), &"hello");
        storage.end_group();
        storage.flush();
    }
}
/// Interleaving a node record between two value slots must not disturb
/// either slot's payload.
#[test]
fn test_backends_value_node_value_sequence() {
    use crate::slot_backend::SlotBackend;
    for kind in all_backends() {
        let mut storage = SlotBackend::new(kind);
        storage.reset();
        let _g = storage.begin_group(4000);
        let first = storage.alloc_value_slot(|| 1u8);
        storage.record_node(999);
        let second = storage.alloc_value_slot(|| 2u8);
        assert_eq!(*storage.read_value::<u8>(first), 1, "{:?}", kind);
        assert_eq!(*storage.read_value::<u8>(second), 2, "{:?}", kind);
        storage.end_group();
        storage.flush();
    }
}
/// A finalized group holding many slots becomes a gap; re-opening it and
/// allocating only a single slot must still work.
#[test]
fn test_backends_gap_scan_forward() {
    use crate::slot_backend::SlotBackend;
    for kind in all_backends() {
        let mut storage = SlotBackend::new(kind);

        // Frame 1: 16 slots, each verified as it is allocated; finalize.
        storage.reset();
        let _g = storage.begin_group(5000);
        for i in 0..16 {
            let s = storage.alloc_value_slot(|| i);
            assert_eq!(*storage.read_value::<i32>(s), i, "{:?}", kind);
        }
        storage.finalize_current_group();
        storage.end_group();
        storage.flush();

        // Frame 2: same group, only one slot allocated.
        storage.reset();
        let _g = storage.begin_group(5000);
        let _s = storage.alloc_value_slot(|| 999i32);
        storage.end_group();
        storage.flush();
    }
}
/// A gap may only be restored for the group key that created it; a
/// different key at the same position must compose fresh.
#[test]
fn test_backends_gap_restore_rejects_key_mismatch() {
    use crate::slot_backend::SlotBackend;
    for kind in all_backends() {
        let mut storage = SlotBackend::new(kind);

        // Frame 1: group 900, finalized so it can park in a gap.
        storage.reset();
        let _g = storage.begin_group(900);
        let _s = storage.alloc_value_slot(|| 1i32);
        storage.end_group();
        storage.finalize_current_group();
        storage.flush();

        // Frame 2: a different key must NOT pick up the old gap.
        storage.reset();
        let res = storage.begin_group(901);
        assert!(
            !res.restored_from_gap,
            "{:?}: should not restore if key changed",
            kind
        );
        storage.end_group();
        storage.flush();
    }
}
/// After restoring a group from a gap, allocating MORE slots than the gap
/// originally contained must extend it correctly.
#[test]
fn test_backends_allocate_more_slots_after_gap_restore() {
    use crate::slot_backend::SlotBackend;
    for kind in all_backends() {
        let mut storage = SlotBackend::new(kind);

        // Frame 1: two slots.
        storage.reset();
        let _g = storage.begin_group(910);
        let _a = storage.alloc_value_slot(|| 10i32);
        let _b = storage.alloc_value_slot(|| 20i32);
        storage.end_group();
        storage.flush();

        // Frame 2: skip the group entirely so it is parked in a gap.
        storage.reset();
        storage.finalize_current_group();
        storage.flush();

        // Frame 3: restore, then allocate a third, brand-new slot.
        storage.reset();
        let res = storage.begin_group(910);
        assert!(res.restored_from_gap, "{:?}: should restore from gap", kind);
        let _a2 = storage.alloc_value_slot(|| 10i32);
        let _b2 = storage.alloc_value_slot(|| 20i32);
        let c2 = storage.alloc_value_slot(|| 30i32);
        assert_eq!(*storage.read_value::<i32>(c2), 30);
        storage.end_group();
        storage.flush();
    }
}
// Gaps created by one sibling group must not leak into another: skipping
// the second sibling for one frame must let it restore from its own gap.
#[test]
fn test_backends_multiple_sibling_groups_gap_isolated() {
for kind in all_backends() {
use crate::slot_backend::SlotBackend;
let mut storage = SlotBackend::new(kind);
// Frame 1: two sibling groups, one slot each.
storage.reset();
let _g1 = storage.begin_group(1000);
let _s1 = storage.alloc_value_slot(|| 1i32);
storage.end_group();
let _g2 = storage.begin_group(1001);
let _s2 = storage.alloc_value_slot(|| 2i32);
storage.end_group();
storage.flush();
// Frame 2: only the first sibling composes; the root finalize parks the
// second sibling in a gap.
storage.reset();
let _g1b = storage.begin_group(1000);
let _s1b = storage.alloc_value_slot(|| 11i32);
storage.end_group();
storage.finalize_current_group();
storage.flush();
// Frame 3: both siblings again; the second must come from its gap.
storage.reset();
let _g1c = storage.begin_group(1000);
let _s1c = storage.alloc_value_slot(|| 111i32);
storage.end_group();
let g2c = storage.begin_group(1001);
assert!(
g2c.restored_from_gap,
"{:?}: second sibling should restore from gap",
kind
);
storage.end_group();
storage.flush();
}
}
// Recording a node inside the first sibling shifts everything after it;
// the following sibling group's slot must still be writable and readable.
#[test]
fn test_backends_insert_before_following_group() {
for kind in all_backends() {
use crate::slot_backend::SlotBackend;
let mut storage = SlotBackend::new(kind);
// Frame 1: two sibling groups with one slot each.
storage.reset();
let _g1 = storage.begin_group(1100);
let _v1 = storage.alloc_value_slot(|| 1u32);
storage.end_group();
let _g2 = storage.begin_group(1101);
let _v2 = storage.alloc_value_slot(|| 2u32);
storage.end_group();
storage.flush();
// Frame 2: insert a node into the first group, shifting the second.
storage.reset();
let _g1b = storage.begin_group(1100);
let _v1b = storage.alloc_value_slot(|| 1u32);
storage.record_node(777);
storage.end_group();
let _g2b = storage.begin_group(1101);
let v2b = storage.alloc_value_slot(|| 22u32);
storage.write_value(v2b, 22u32);
assert_eq!(
*storage.read_value::<u32>(v2b),
22,
"{:?}: shifted second group must still work",
kind
);
storage.end_group();
storage.flush();
}
}
/// Looking up a scope id that was never registered must return None and
/// leave the storage usable for normal composition afterwards.
#[test]
fn test_backends_scope_recomposition_not_found_is_safe() {
    use crate::slot_backend::SlotBackend;
    for kind in all_backends() {
        let mut storage = SlotBackend::new(kind);

        // Frame 1: ordinary composition with no scopes registered.
        storage.reset();
        let _g = storage.begin_group(1200);
        let _v = storage.alloc_value_slot(|| 1i32);
        storage.end_group();
        storage.flush();

        // Frame 2: the bogus scope lookup must fail cleanly, and normal
        // recomposition of the same group must still succeed afterwards.
        storage.reset();
        let found = storage.begin_recranpose_at_scope(999_999);
        assert!(
            found.is_none(),
            "{:?}: nonexistent scope should return None",
            kind
        );
        let _g2 = storage.begin_group(1200);
        let _v2 = storage.alloc_value_slot(|| 2i32);
        storage.end_group();
        storage.flush();
    }
}
/// The split backend must keep a slot's payload alive across a gap restore:
/// re-opening the group must reuse the stored String instead of running the
/// new initializer.
///
/// Bug fixed: the second read's result was discarded (`let _ = ...`), so the
/// test asserted nothing about persistence; it now checks the payload.
#[test]
fn test_split_payload_persists_across_gap() {
    use crate::slot_backend::{SlotBackend, SlotBackendKind};
    let mut storage = SlotBackend::new(SlotBackendKind::Split);

    // Frame 1: store a String payload and finalize the group.
    storage.reset();
    let _g = storage.begin_group(6000);
    let vs = storage.alloc_value_slot(|| String::from("keep me"));
    assert_eq!(storage.read_value::<String>(vs), "keep me");
    storage.end_group();
    storage.finalize_current_group();
    storage.flush();

    // Frame 2: re-open the same group. Whether it is matched in place or
    // restored from a gap, the slot must keep the original payload — the
    // new initializer must not overwrite it.
    storage.reset();
    let _g = storage.begin_group(6000);
    let vs2 = storage.alloc_value_slot(|| String::from("should not overwrite if recovered"));
    assert_eq!(
        storage.read_value::<String>(vs2),
        "keep me",
        "split backend must preserve the payload across the gap"
    );
    storage.end_group();
    storage.flush();
}
/// The hierarchical backend must locate a scope registered in a previous
/// frame and recompose at it.
#[test]
fn test_hierarchical_scope_search() {
    use crate::slot_backend::{SlotBackend, SlotBackendKind};
    let mut storage = SlotBackend::new(SlotBackendKind::Hierarchical);

    // Frame 1: register scope 123 on the group.
    storage.reset();
    let opened = storage.begin_group(7000);
    storage.set_group_scope(opened.group, 123);
    let _slot = storage.alloc_value_slot(|| 42i32);
    storage.end_group();
    storage.flush();

    // Frame 2: recompose directly at that scope. The assert panics when the
    // scope is missing, so the close-out below only runs on success.
    storage.reset();
    let found = storage.begin_recranpose_at_scope(123);
    assert!(found.is_some(), "Hierarchical should find scope 123");
    storage.end_group();
    storage.end_recompose();
    storage.flush();
}
/// A gap-restored slot whose new frame stores a different type
/// (String -> i32) must be cleanly overwritten by the split backend.
#[test]
fn test_split_payload_gap_then_type_change() {
    use crate::slot_backend::{SlotBackend, SlotBackendKind};
    let mut storage = SlotBackend::new(SlotBackendKind::Split);

    // Frame 1: String payload, group finalized.
    storage.reset();
    let _g = storage.begin_group(1300);
    let s = storage.alloc_value_slot(|| String::from("first"));
    assert_eq!(storage.read_value::<String>(s), "first");
    storage.end_group();
    storage.finalize_current_group();
    storage.flush();

    // Frame 2: same slot position, now an i32.
    storage.reset();
    let _g2 = storage.begin_group(1300);
    let s2 = storage.alloc_value_slot(|| 1234i32);
    assert_eq!(*storage.read_value::<i32>(s2), 1234);
    storage.end_group();
    storage.flush();
}
/// Chunked backend: after a node record shifts later slots, a slot
/// allocated past the shift must still be addressable for write and read.
///
/// Cleanup: the first frame previously collected slot ids into a `Vec`
/// that was never read; the dead local has been removed.
#[test]
fn test_chunked_anchor_rebuild_after_shift() {
    use crate::slot_backend::{SlotBackend, SlotBackendKind};
    let mut storage = SlotBackend::new(SlotBackendKind::Chunked);

    // Frame 1: fill the group with several slots.
    storage.reset();
    let _g = storage.begin_group(1400);
    for i in 0..8 {
        let _ = storage.alloc_value_slot(|| i);
    }
    storage.end_group();
    storage.flush();

    // Frame 2: record a node mid-group (shifting what follows), then verify
    // a slot allocated after the shift still round-trips a write.
    storage.reset();
    let _g2 = storage.begin_group(1400);
    let _ = storage.alloc_value_slot(|| 0i32);
    storage.record_node(9999);
    let s_after = storage.alloc_value_slot(|| 777i32);
    storage.write_value(s_after, 777i32);
    assert_eq!(*storage.read_value::<i32>(s_after), 777);
    storage.end_group();
    storage.flush();
}
// Pins the chunked backend to position-based ValueSlotId values, matching
// the other backends.
// NOTE(review): the first allocated slot is expected at index 1, not 0 —
// presumably index 0 is taken by the group record; confirm against the
// backend's slot layout.
#[test]
fn test_chunked_value_slot_is_position_based() {
use crate::slot_backend::{SlotBackend, SlotBackendKind};
let mut storage = SlotBackend::new(SlotBackendKind::Chunked);
storage.reset();
let _g = storage.begin_group(9000);
let slot = storage.alloc_value_slot(|| 123i32);
assert_eq!(
slot.index(),
1,
"chunked backend must use position-based ValueSlotId like other backends"
);
storage.end_group();
storage.flush();
}
/// Chunked backend: a value composed before a skipped (root-finalized)
/// frame must still be readable once the group is recomposed.
#[test]
fn test_chunked_read_after_root_finalize() {
    use crate::slot_backend::{SlotBackend, SlotBackendKind};
    let mut storage = SlotBackend::new(SlotBackendKind::Chunked);

    // Frame 1: normal composition.
    storage.reset();
    let _g = storage.begin_group(9100);
    let _slot = storage.alloc_value_slot(|| 10i32);
    storage.end_group();
    storage.flush();

    // Frame 2: empty frame; finalize at the root.
    storage.reset();
    storage.finalize_current_group();
    storage.flush();

    // Frame 3: recompose the group and read the value back.
    storage.reset();
    let _g = storage.begin_group(9100);
    let slot2 = storage.alloc_value_slot(|| 10i32);
    assert_eq!(*storage.read_value::<i32>(slot2), 10);
    storage.end_group();
    storage.flush();
}
// A child group that allocated several slots is finalized and later gap-
// restored; recomposing it with fewer slots must stay within the parent's
// frame and still allow a write to the first slot.
#[test]
fn test_backends_gap_restore_shorter_children_respects_parent_frame() {
for kind in all_backends() {
use crate::slot_backend::SlotBackend;
let mut storage = SlotBackend::new(kind);
// Frame 1: parent -> child with three slots; child finalized.
storage.reset();
let _parent = storage.begin_group(2000);
let _child = storage.begin_group(2001);
let _v1 = storage.alloc_value_slot(|| 100u32);
let _v2 = storage.alloc_value_slot(|| 200u32);
let _v3 = storage.alloc_value_slot(|| 300u32);
storage.end_group(); storage.finalize_current_group();
storage.end_group(); storage.flush();
// Frame 2: empty frame; root finalize parks the parent in a gap.
storage.reset();
storage.finalize_current_group();
storage.flush();
// Frame 3: restore the parent, recompose the child with a single new
// value, and finalize the now-shorter child.
storage.reset();
let res_parent = storage.begin_group(2000);
assert!(
res_parent.restored_from_gap,
"{:?}: parent should restore from gap",
kind
);
let _res_child = storage.begin_group(2001);
let v1_new = storage.alloc_value_slot(|| 111u32);
storage.write_value(v1_new, 111u32);
assert_eq!(
*storage.read_value::<u32>(v1_new),
111,
"{:?}: should read new value",
kind
);
storage.finalize_current_group();
storage.end_group();
storage.end_group(); storage.flush();
}
}
// Inserting a node in the FIRST sibling must not corrupt a group nested
// inside the SECOND sibling after the resulting shift.
#[test]
fn test_backends_insert_in_first_group_preserves_nested_second_group() {
for kind in all_backends() {
use crate::slot_backend::SlotBackend;
let mut storage = SlotBackend::new(kind);
// Frame 1: g1 with one slot; g2 containing a nested inner group.
storage.reset();
let _g1 = storage.begin_group(3000);
let _v1 = storage.alloc_value_slot(|| 10i32);
storage.end_group();
let _g2 = storage.begin_group(3001);
let _inner = storage.begin_group(3002);
let _v2 = storage.alloc_value_slot(|| 20i32);
storage.end_group(); storage.end_group();
storage.flush();
// Frame 2: record a node in g1, shifting g2 and its nested group; the
// nested slot must still round-trip a write.
storage.reset();
let _g1b = storage.begin_group(3000);
let _v1b = storage.alloc_value_slot(|| 10i32);
storage.record_node(888);
storage.end_group();
let _g2b = storage.begin_group(3001);
let _innerb = storage.begin_group(3002);
let v2b = storage.alloc_value_slot(|| 22i32);
storage.write_value(v2b, 22i32);
assert_eq!(
*storage.read_value::<i32>(v2b),
22,
"{:?}: nested group after shift must be readable",
kind
);
storage.end_group(); storage.end_group();
storage.flush();
}
}
/// Root-finalizing a whole frame and then restoring the group from its gap
/// must leave slot anchors consistent so the value stays readable.
#[test]
fn test_backends_root_finalize_then_gap_restore_anchors_consistent() {
    use crate::slot_backend::SlotBackend;
    for kind in all_backends() {
        let mut storage = SlotBackend::new(kind);

        // Frame 1: one group with one value.
        storage.reset();
        let _g = storage.begin_group(4000);
        let slot1 = storage.alloc_value_slot(|| 42i32);
        assert_eq!(*storage.read_value::<i32>(slot1), 42, "{:?}", kind);
        storage.end_group();
        storage.flush();

        // Frame 2: compose nothing; the root finalize parks the group.
        storage.reset();
        storage.finalize_current_group();
        storage.flush();

        // Frame 3: gap restore, then read through a fresh slot id.
        storage.reset();
        let res = storage.begin_group(4000);
        assert!(res.restored_from_gap, "{:?}: should restore from gap", kind);
        let slot2 = storage.alloc_value_slot(|| 42i32);
        assert_eq!(
            *storage.read_value::<i32>(slot2),
            42,
            "{:?}: value must be readable after gap restore",
            kind
        );
        storage.end_group();
        storage.flush();
    }
}
// A group's scope registration must survive a gap restore so that
// scope-based recomposition still finds it afterwards.
#[test]
fn test_backends_gap_restore_preserves_scope() {
for kind in all_backends() {
use crate::slot_backend::SlotBackend;
let mut storage = SlotBackend::new(kind);
// Frame 1: register scope 123 on the group.
storage.reset();
let result = storage.begin_group(5000);
storage.set_group_scope(result.group, 123);
let _v = storage.alloc_value_slot(|| "test");
storage.end_group();
storage.flush();
// Frame 2: skip the group; the root finalize parks it in a gap.
storage.reset();
storage.finalize_current_group();
storage.flush();
// Frame 3: restore the group from the gap.
storage.reset();
let res = storage.begin_group(5000);
assert!(res.restored_from_gap, "{:?}: should restore from gap", kind);
storage.end_group();
storage.flush();
// Frame 4: the scope must still be findable for recomposition.
// NOTE(review): "recranpose" spelling matches the backend API used
// throughout this file.
storage.reset();
let found = storage.begin_recranpose_at_scope(123);
assert!(
found.is_some(),
"{:?}: scope 123 must be findable after gap restore",
kind
);
if found.is_some() {
storage.end_group();
storage.end_recompose();
}
storage.flush();
}
}
/// Split backend: recomposing a slot with a u64 after it held a String
/// must overwrite the payload in place.
#[test]
fn test_split_type_mismatch_overwrites_payload_string_to_u64() {
    use crate::slot_backend::{SlotBackend, SlotBackendKind};
    let mut storage = SlotBackend::new(SlotBackendKind::Split);

    // Frame 1: String payload.
    storage.reset();
    let _g = storage.begin_group(6000);
    let slot1 = storage.alloc_value_slot(|| String::from("original"));
    assert_eq!(storage.read_value::<String>(slot1), "original");
    storage.end_group();
    storage.flush();

    // Frame 2: same position, now a u64.
    storage.reset();
    let _g2 = storage.begin_group(6000);
    let slot2 = storage.alloc_value_slot(|| 9999u64);
    assert_eq!(*storage.read_value::<u64>(slot2), 9999);
    storage.end_group();
    storage.flush();
}
/// Same as the direct type-mismatch test, but the overwrite happens after
/// a gap restore rather than an in-place recomposition.
#[test]
fn test_split_type_mismatch_overwrites_payload_string_to_u64_via_gap() {
    use crate::slot_backend::{SlotBackend, SlotBackendKind};
    let mut storage = SlotBackend::new(SlotBackendKind::Split);

    // Frame 1: String payload.
    storage.reset();
    let _g = storage.begin_group(6000);
    let _s = storage.alloc_value_slot(|| String::from("original"));
    storage.end_group();
    storage.flush();

    // Frame 2: skip the group so it lands in a gap.
    storage.reset();
    storage.finalize_current_group();
    storage.flush();

    // Frame 3: restore from the gap and store a u64 in the slot.
    storage.reset();
    let res = storage.begin_group(6000);
    assert!(res.restored_from_gap, "should restore from gap now");
    let s2 = storage.alloc_value_slot(|| 9999u64);
    assert_eq!(*storage.read_value::<u64>(s2), 9999);
    storage.end_group();
    storage.flush();
}
// Stresses the chunked backend's gap scan: a large first frame (150 slots),
// then a much shorter finalized frame, then a frame that grows one slot past
// the shortened length. The final allocation must fall back to a fresh slot
// and accept an overwrite.
#[test]
fn test_chunked_gap_scan_upper_bound_fallback() {
use crate::slot_backend::{SlotBackend, SlotBackendKind};
let mut storage = SlotBackend::new(SlotBackendKind::Chunked);
// Frame 1: 150 slots, each verified as it is allocated.
storage.reset();
let _g = storage.begin_group(7000);
for i in 0..150 {
let s = storage.alloc_value_slot(|| i as u32);
assert_eq!(*storage.read_value::<u32>(s), i as u32);
}
storage.end_group();
storage.flush();
// Frame 2: shrink to two slots and finalize the shortened group.
storage.reset();
let _g = storage.begin_group(7000);
let _s1 = storage.alloc_value_slot(|| 0u32);
let _s2 = storage.alloc_value_slot(|| 1u32);
storage.finalize_current_group();
storage.end_group();
storage.flush();
// Frame 3: grow by one slot beyond the finalized length; the new slot
// must round-trip a write.
storage.reset();
let _g = storage.begin_group(7000);
let _s1 = storage.alloc_value_slot(|| 0u32);
let _s2 = storage.alloc_value_slot(|| 1u32);
let s3 = storage.alloc_value_slot(|| 999u32);
storage.write_value(s3, 999u32);
assert_eq!(
*storage.read_value::<u32>(s3),
999,
"fallback overwrite must work"
);
storage.end_group();
storage.flush();
}