use crate::error::ArenaError;
use crate::handle::{FieldHandle, FieldLocation};
use crate::segment::SegmentList;
use murk_core::FieldId;
/// A freed `(segment, offset, len)` range whose backing storage can be
/// recycled by a later allocation of exactly the same length.
///
/// Ranges land here only after `SparseSlab::flush_retired`; until then they
/// sit in `pending_retired` so the old data stays readable (copy-on-write).
#[derive(Clone, Copy, Debug)]
struct RetiredRange {
    // Index of the owning segment inside the `SegmentList`.
    segment_index: u16,
    // Offset of the range within that segment (presumably bytes — units are
    // defined by `SegmentList`; confirm against its implementation).
    offset: u32,
    // Length of the range, in the same units as `offset`.
    len: u32,
}
/// Bookkeeping entry for one field's current (or recently superseded)
/// allocation inside the sparse arena.
#[derive(Clone, Debug)]
pub struct SparseSlot {
    /// The field this slot belongs to.
    pub field: FieldId,
    /// Generation counter captured when this slot was allocated; echoed into
    /// the `FieldHandle` so stale handles can be detected.
    pub generation_created: u32,
    /// Index of the segment holding the field's data.
    pub segment_index: u16,
    /// Offset of the data within that segment.
    pub offset: u32,
    /// Length of the allocation.
    pub len: u32,
    /// `false` once the slot has been superseded by a re-allocation; dead
    /// slots are queued on the free list for reuse.
    pub live: bool,
}
/// Slab of per-field allocations backed by a `SegmentList`, with
/// copy-on-write reuse of retired ranges.
///
/// Re-allocating a field does not immediately free its old range: the range
/// is parked in `pending_retired` (still readable by holders of the old
/// generation) and only becomes reusable after `flush_retired` moves it to
/// `retired_ranges`.
pub struct SparseSlab {
    // All slots ever created; entries are recycled via `free_list`.
    slots: Vec<SparseSlot>,
    // Indices into `slots` whose entries are dead and may be overwritten.
    free_list: Vec<usize>,
    // Maps each field to the index of its single live slot in `slots`.
    live_map: indexmap::IndexMap<FieldId, usize>,
    // Flushed ranges, eligible for exact-size reuse by `alloc`.
    retired_ranges: Vec<RetiredRange>,
    // Ranges retired since the last flush; not yet eligible for reuse.
    pending_retired: Vec<RetiredRange>,
}
impl SparseSlab {
pub fn new() -> Self {
Self {
slots: Vec::new(),
free_list: Vec::new(),
live_map: indexmap::IndexMap::new(),
retired_ranges: Vec::new(),
pending_retired: Vec::new(),
}
}
pub fn alloc(
&mut self,
field: FieldId,
len: u32,
generation: u32,
segments: &mut SegmentList,
) -> Result<FieldHandle, ArenaError> {
let (segment_index, offset) =
if let Some(pos) = self.retired_ranges.iter().position(|r| r.len == len) {
let r = self.retired_ranges.swap_remove(pos);
(r.segment_index, r.offset)
} else {
segments.alloc(len)?
};
if let Some(&old_idx) = self.live_map.get(&field) {
let old = &self.slots[old_idx];
self.pending_retired.push(RetiredRange {
segment_index: old.segment_index,
offset: old.offset,
len: old.len,
});
self.slots[old_idx].live = false;
self.free_list.push(old_idx);
}
let slot = SparseSlot {
field,
generation_created: generation,
segment_index,
offset,
len,
live: true,
};
let slot_idx = if let Some(reuse_idx) = self.free_list.pop() {
self.slots[reuse_idx] = slot;
reuse_idx
} else {
let idx = self.slots.len();
self.slots.push(slot);
idx
};
self.live_map.insert(field, slot_idx);
let location = FieldLocation::Sparse { segment_index };
Ok(FieldHandle::new(generation, offset, len, location))
}
pub fn get_handle(&self, field: FieldId) -> Option<FieldHandle> {
let &idx = self.live_map.get(&field)?;
let slot = &self.slots[idx];
if !slot.live {
return None;
}
Some(FieldHandle::new(
slot.generation_created,
slot.offset,
slot.len,
FieldLocation::Sparse {
segment_index: slot.segment_index,
},
))
}
pub fn contains(&self, field: FieldId) -> bool {
self.live_map.contains_key(&field)
}
pub fn live_count(&self) -> usize {
self.live_map.len()
}
pub fn total_slots(&self) -> usize {
self.slots.len()
}
pub fn free_count(&self) -> usize {
self.free_list.len()
}
pub fn flush_retired(&mut self) {
self.retired_ranges.append(&mut self.pending_retired);
}
pub fn retired_range_count(&self) -> usize {
self.retired_ranges.len()
}
pub fn pending_retired_count(&self) -> usize {
self.pending_retired.len()
}
pub fn live_fields(&self) -> impl Iterator<Item = (&FieldId, &SparseSlot)> {
self.live_map
.iter()
.map(|(field, &idx)| (field, &self.slots[idx]))
}
}
impl Default for SparseSlab {
fn default() -> Self {
Self::new()
}
}
#[cfg(test)]
mod tests {
    use super::*;

    // Shared fixture. Arguments presumably mean (segment size, segment
    // count/limit) — confirm against `SegmentList::new`'s definition.
    fn make_segments() -> SegmentList {
        SegmentList::new(4096, 4)
    }

    // A fresh allocation produces a live sparse slot with the requested
    // length and generation.
    #[test]
    fn alloc_creates_live_slot() {
        let mut slab = SparseSlab::new();
        let mut segs = make_segments();
        let handle = slab.alloc(FieldId(0), 100, 1, &mut segs).unwrap();
        assert_eq!(handle.len(), 100);
        assert_eq!(handle.generation(), 1);
        assert!(matches!(handle.location(), FieldLocation::Sparse { .. }));
        assert_eq!(slab.live_count(), 1);
    }

    // Re-allocating the same field supersedes the old slot: live count stays
    // at one and lookups return the newest generation.
    #[test]
    fn realloc_marks_old_slot_dead() {
        let mut slab = SparseSlab::new();
        let mut segs = make_segments();
        let h1 = slab.alloc(FieldId(0), 100, 1, &mut segs).unwrap();
        let h2 = slab.alloc(FieldId(0), 100, 2, &mut segs).unwrap();
        assert_eq!(h1.generation(), 1);
        assert_eq!(h2.generation(), 2);
        assert_eq!(slab.live_count(), 1);
        let live_h = slab.get_handle(FieldId(0)).unwrap();
        assert_eq!(live_h.generation(), 2);
    }

    // Lookup of a field that was never allocated yields None.
    #[test]
    fn get_handle_returns_none_for_unknown() {
        let slab = SparseSlab::new();
        assert!(slab.get_handle(FieldId(99)).is_none());
    }

    // Distinct fields get independent slots and keep their own lengths.
    #[test]
    fn multiple_fields_tracked_independently() {
        let mut slab = SparseSlab::new();
        let mut segs = make_segments();
        let _ = slab.alloc(FieldId(0), 50, 1, &mut segs).unwrap();
        let _ = slab.alloc(FieldId(1), 75, 1, &mut segs).unwrap();
        assert_eq!(slab.live_count(), 2);
        assert_eq!(slab.get_handle(FieldId(0)).unwrap().len(), 50);
        assert_eq!(slab.get_handle(FieldId(1)).unwrap().len(), 75);
    }

    // A superseded slot entry is recycled through the free list instead of
    // growing `slots` — total slot count stays at 1 after a re-allocation.
    #[test]
    fn free_slots_are_reused() {
        let mut slab = SparseSlab::new();
        let mut segs = make_segments();
        let _ = slab.alloc(FieldId(0), 50, 1, &mut segs).unwrap();
        let _ = slab.alloc(FieldId(0), 50, 2, &mut segs).unwrap();
        assert_eq!(slab.total_slots(), 1);
    }

    // Full copy-on-write cycle: a CoW re-allocation bump-allocates and
    // parks the old range as pending; after `flush_retired` the range
    // becomes reusable, so the next same-size allocation consumes it
    // without touching new segment memory.
    #[test]
    fn retired_range_reused_after_flush() {
        let mut slab = SparseSlab::new();
        let mut segs = make_segments();
        let _ = slab.alloc(FieldId(0), 100, 0, &mut segs).unwrap();
        let used_after_init = segs.total_used();
        let _ = slab.alloc(FieldId(0), 100, 1, &mut segs).unwrap();
        let used_after_cow1 = segs.total_used();
        assert!(used_after_cow1 > used_after_init);
        assert_eq!(slab.pending_retired_count(), 1);
        assert_eq!(slab.retired_range_count(), 0);
        slab.flush_retired();
        assert_eq!(slab.pending_retired_count(), 0);
        assert_eq!(slab.retired_range_count(), 1);
        let _ = slab.alloc(FieldId(0), 100, 2, &mut segs).unwrap();
        assert_eq!(
            segs.total_used(),
            used_after_cow1,
            "no new segment memory consumed"
        );
        assert_eq!(slab.retired_range_count(), 0, "retired range was consumed");
    }

    // Without a flush, pending ranges must NOT be reused — old-generation
    // readers could still be looking at that memory.
    #[test]
    fn pending_retired_not_reused_before_flush() {
        let mut slab = SparseSlab::new();
        let mut segs = make_segments();
        let _ = slab.alloc(FieldId(0), 100, 0, &mut segs).unwrap();
        let _ = slab.alloc(FieldId(0), 100, 1, &mut segs).unwrap();
        let used_before = segs.total_used();
        let _ = slab.alloc(FieldId(0), 100, 2, &mut segs).unwrap();
        assert!(
            segs.total_used() > used_before,
            "should bump-allocate because pending ranges are not reusable"
        );
    }

    // Steady-state CoW with a flush per generation should ping-pong between
    // two ranges, so total segment usage stays bounded (~2 x 100 here).
    #[test]
    fn many_cow_writes_with_flush_stays_bounded() {
        let mut slab = SparseSlab::new();
        let mut segs = SegmentList::new(1024, 2);
        let _ = slab.alloc(FieldId(0), 100, 0, &mut segs).unwrap();
        for gen in 1..=50u32 {
            slab.flush_retired();
            let _ = slab.alloc(FieldId(0), 100, gen, &mut segs).unwrap();
        }
        assert!(segs.total_used() <= 300);
    }

    // Retired-range reuse requires an exact length match: a differently
    // sized request bump-allocates and leaves the retired pool untouched.
    #[test]
    fn alloc_falls_back_to_bump_when_no_size_match() {
        let mut slab = SparseSlab::new();
        let mut segs = make_segments();
        let _ = slab.alloc(FieldId(0), 100, 0, &mut segs).unwrap();
        let _ = slab.alloc(FieldId(1), 100, 0, &mut segs).unwrap();
        let _ = slab.alloc(FieldId(0), 100, 1, &mut segs).unwrap();
        let _ = slab.alloc(FieldId(1), 100, 1, &mut segs).unwrap();
        slab.flush_retired();
        assert_eq!(slab.retired_range_count(), 2);
        let used_before = segs.total_used();
        let _ = slab.alloc(FieldId(2), 200, 2, &mut segs).unwrap();
        assert!(
            segs.total_used() > used_before,
            "should bump-allocate when no retired range matches the requested size"
        );
        assert_eq!(slab.retired_range_count(), 2);
    }

    // With three same-size retired ranges available, three same-size
    // allocations consume all of them and no new segment memory.
    #[test]
    fn three_fields_same_size_all_ranges_retired() {
        let mut slab = SparseSlab::new();
        let mut segs = make_segments();
        let _ = slab.alloc(FieldId(0), 100, 0, &mut segs).unwrap();
        let _ = slab.alloc(FieldId(1), 100, 0, &mut segs).unwrap();
        let _ = slab.alloc(FieldId(2), 100, 0, &mut segs).unwrap();
        let _ = slab.alloc(FieldId(0), 100, 1, &mut segs).unwrap();
        let _ = slab.alloc(FieldId(1), 100, 1, &mut segs).unwrap();
        let _ = slab.alloc(FieldId(2), 100, 1, &mut segs).unwrap();
        slab.flush_retired();
        assert_eq!(slab.retired_range_count(), 3);
        let used_before = segs.total_used();
        let _ = slab.alloc(FieldId(0), 100, 2, &mut segs).unwrap();
        let _ = slab.alloc(FieldId(1), 100, 2, &mut segs).unwrap();
        let _ = slab.alloc(FieldId(2), 100, 2, &mut segs).unwrap();
        assert_eq!(
            segs.total_used(),
            used_before,
            "no new segment memory consumed — all retired ranges reused"
        );
        assert_eq!(slab.retired_range_count(), 0, "all retired ranges consumed");
    }

    // A 200-length request reuses only the 200-length retired range; the
    // 100-length range stays in the pool (no splitting/merging).
    #[test]
    fn different_size_ranges_not_mixed() {
        let mut slab = SparseSlab::new();
        let mut segs = make_segments();
        let _ = slab.alloc(FieldId(0), 100, 0, &mut segs).unwrap();
        let _ = slab.alloc(FieldId(1), 200, 0, &mut segs).unwrap();
        let _ = slab.alloc(FieldId(0), 100, 1, &mut segs).unwrap();
        let _ = slab.alloc(FieldId(1), 200, 1, &mut segs).unwrap();
        slab.flush_retired();
        let used_before = segs.total_used();
        let _ = slab.alloc(FieldId(1), 200, 2, &mut segs).unwrap();
        assert_eq!(segs.total_used(), used_before);
        assert_eq!(slab.retired_range_count(), 1);
    }

    // Property tests are skipped under Miri (too slow for interpretation).
    #[cfg(not(miri))]
    mod proptests {
        use super::*;
        use proptest::prelude::*;
        proptest! {
            // Invariant: live_count equals the number of distinct fields
            // ever allocated, regardless of re-allocation order.
            #[test]
            fn live_count_equals_distinct_fields(
                field_ids in proptest::collection::vec(0u32..16, 1..20),
            ) {
                let mut slab = SparseSlab::new();
                let mut segs = make_segments();
                for (gen, &fid) in field_ids.iter().enumerate() {
                    let _ = slab.alloc(FieldId(fid), 10, gen as u32, &mut segs);
                }
                let distinct: indexmap::IndexSet<_> = field_ids.iter().collect();
                prop_assert_eq!(slab.live_count(), distinct.len());
            }

            // Invariant: after repeated re-allocations of one field, the
            // handle reflects the most recent generation.
            #[test]
            fn get_handle_returns_latest_generation(
                gens in proptest::collection::vec(1u32..100, 2..10),
            ) {
                let mut slab = SparseSlab::new();
                let mut segs = make_segments();
                let field = FieldId(0);
                let mut last_gen = 0;
                for &gen in &gens {
                    let _ = slab.alloc(field, 10, gen, &mut segs);
                    last_gen = gen;
                }
                let handle = slab.get_handle(field).unwrap();
                prop_assert_eq!(handle.generation(), last_gen);
            }

            // Invariant: slot recycling guarantees the slot vector never
            // grows beyond the number of successful allocations.
            #[test]
            fn total_slots_bounded_by_alloc_count(
                ops in proptest::collection::vec((0u32..8, 1u32..50), 1..30),
            ) {
                let mut slab = SparseSlab::new();
                let mut segs = make_segments();
                let mut alloc_count = 0u32;
                for (gen, (fid, len)) in ops.iter().enumerate() {
                    if slab.alloc(FieldId(*fid), *len, gen as u32, &mut segs).is_ok() {
                        alloc_count += 1;
                    }
                }
                prop_assert!(slab.total_slots() as u32 <= alloc_count);
            }
        }
    }
}