use alloc::vec::Vec;
/// Number of size slots stored inline in `SizeCache` before additional
/// reservations overflow into the heap-backed `spill` vector.
const INLINE_CAP: usize = 16;
/// Append-only cache of `u32` sizes: slots are handed out by `reserve`,
/// filled via `set`, and read back in reservation order by `consume_next`.
/// The panic message in `overrun` suggests it pairs a `compute_size` pass
/// with a `write_to` pass — NOTE(review): confirm against callers.
#[derive(Debug)]
pub struct SizeCache {
    // First INLINE_CAP slots live here to avoid heap allocation.
    inline: [u32; INLINE_CAP],
    // Overflow storage for slots with index >= INLINE_CAP.
    spill: Vec<u32>,
    // Total number of slots reserved so far (inline + spill).
    len: u32,
    // Index of the next slot `consume_next` will read.
    cursor: u32,
}
impl Default for SizeCache {
#[inline]
fn default() -> Self {
Self::new()
}
}
impl SizeCache {
#[inline]
#[must_use]
pub const fn new() -> Self {
Self {
inline: [0u32; INLINE_CAP],
spill: Vec::new(),
len: 0,
cursor: 0,
}
}
#[inline]
pub fn clear(&mut self) {
self.spill.clear();
self.len = 0;
self.cursor = 0;
}
#[inline]
pub fn reserve(&mut self) -> usize {
debug_assert!(self.len < u32::MAX, "SizeCache slot count overflow");
let idx = self.len as usize;
if idx < INLINE_CAP {
self.inline[idx] = 0;
} else {
self.spill.push(0);
}
self.len += 1;
idx
}
#[inline]
#[track_caller]
pub fn set(&mut self, idx: usize, size: u32) {
assert!(
idx < self.len as usize,
"SizeCache::set: slot {idx} not reserved (len {})",
self.len
);
if idx < INLINE_CAP {
self.inline[idx] = size;
} else {
self.spill[idx - INLINE_CAP] = size;
}
}
#[inline]
#[track_caller]
pub fn consume_next(&mut self) -> u32 {
let idx = self.cursor as usize;
if idx >= self.len as usize {
Self::overrun(idx, self.len);
}
self.cursor += 1;
if idx < INLINE_CAP {
self.inline[idx]
} else {
self.spill[idx - INLINE_CAP]
}
}
#[cold]
#[inline(never)]
#[track_caller]
fn overrun(idx: usize, len: u32) -> ! {
panic!(
"SizeCache cursor overrun: write_to consumed {} slots but \
compute_size produced {len} (traversal-order mismatch)",
idx + 1,
)
}
}
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn empty_cache_is_default() {
        let cache = SizeCache::new();
        assert_eq!(cache.len, 0);
        assert_eq!(cache.cursor, 0);
        assert!(cache.spill.is_empty());
    }

    #[test]
    fn spill_past_inline_cap_preserves_order() {
        const N: usize = INLINE_CAP * 2 + 5;
        let mut cache = SizeCache::new();
        let reserved: alloc::vec::Vec<usize> = (0..N).map(|_| cache.reserve()).collect();
        // Fill in reverse call order to show writes are keyed by slot index.
        for (value, &slot) in reserved.iter().enumerate().rev() {
            cache.set(slot, value as u32 * 7);
        }
        assert_eq!(cache.spill.len(), N - INLINE_CAP);
        // Consumption still follows reservation order.
        for value in 0..N {
            assert_eq!(cache.consume_next(), value as u32 * 7);
        }
    }

    #[test]
    fn boundary_at_inline_cap() {
        let mut cache = SizeCache::new();
        for value in 0..INLINE_CAP {
            let slot = cache.reserve();
            cache.set(slot, value as u32);
        }
        assert!(cache.spill.is_empty(), "no spill at exactly INLINE_CAP");
        // The very next reservation must land in the spill vector.
        let slot = cache.reserve();
        cache.set(slot, 999);
        assert_eq!(cache.spill.len(), 1);
        for value in 0..INLINE_CAP {
            assert_eq!(cache.consume_next(), value as u32);
        }
        assert_eq!(cache.consume_next(), 999);
    }

    #[test]
    fn reserve_set_next_roundtrip() {
        let mut cache = SizeCache::new();
        let first = cache.reserve();
        let second = cache.reserve();
        cache.set(first, 10);
        cache.set(second, 20);
        assert_eq!(cache.consume_next(), 10);
        assert_eq!(cache.consume_next(), 20);
    }

    #[test]
    fn preorder_reservation_with_nested_recursion() {
        let mut cache = SizeCache::new();
        // Parent reserves before recursing; child sets before the parent.
        let outer = cache.reserve();
        let inner = cache.reserve();
        cache.set(inner, 5);
        cache.set(outer, 7);
        let sibling = cache.reserve();
        cache.set(sibling, 3);
        assert_eq!(cache.consume_next(), 7);
        assert_eq!(cache.consume_next(), 5);
        assert_eq!(cache.consume_next(), 3);
    }

    #[test]
    fn clear_resets_and_retains_capacity() {
        let mut cache = SizeCache::new();
        for _ in 0..(INLINE_CAP + 4) {
            cache.reserve();
        }
        let spill_cap = cache.spill.capacity();
        assert!(spill_cap >= 4);
        cache.clear();
        assert_eq!(cache.len, 0);
        assert_eq!(cache.cursor, 0);
        // `clear` must not shrink the spill allocation.
        assert!(cache.spill.capacity() >= spill_cap);
        let slot = cache.reserve();
        cache.set(slot, 99);
        assert_eq!(cache.consume_next(), 99);
    }

    #[test]
    fn reserve_without_set_yields_zero() {
        let mut cache = SizeCache::new();
        let _ = cache.reserve();
        assert_eq!(cache.consume_next(), 0);
    }

    #[test]
    fn clear_then_reserve_without_set_yields_zero() {
        let mut cache = SizeCache::new();
        // Populate with non-zero values so a stale read would be caught.
        for value in 0..(INLINE_CAP + 3) {
            let slot = cache.reserve();
            cache.set(slot, (value + 100) as u32);
        }
        cache.clear();
        let _ = cache.reserve();
        assert_eq!(cache.consume_next(), 0);
    }

    #[test]
    #[should_panic(expected = "SizeCache cursor overrun")]
    fn next_past_end_panics() {
        let mut cache = SizeCache::new();
        cache.consume_next();
    }
}