use core::fmt;
use crate::bytes::{Cursor, OutOfBounds};
/// File magic of Ken Silverman's VXL format, stored little-endian.
const MAGIC: u32 = 0x0907_2000;
/// Fixed header size: magic (4) + xdim (4) + ydim (4) + four 3-component
/// f64 vectors (4 * 24) = 108 bytes.
const HEADER_LEN: usize = 4 + 4 + 4 + 4 * 24;
/// An in-memory VXL voxel map: header vectors plus the raw per-column slab
/// data, with one column-offset table per mip level and an optional
/// allocation bitmap used for in-place editing.
#[derive(Debug, Clone)]
pub struct Vxl {
    /// Side length of the square map (columns per axis).
    pub vsid: u32,
    /// Start position from the file header.
    pub ipo: [f64; 3],
    /// Header orientation vector (NOTE(review): presumably the "strafe"/right
    /// axis of the VXL header — confirm against the format spec).
    pub ist: [f64; 3],
    /// Header orientation vector (presumably the "hei"/vertical axis — confirm).
    pub ihe: [f64; 3],
    /// Header orientation vector (presumably the "for"/forward axis — confirm).
    pub ifo: [f64; 3],
    /// Concatenated slab-chain bytes for all mip levels (mip 0 first).
    pub data: Box<[u8]>,
    /// Byte offsets into `data` of each column, per mip level; every level's
    /// table ends with a sentinel equal to that level's end offset.
    pub column_offset: Box<[u32]>,
    /// Indices into `column_offset` delimiting each mip level's table;
    /// `len() == mip_count + 1`.
    pub mip_base_offsets: Box<[usize]>,
    /// Allocation bitmap, one bit per dword of `data`; empty until
    /// `reserve_edit_capacity` is called.
    pub vbit: Box<[u32]>,
    /// Scan cursor (dword index) used by `voxalloc`.
    pub vbiti: u32,
}
impl Vxl {
    /// Returns the slab-chain bytes of column `idx` in mip level 0.
    ///
    /// Panics when `idx` is out of range of the column table.
    #[must_use]
    pub fn column_data(&self, idx: usize) -> &[u8] {
        let start = self.column_offset[idx] as usize;
        let end = start + slng(&self.data[start..]);
        &self.data[start..end]
    }

    /// Number of mip levels stored; mip 0 counts, so this is at least 1.
    #[must_use]
    pub fn mip_count(&self) -> u32 {
        u32::try_from(self.mip_base_offsets.len() - 1).expect("mip count fits in u32")
    }

    /// Returns the column-offset table (including its trailing end sentinel)
    /// for mip level `mip`. Panics when `mip >= mip_count()`.
    #[must_use]
    pub fn column_offset_for_mip(&self, mip: u32) -> &[u32] {
        let mip_idx = mip as usize;
        let lo = self.mip_base_offsets[mip_idx];
        let hi = self.mip_base_offsets[mip_idx + 1];
        &self.column_offset[lo..hi]
    }

    /// Returns the slab-chain bytes of column `idx` in mip level `mip`.
    #[must_use]
    pub fn column_data_for_mip(&self, mip: u32, idx: usize) -> &[u8] {
        let table = self.column_offset_for_mip(mip);
        let start = table[idx] as usize;
        let end = start + slng(&self.data[start..]);
        &self.data[start..end]
    }

    /// Drops any previously generated mip levels so only mip 0 remains;
    /// `generate_mips` uses this to always start from a clean base level.
    fn reset_to_single_mip(&mut self) {
        let n_cols = (self.vsid as usize) * (self.vsid as usize);
        if self.mip_base_offsets.len() <= 2 {
            // Only mip 0 present; nothing to trim.
            return;
        }
        // Mip 0's data ends where its offset-table sentinel points.
        let mip0_end_in_data = self.column_offset[n_cols] as usize;
        self.data = self.data[..mip0_end_in_data].to_vec().into_boxed_slice();
        self.column_offset = self.column_offset[..=n_cols].to_vec().into_boxed_slice();
        self.mip_base_offsets = Box::new([0, n_cols + 1]);
    }

    /// Generates up to `max_mips - 1` additional mip levels by repeatedly
    /// halving the horizontal dimension (and z bound), appending each level's
    /// slab data and offset table after the previous ones.
    ///
    /// Regenerates from scratch on every call, so repeated calls with the
    /// same `max_mips` are idempotent.
    #[allow(clippy::missing_panics_doc)]
    pub fn generate_mips(&mut self, max_mips: u32) {
        self.reset_to_single_mip();
        if max_mips <= 1 {
            return;
        }
        let mut mipnum: u32 = 1;
        let mut src_vsid: u32 = self.vsid;
        let mut src_z_bound: i32 = MAXZDIM;
        while src_vsid > 1 && src_z_bound > 1 && mipnum < max_mips {
            let dst_vsid = src_vsid >> 1;
            let dst_z_bound = src_z_bound >> 1;
            let src_offsets_lo = self.mip_base_offsets[(mipnum - 1) as usize];
            let src_offsets_hi = self.mip_base_offsets[mipnum as usize];
            let src_offsets = self.column_offset[src_offsets_lo..src_offsets_hi].to_vec();
            let (new_data_segment, new_offsets) =
                build_mip_level(&self.data, &src_offsets, src_vsid, dst_vsid);
            // Append the new level's slab bytes and offsets after the
            // existing ones; the new offsets are already absolute.
            let mut combined_data = self.data.to_vec();
            combined_data.extend_from_slice(&new_data_segment);
            self.data = combined_data.into_boxed_slice();
            let mut combined_offsets = self.column_offset.to_vec();
            combined_offsets.extend_from_slice(&new_offsets);
            self.column_offset = combined_offsets.into_boxed_slice();
            debug_assert_eq!(
                *self
                    .mip_base_offsets
                    .last()
                    .expect("mip_base_offsets non-empty"),
                src_offsets_hi
            );
            let mut combined_mips = self.mip_base_offsets.to_vec();
            combined_mips.push(self.column_offset.len());
            self.mip_base_offsets = combined_mips.into_boxed_slice();
            mipnum += 1;
            src_vsid = dst_vsid;
            src_z_bound = dst_z_bound;
        }
    }

    /// Grows `data` by `headroom_bytes` (rounded up to a dword) of zeroed
    /// scratch space and rebuilds the allocation bitmap `vbit`, marking every
    /// dword currently owned by a column (in any mip) as in-use.
    ///
    /// Must be called before `voxalloc` / `voxdealloc`.
    pub fn reserve_edit_capacity(&mut self, headroom_bytes: usize) {
        // Round the headroom up to a multiple of 4 (allocation granularity).
        let headroom_aligned = (headroom_bytes + 3) & !3;
        let old_len = self.data.len();
        let new_len = old_len + headroom_aligned;
        // Offsets are stored as u32, so the grown pool must stay addressable.
        u32::try_from(new_len).expect("vbuf size fits in u32");
        let mut new_data = Vec::with_capacity(new_len);
        new_data.extend_from_slice(&self.data);
        new_data.resize(new_len, 0);
        self.data = new_data.into_boxed_slice();
        // One bit per dword of pool space.
        let total_dwords = new_len / 4;
        let n_words = total_dwords.div_ceil(32);
        let mut vbit = vec![0u32; n_words].into_boxed_slice();
        // Mark every dword covered by an existing column as occupied.
        for mip in 0..self.mip_count() {
            let table = self.column_offset_for_mip(mip);
            for window in table.windows(2) {
                let lo = (window[0] / 4) as usize;
                let hi = (window[1] / 4) as usize;
                for d in lo..hi {
                    vbit[d >> 5] |= 1u32 << (d & 31);
                }
            }
        }
        self.vbit = vbit;
        self.vbiti = 0;
    }

    /// Allocates `n_bytes` (a positive multiple of 4) from the free space
    /// tracked by `vbit` and returns the byte offset of the new slab.
    ///
    /// Scans forward from the cursor `vbiti`; after exhausting the pool it
    /// rewinds once to retry regions freed earlier.
    ///
    /// # Panics
    ///
    /// Panics when `reserve_edit_capacity` has not been called, when
    /// `n_bytes` is zero or not a multiple of 4, or when no free span of the
    /// requested size exists.
    pub fn voxalloc(&mut self, n_bytes: u32) -> u32 {
        assert!(
            !self.vbit.is_empty(),
            "voxalloc requires reserve_edit_capacity"
        );
        assert!(
            n_bytes > 0 && n_bytes % 4 == 0,
            "voxalloc n_bytes must be a positive multiple of 4 (got {n_bytes})"
        );
        // Size in dwords; each dword has one bit in `vbit`.
        let danum = n_bytes / 4;
        let total_dwords = u32::try_from(self.data.len() / 4).expect("pool dwords fit in u32");
        assert!(
            danum <= total_dwords,
            "voxalloc: requested span > pool size"
        );
        let vend = total_dwords - danum;
        // Two passes: the second starts over from 0 to pick up freed regions.
        for _badcnt in 0..2 {
            while self.vbiti < vend {
                if vbit_is_set(&self.vbit, self.vbiti) {
                    self.vbiti += danum;
                    continue;
                }
                // Extend the free run backwards so allocations pack tightly.
                let mut p0 = self.vbiti;
                while p0 > 0 && !vbit_is_set(&self.vbit, p0 - 1) {
                    p0 -= 1;
                }
                // Verify the whole span [p0, p0 + danum) is free.
                let mut p1 = p0 + danum - 1;
                let mut found = true;
                while p1 > self.vbiti {
                    if vbit_is_set(&self.vbit, p1) {
                        found = false;
                        break;
                    }
                    p1 -= 1;
                }
                if !found {
                    self.vbiti += danum;
                    continue;
                }
                // Claim the span and advance the cursor past it.
                self.vbiti = p0 + danum;
                for k in p0..self.vbiti {
                    self.vbit[(k >> 5) as usize] |= 1u32 << (k & 31);
                }
                return p0 * 4;
            }
            self.vbiti = 0;
        }
        panic!("voxalloc: vbuf full (cannot allocate {n_bytes} bytes)");
    }

    /// Frees the slab stored at `byte_offset` by clearing its dword bits in
    /// `vbit`; the slab's length is recovered from its own chain via `slng`.
    ///
    /// # Panics
    ///
    /// Panics when `reserve_edit_capacity` has not been called.
    pub fn voxdealloc(&mut self, byte_offset: u32) {
        assert!(
            !self.vbit.is_empty(),
            "voxdealloc requires reserve_edit_capacity"
        );
        let len_bytes = u32::try_from(slng(&self.data[byte_offset as usize..]))
            .expect("slab length fits in u32");
        // Clear bits for dwords i..j.
        let i = byte_offset / 4;
        let j = (byte_offset + len_bytes) / 4;
        let i_word = (i >> 5) as usize;
        let j_word = (j >> 5) as usize;
        let i_bit = i & 31;
        let j_bit = j & 31;
        if i_word == j_word {
            // Clear bits i_bit..j_bit inside a single word.
            let mask = p2m(j_bit) ^ p2m(i_bit);
            self.vbit[i_word] &= !mask;
        } else {
            // Clear bits i_bit..32 of the first word.
            self.vbit[i_word] &= p2m(i_bit);
            // When j_bit == 0 there is nothing to clear in word j_word, and
            // skipping the (no-op) write also avoids an out-of-bounds index
            // when the slab ends exactly at the pool end on a 32-dword
            // boundary (j_word == vbit.len()).
            if j_bit != 0 {
                self.vbit[j_word] &= !p2m(j_bit);
            }
            // Zero all fully covered words in between.
            for w in (i_word + 1)..j_word {
                self.vbit[w] = 0;
            }
        }
    }
}
/// Total byte length of the slab chain starting at `slab[0]`.
///
/// Follows the `nextptr` links (first byte of each slab, counted in dwords)
/// until the terminal slab (`nextptr == 0`), then adds that slab's 4-byte
/// header plus its floor-colour records (`z1c - z1 + 1` dwords, clamped to
/// zero when the run is empty).
#[must_use]
pub fn slng(slab: &[u8]) -> usize {
    let mut offset = 0usize;
    loop {
        let nextptr = slab[offset];
        if nextptr == 0 {
            break;
        }
        offset += usize::from(nextptr) * 4;
    }
    let z_top = i32::from(slab[offset + 1]);
    let z_bottom = i32::from(slab[offset + 2]);
    let floor_dwords =
        usize::try_from((z_bottom - z_top + 1).max(0)).expect("clamped to non-negative");
    offset + floor_dwords * 4 + 4
}
/// Mask of the low `k` bits: `p2m(k) == (1 << k) - 1` for `k` in `0..=31`.
///
/// No zero special-case is needed: `(1u32 << 0) - 1` is already 0, so the
/// previous `if k == 0` branch was redundant.
#[inline]
fn p2m(k: u32) -> u32 {
    debug_assert!(k <= 31, "p2m takes 0..=31 (got {k})");
    (1u32 << k) - 1
}
/// Tests whether the allocation bit for dword `dword_idx` is set in `vbit`.
#[inline]
fn vbit_is_set(vbit: &[u32], dword_idx: u32) -> bool {
    let word_idx = (dword_idx / 32) as usize;
    let bit_in_word = dword_idx % 32;
    (vbit[word_idx] & (1u32 << bit_in_word)) != 0
}
/// Maximum column height (z dimension) handled by the format.
const MAXZDIM: i32 = 256;
/// One colour-mixing bucket per destination z value (source z halved).
const MIXC_BUCKETS: usize = (MAXZDIM as usize) >> 1;
/// Up to 8 source voxels feed one destination voxel: 4 columns x 2 z values.
const MIXC_LANES: usize = 8;
/// Fixed-point reciprocals (roughly `0x8000 / n`) used to average n = 1..=8
/// colours as `(2 * sum + 1) * QMULMIP[n - 1] >> 16`.
const QMULMIP: [u32; 8] = [
    0x7fff, 0x4000, 0x2aaa, 0x2000, 0x1999, 0x1555, 0x1249, 0x1000,
];
/// Averages the first `n` packed 8:8:8:8 colours in `lanes`, channel-wise,
/// using the fixed-point rounded divide from `QMULMIP`; each result channel
/// is clamped to 255 before repacking.
#[allow(clippy::cast_sign_loss, clippy::cast_possible_wrap)]
fn average_packed_colours(lanes: &[i32], n: usize) -> i32 {
    debug_assert!((1..=MIXC_LANES).contains(&n));
    let mul = QMULMIP[n - 1];
    // Accumulate each 8-bit channel separately.
    let mut channel_sums = [0u32; 4];
    for &packed in &lanes[..n] {
        let packed = packed as u32;
        for (channel, sum) in channel_sums.iter_mut().enumerate() {
            *sum += (packed >> (channel * 8)) & 0xff;
        }
    }
    // Rounded fixed-point divide by n, clamp, and repack.
    let mut packed_out = 0u32;
    for (channel, &sum) in channel_sums.iter().enumerate() {
        let avg = (sum.wrapping_mul(2).wrapping_add(1).wrapping_mul(mul) >> 16).min(255);
        packed_out |= avg << (channel * 8);
    }
    packed_out as i32
}
/// Builds one downsampled mip level from `data` and the source level's
/// column-offset table `src_offsets`.
///
/// Every 2x2 block of source columns (side `src_vsid`) is merged into one
/// destination column (side `dst_vsid`), halving z as well; source surface
/// colours landing in the same destination voxel are averaged. Returns the
/// new level's slab bytes plus its offset table (absolute offsets assuming
/// the bytes are appended directly after `data`, ending with a sentinel).
///
/// NOTE(review): this appears to be a port of voxlap's `genmipvxl` — the
/// slab-walk and merge state machine are kept structurally identical to it.
#[allow(
    clippy::cast_sign_loss,
    clippy::cast_possible_truncation,
    clippy::cast_possible_wrap,
    clippy::similar_names,
    clippy::too_many_lines,
    clippy::unnecessary_cast,
    clippy::needless_range_loop
)]
fn build_mip_level(
    data: &[u8],
    src_offsets: &[u32],
    src_vsid: u32,
    dst_vsid: u32,
) -> (Vec<u8>, Vec<u32>) {
    let src_vsid_us = src_vsid as usize;
    let dst_vsid_us = dst_vsid as usize;
    debug_assert_eq!(src_offsets.len(), src_vsid_us * src_vsid_us + 1);
    let dst_n_cols = dst_vsid_us * dst_vsid_us;
    let mut new_data: Vec<u8> = Vec::with_capacity(dst_n_cols * 8);
    let mut new_offsets: Vec<u32> = Vec::with_capacity(dst_n_cols + 1);
    // New offsets are absolute: the level is appended right after `data`.
    let data_base = u32::try_from(data.len()).expect("data offset within u32");
    // Colour-mixing scratch: up to MIXC_LANES source colours per destination z.
    let mut mixc: Vec<i32> = vec![0; MIXC_BUCKETS * MIXC_LANES];
    let mut mixn: Vec<u8> = vec![0; MIXC_BUCKETS];
    // Output slab chain for the destination column currently being built.
    let mut tbuf: Vec<u8> = Vec::with_capacity(1028);
    for y in 0..dst_vsid_us {
        for x in 0..dst_vsid_us {
            mixn.fill(0);
            tbuf.clear();
            tbuf.resize(4, 0);
            // The four source columns feeding destination column (x, y).
            let src_idx = [
                (2 * y) * src_vsid_us + (2 * x),
                (2 * y) * src_vsid_us + (2 * x + 1),
                (2 * y + 1) * src_vsid_us + (2 * x),
                (2 * y + 1) * src_vsid_us + (2 * x + 1),
            ];
            let mut v_offset = [0usize; 4];
            for k in 0..4 {
                v_offset[k] = src_offsets[src_idx[k]] as usize;
            }
            // Pass 1: walk every slab of each source column and bucket every
            // surface colour record under its destination z (source z >> 1).
            let mut curz = [0i32; 4];
            let mut curzn = [[0i32; 4]; 4];
            for i in 0..4 {
                let mut tv = v_offset[i];
                curz[i] = i32::from(data[tv + 1]);
                curzn[i][0] = curz[i];
                curzn[i][1] = i32::from(data[tv + 2]) + 1;
                loop {
                    // Ceiling run: one colour record per z in oz..=z1c,
                    // stored right after the 4-byte header.
                    let oz = i32::from(data[tv + 1]);
                    let z1c = i32::from(data[tv + 2]);
                    let mut z = oz;
                    while z <= z1c {
                        let nz = (z >> 1) as usize;
                        let rec_off = tv + (((z - oz) << 2) + 4) as usize;
                        let rec = i32::from_le_bytes([
                            data[rec_off],
                            data[rec_off + 1],
                            data[rec_off + 2],
                            data[rec_off + 3],
                        ]);
                        let n_lane = mixn[nz] as usize;
                        mixc[nz * MIXC_LANES + n_lane] = rec;
                        mixn[nz] += 1;
                        z += 1;
                    }
                    let nextptr = i32::from(data[tv]);
                    // Negative count of floor records that sit just before
                    // the next slab's header.
                    let mut z_carry = (z - oz) - (nextptr - 1);
                    if nextptr == 0 {
                        break;
                    }
                    tv += (nextptr as usize) << 2;
                    // Floor records are addressed backwards from the next
                    // header, their z counted down from data[tv + 3].
                    let oz_new = i32::from(data[tv + 3]);
                    while z_carry < 0 {
                        let nz = ((z_carry + oz_new) >> 1) as usize;
                        let signed_off = (z_carry << 2) as isize;
                        let rec_off = (tv as isize + signed_off) as usize;
                        let rec = i32::from_le_bytes([
                            data[rec_off],
                            data[rec_off + 1],
                            data[rec_off + 2],
                            data[rec_off + 3],
                        ]);
                        let n_lane = mixn[nz] as usize;
                        mixc[nz * MIXC_LANES + n_lane] = rec;
                        mixn[nz] += 1;
                        z_carry += 1;
                    }
                    v_offset[i] = tv;
                }
                // Rewind to the column start for pass 2.
                v_offset[i] = src_offsets[src_idx[i]] as usize;
            }
            // Pass 2: 4-way merge of the source columns' transitions into the
            // destination slab chain. `cstat` packs a 2-bit state machine per
            // source column (one nibble each, masks 0x1111/0x3333); `oldn` is
            // the offset of the open output slab header, `n` the write
            // cursor, and `cz` tracks a pending floor run (-1 = none).
            let mut cstat: i32 = 0;
            let mut oldn: usize = 0;
            let mut n: usize = 4;
            let mut z: i32 = i32::MIN;
            let mut cz: i32 = -1;
            loop {
                let oz = z;
                // Branchless argmin over curz[0..4]: select the column whose
                // next transition has the smallest z (sign-bit tricks).
                let mut besti = (((curz[1].wrapping_sub(curz[0])) as u32) >> 31) as i32;
                let i_alt =
                    ((((curz[3].wrapping_sub(curz[2])) as u32) >> 31) as i32).wrapping_add(2);
                let delta = curz[i_alt as usize].wrapping_sub(curz[besti as usize]);
                besti = besti.wrapping_add((delta >> 31) & (i_alt - besti));
                z = curz[besti as usize];
                if z >= MAXZDIM {
                    // All chains exhausted (MAXZDIM doubles as the sentinel).
                    break;
                }
                if cstat == 0 && (z >> 1) >= ((oz + 1) >> 1) {
                    // All four columns are air and we crossed into a new
                    // destination z: close the current slab, open a new one.
                    if oz >= 0 {
                        tbuf[oldn] = ((n - oldn) >> 2) as u8;
                        tbuf[oldn + 2] = tbuf[oldn + 2].wrapping_sub(1);
                        ensure_capacity(&mut tbuf, n + 4);
                        tbuf[n + 3] = (((oz + 1) >> 1) & 0xff) as u8;
                        oldn = n;
                        n += 4;
                    }
                    ensure_capacity(&mut tbuf, oldn + 4);
                    tbuf[oldn] = 0;
                    let initial = ((z >> 1) & 0xff) as u8;
                    tbuf[oldn + 1] = initial;
                    tbuf[oldn + 2] = initial;
                    cz = -1;
                }
                if cstat & 0x1111 != 0 {
                    // At least one source column is inside a ceiling run.
                    let tbuf_z1c = i32::from(tbuf[oldn + 2]);
                    if (tbuf_z1c << 1) + 1 >= oz && cz < 0 {
                        // Still adjacent to the open ceiling run: emit the
                        // averaged colours and extend it up to the new z.
                        while (i32::from(tbuf[oldn + 2]) << 1) < z {
                            let zz = i32::from(tbuf[oldn + 2]) as usize;
                            let n_vox = mixn[zz] as usize;
                            let avg = if n_vox == 0 {
                                0
                            } else {
                                let lo = zz * MIXC_LANES;
                                average_packed_colours(&mixc[lo..lo + n_vox], n_vox)
                            };
                            mixn[zz] = 0;
                            ensure_capacity(&mut tbuf, n + 4);
                            tbuf[n..n + 4].copy_from_slice(&avg.to_le_bytes());
                            tbuf[oldn + 2] = tbuf[oldn + 2].wrapping_add(1);
                            n += 4;
                        }
                    } else {
                        if cz < 0 {
                            cz = oz >> 1;
                        } else if (cz << 1) + 1 < oz {
                            // Air gap since the pending floor run: close this
                            // slab and open a fresh one starting at cz.
                            tbuf[oldn] = ((n - oldn) >> 2) as u8;
                            tbuf[oldn + 2] = tbuf[oldn + 2].wrapping_sub(1);
                            ensure_capacity(&mut tbuf, n + 4);
                            tbuf[n] = 0;
                            let cz_byte = (cz & 0xff) as u8;
                            tbuf[n + 1] = cz_byte;
                            tbuf[n + 2] = cz_byte;
                            tbuf[n + 3] = cz_byte;
                            oldn = n;
                            n += 4;
                            cz = oz >> 1;
                        }
                        // Emit averaged floor colours up to the new z.
                        while (cz << 1) < z {
                            let zz = cz as usize;
                            let n_vox = mixn[zz] as usize;
                            let avg = if n_vox == 0 {
                                0
                            } else {
                                let lo = zz * MIXC_LANES;
                                average_packed_colours(&mixc[lo..lo + n_vox], n_vox)
                            };
                            mixn[zz] = 0;
                            ensure_capacity(&mut tbuf, n + 4);
                            tbuf[n..n + 4].copy_from_slice(&avg.to_le_bytes());
                            cz += 1;
                            n += 4;
                        }
                    }
                }
                // Advance the chosen column's 2-bit state machine and fetch
                // its next transition z.
                let bit_pos = (besti << 2) as i32;
                cstat = ((1i32 << bit_pos).wrapping_add(cstat)) & 0x3333;
                let state = (cstat >> bit_pos) & 3;
                let bi = besti as usize;
                match state {
                    0 => curz[bi] = curzn[bi][0],
                    1 => curz[bi] = curzn[bi][1],
                    2 => {
                        // Move to the next slab in the chain, or terminate the
                        // column with the MAXZDIM sentinel.
                        let tv = v_offset[bi];
                        if data[tv] == 0 {
                            curz[bi] = MAXZDIM;
                        } else {
                            let n_floor = i32::from(data[tv + 2]) - i32::from(data[tv + 1]) + 1;
                            let i_carry = n_floor - (i32::from(data[tv]) - 1);
                            let new_tv = tv + ((i32::from(data[tv]) as usize) << 2);
                            curz[bi] = i32::from(data[new_tv + 3]) + i_carry;
                            curzn[bi][3] = i32::from(data[new_tv + 3]);
                            curzn[bi][0] = i32::from(data[new_tv + 1]);
                            curzn[bi][1] = i32::from(data[new_tv + 2]) + 1;
                            v_offset[bi] = new_tv;
                        }
                    }
                    3 => curz[bi] = curzn[bi][3],
                    _ => unreachable!("state is masked to 0..=3"),
                }
            }
            // Finalize the last slab: close its ceiling run and, if a floor
            // run is still pending, append a terminating header for it.
            tbuf[oldn + 2] = tbuf[oldn + 2].wrapping_sub(1);
            if cz >= 0 {
                tbuf[oldn] = ((n - oldn) >> 2) as u8;
                ensure_capacity(&mut tbuf, n + 4);
                tbuf[n] = 0;
                let cz_byte = (cz & 0xff) as u8;
                tbuf[n + 1] = cz_byte;
                tbuf[n + 2] = (cz - 1) as u8;
                tbuf[n + 3] = cz_byte;
                n += 4;
            }
            let col_start = data_base
                + u32::try_from(new_data.len()).expect("mip data fits in u32 byte addressing");
            new_offsets.push(col_start);
            new_data.extend_from_slice(&tbuf[..n]);
        }
    }
    new_offsets.push(
        data_base + u32::try_from(new_data.len()).expect("mip data fits in u32 byte addressing"),
    );
    (new_data, new_offsets)
}
/// Zero-extends `tbuf` so that all indices below `len_inclusive` are valid;
/// never shrinks the buffer.
fn ensure_capacity(tbuf: &mut Vec<u8>, len_inclusive: usize) {
    // Taking the max keeps this a pure grow operation.
    tbuf.resize(tbuf.len().max(len_inclusive), 0);
}
/// Errors produced while parsing a VXL file image.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum ParseError {
    /// File is smaller than the fixed 108-byte header.
    TooSmall { got: usize },
    /// Leading magic did not match `MAGIC`.
    BadMagic { got: u32 },
    /// Header x and y dimensions differ; only square maps are supported.
    NonSquareVsid { x: u32, y: u32 },
    /// A read ran past the end of the input (`need` bytes at offset `at`).
    Truncated { at: usize, need: usize },
    /// Column `idx`'s slab chain ran past the data region at offset `at`.
    BadColumn { idx: u32, at: usize },
    /// File exceeds the u32-addressable size this parser supports.
    FileTooLarge { got: usize },
}
impl fmt::Display for ParseError {
    /// Formats a human-readable description of the parse failure.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let text = match self {
            Self::TooSmall { got } => {
                format!("vxl file too small ({got} bytes; need at least 108 byte header)")
            }
            Self::BadMagic { got } => {
                format!("vxl bad magic: got {got:#010x}, expected 0x09072000")
            }
            Self::NonSquareVsid { x, y } => {
                format!("vxl non-square dimensions: xdim={x}, ydim={y} (must be equal)")
            }
            Self::Truncated { at, need } => {
                format!("vxl truncated: need {need} bytes at offset {at}")
            }
            Self::BadColumn { idx, at } => {
                format!("vxl column {idx}: slab walker overran data region at offset {at}")
            }
            Self::FileTooLarge { got } => format!(
                "vxl file size {got} exceeds {} bytes that this parser handles",
                u32::MAX
            ),
        };
        f.write_str(&text)
    }
}
// ParseError carries no wrapped source error, so the trait's default
// methods are sufficient.
impl std::error::Error for ParseError {}
impl From<OutOfBounds> for ParseError {
    /// Maps a cursor overrun onto the `Truncated` parse error.
    fn from(e: OutOfBounds) -> Self {
        let at = e.at;
        let need = e.need;
        Self::Truncated { at, need }
    }
}
/// Parses a VXL file image into a [`Vxl`], building the per-column offset
/// table for mip level 0 while validating every slab chain.
///
/// # Errors
///
/// Returns a [`ParseError`] when the input is smaller than the header, the
/// magic or dimensions are invalid, the file exceeds `u32::MAX` bytes, or a
/// column's slab chain runs past the end of the data region.
pub fn parse(bytes: &[u8]) -> Result<Vxl, ParseError> {
    if bytes.len() < HEADER_LEN {
        return Err(ParseError::TooSmall { got: bytes.len() });
    }
    // Column offsets are stored as u32, so the file must be u32-addressable.
    if u32::try_from(bytes.len()).is_err() {
        return Err(ParseError::FileTooLarge { got: bytes.len() });
    }
    let mut cur = Cursor::new(bytes);
    let magic = cur.read_u32()?;
    if magic != MAGIC {
        return Err(ParseError::BadMagic { got: magic });
    }
    let xdim = cur.read_u32()?;
    let ydim = cur.read_u32()?;
    if xdim != ydim {
        return Err(ParseError::NonSquareVsid { x: xdim, y: ydim });
    }
    let vsid = xdim;
    let ipo = read_dpoint3d(&mut cur)?;
    let ist = read_dpoint3d(&mut cur)?;
    let ihe = read_dpoint3d(&mut cur)?;
    let ifo = read_dpoint3d(&mut cur)?;
    let data_start = cur.pos;
    let data: Box<[u8]> = bytes[data_start..].to_vec().into_boxed_slice();
    // Walk every column's slab chain once, recording each column's start
    // offset and verifying no chain escapes the data region.
    let n_cols = (vsid as usize) * (vsid as usize);
    let mut column_offset = Vec::with_capacity(n_cols + 1);
    let mut pos = 0usize;
    for i in 0..n_cols {
        column_offset.push(u32::try_from(pos).expect("data offset within u32"));
        loop {
            // Every slab starts with a 4-byte header.
            if pos + 4 > data.len() {
                return Err(ParseError::BadColumn {
                    idx: u32::try_from(i).unwrap_or(u32::MAX),
                    at: pos,
                });
            }
            let nextptr = data[pos];
            if nextptr == 0 {
                // Terminal slab: header plus `z1c - z1 + 1` floor records.
                let z1 = data[pos + 1];
                let z1c = data[pos + 2];
                let n_floor_signed = i32::from(z1c) - i32::from(z1) + 1;
                let n_floor = usize::try_from(n_floor_signed.max(0))
                    .expect("n_floor non-negative after .max(0)");
                let last_size = 4 + n_floor * 4;
                if pos + last_size > data.len() {
                    return Err(ParseError::BadColumn {
                        idx: u32::try_from(i).unwrap_or(u32::MAX),
                        at: pos,
                    });
                }
                pos += last_size;
                break;
            }
            // nextptr >= 1 here, so the chain always advances by at least 4
            // bytes; the former `advance < 4` guard was unreachable and has
            // been removed.
            pos += usize::from(nextptr) * 4;
        }
    }
    column_offset.push(u32::try_from(pos).expect("data offset within u32"));
    // Single-mip table: mip 0 spans column_offset[0..=n_cols].
    let mip_base_offsets = Box::new([0usize, n_cols + 1]);
    Ok(Vxl {
        vsid,
        ipo,
        ist,
        ihe,
        ifo,
        data,
        column_offset: column_offset.into_boxed_slice(),
        mip_base_offsets,
        vbit: Box::new([]),
        vbiti: 0,
    })
}
/// Serializes `vxl` back into the on-disk VXL layout: header followed by the
/// raw slab bytes. `data` is written as-is, so any mip levels appended by
/// `generate_mips` are included in the output.
#[must_use]
pub fn serialize(vxl: &Vxl) -> Vec<u8> {
    let mut out = Vec::with_capacity(HEADER_LEN + vxl.data.len());
    out.extend_from_slice(&MAGIC.to_le_bytes());
    // Square map: the same side length is written for both x and y.
    for _ in 0..2 {
        out.extend_from_slice(&vxl.vsid.to_le_bytes());
    }
    for vector in [&vxl.ipo, &vxl.ist, &vxl.ihe, &vxl.ifo] {
        write_dpoint3d(&mut out, vector);
    }
    out.extend_from_slice(&vxl.data);
    out
}
/// Reads three consecutive little-endian f64 values from the cursor.
fn read_dpoint3d(cur: &mut Cursor<'_>) -> Result<[f64; 3], OutOfBounds> {
    let mut out = [0.0f64; 3];
    for v in &mut out {
        let buf = cur.read_bytes(8)?;
        // f64::from_le_bytes is equivalent to the former
        // f64::from_bits(u64::from_le_bytes(..)) but avoids spelling out
        // every byte by hand.
        *v = f64::from_le_bytes(buf.try_into().expect("read_bytes(8) yields 8 bytes"));
    }
    Ok(out)
}
/// Appends three little-endian f64 values to `out`.
fn write_dpoint3d(out: &mut Vec<u8>, p: &[f64; 3]) {
    for v in p {
        // f64::to_le_bytes already performs the to_bits conversion, so the
        // former v.to_bits().to_le_bytes() was redundant.
        out.extend_from_slice(&v.to_le_bytes());
    }
}
#[cfg(test)]
mod tests {
    use std::io::Read;
    use flate2::read::GzDecoder;
    use super::*;

    // Gzipped reference map used as the parsing / roundtrip oracle.
    const ORACLE_VXL_GZ: &[u8] = include_bytes!("../../../assets/oracle.vxl.gz");

    /// Decompresses the oracle fixture into raw .vxl bytes.
    fn decode_oracle() -> Vec<u8> {
        let mut decoder = GzDecoder::new(ORACLE_VXL_GZ);
        let mut out = Vec::with_capacity(40 * 1024 * 1024);
        decoder.read_to_end(&mut out).expect("ungzip oracle.vxl.gz");
        out
    }

    #[test]
    fn parse_oracle_header() {
        let bytes = decode_oracle();
        let vxl = parse(&bytes).expect("parse oracle.vxl");
        assert_eq!(vxl.vsid, 2048);
        // Compare via bit patterns so the assertions are exact for f64.
        let bits = |a: [f64; 3]| a.map(f64::to_bits);
        assert_eq!(bits(vxl.ipo), bits([1024.0, 1024.0, 128.0]));
        assert_eq!(bits(vxl.ist), bits([1.0, 0.0, 0.0]));
        assert_eq!(bits(vxl.ihe), bits([0.0, 0.0, 1.0]));
        assert_eq!(bits(vxl.ifo), bits([0.0, 1.0, 0.0]));
        assert_eq!(vxl.column_offset.len(), 4_194_304 + 1);
    }

    #[test]
    fn oracle_columns_partition_data_exactly() {
        let bytes = decode_oracle();
        let vxl = parse(&bytes).expect("parse oracle.vxl");
        // The offset table must start at 0 and its sentinel must equal the
        // data length, i.e. the columns tile the data region exactly.
        assert_eq!(vxl.column_offset[0], 0);
        assert_eq!(
            vxl.column_offset[vxl.column_offset.len() - 1] as usize,
            vxl.data.len()
        );
        let n_cols = (vxl.vsid as usize) * (vxl.vsid as usize);
        let min_col_len = (0..n_cols)
            .map(|i| vxl.column_data(i).len())
            .min()
            .expect("at least one column");
        // Every column has at least a 4-byte slab header.
        assert!(min_col_len >= 4);
    }

    #[test]
    fn oracle_solid_corner_column_has_minimal_slab() {
        let bytes = decode_oracle();
        let vxl = parse(&bytes).expect("parse oracle.vxl");
        let col = vxl.column_data(0);
        assert!(
            col.len() <= 32,
            "solid corner column should be tiny; got {} bytes",
            col.len()
        );
    }

    #[test]
    fn oracle_roundtrips_byte_equal() {
        let bytes = decode_oracle();
        let vxl = parse(&bytes).expect("parse oracle.vxl");
        let out = serialize(&vxl);
        assert_eq!(out.len(), bytes.len(), "length differs");
        assert_eq!(out, bytes, "byte content differs");
    }

    #[test]
    fn parse_truncated_header_fails() {
        let r = parse(&[0u8; 32]);
        assert!(matches!(r, Err(ParseError::TooSmall { .. })));
    }

    #[test]
    fn parse_bad_magic_fails() {
        let mut bad = decode_oracle();
        bad[0] ^= 0xff;
        let r = parse(&bad);
        assert!(matches!(r, Err(ParseError::BadMagic { .. })));
    }

    /// Builds a tiny 2x2 map where each column is a single terminal slab
    /// `[0, 10, 10, 0]` followed by one colour record (8 bytes per column).
    fn build_synthetic_2x2(colours: [u32; 4]) -> Vxl {
        let mut data = Vec::with_capacity(32);
        for col_colour in colours {
            data.extend_from_slice(&[0, 10, 10, 0]);
            data.extend_from_slice(&col_colour.to_le_bytes());
        }
        let column_offset: Box<[u32]> = vec![0u32, 8, 16, 24, 32].into_boxed_slice();
        Vxl {
            vsid: 2,
            ipo: [0.0; 3],
            ist: [1.0, 0.0, 0.0],
            ihe: [0.0, 0.0, 1.0],
            ifo: [0.0, 1.0, 0.0],
            data: data.into_boxed_slice(),
            column_offset,
            mip_base_offsets: Box::new([0, 5]),
            vbit: Box::new([]),
            vbiti: 0,
        }
    }

    #[test]
    fn generate_mips_skips_when_max_le_1() {
        let mut vxl = build_synthetic_2x2([1, 2, 3, 4]);
        let before_data_len = vxl.data.len();
        vxl.generate_mips(0);
        vxl.generate_mips(1);
        assert_eq!(vxl.mip_count(), 1);
        assert_eq!(vxl.data.len(), before_data_len);
        assert_eq!(vxl.mip_base_offsets.as_ref(), &[0usize, 5]);
    }

    #[test]
    fn generate_mips_2x2_produces_one_voxel_at_z5() {
        // Four colours whose channel-wise average is 2/2/2/0 (B/G/R/A).
        let colours = [
            0x0001_0101u32,
            0x0002_0202u32,
            0x0003_0303u32,
            0x0004_0404u32,
        ];
        let mut vxl = build_synthetic_2x2(colours);
        vxl.generate_mips(2);
        assert_eq!(vxl.mip_count(), 2);
        assert_eq!(vxl.column_offset_for_mip(0).len(), 5);
        assert_eq!(vxl.column_offset_for_mip(1).len(), 2);
        let col = vxl.column_data_for_mip(1, 0);
        assert_eq!(col.len(), 8, "mip-1 column bytes: {col:?}");
        // Header: terminal slab with z1 == z1c == 5 (source z 10 halved).
        assert_eq!(col[0], 0);
        assert_eq!(col[1], 5);
        assert_eq!(col[2], 5);
        assert_eq!(col[3], 0);
        assert_eq!(col[4], 2, "B");
        assert_eq!(col[5], 2, "G");
        assert_eq!(col[6], 2, "R");
        assert_eq!(col[7], 0, "A");
    }

    #[test]
    fn generate_mips_idempotent_across_calls() {
        let colours = [0x10u32, 0x20, 0x30, 0x40];
        let mut a = build_synthetic_2x2(colours);
        let mut b = build_synthetic_2x2(colours);
        a.generate_mips(2);
        b.generate_mips(2);
        // Second call must rebuild, not append again.
        b.generate_mips(2);
        assert_eq!(a.data, b.data);
        assert_eq!(a.column_offset, b.column_offset);
        assert_eq!(a.mip_base_offsets, b.mip_base_offsets);
    }

    #[test]
    fn generate_mips_oracle_full_depth() {
        let bytes = decode_oracle();
        let mut vxl = parse(&bytes).expect("parse oracle.vxl");
        let mip0_data_len = vxl.column_offset[(2048 * 2048) as usize] as usize;
        let mip0_data_snapshot = vxl.data[..mip0_data_len].to_vec();
        vxl.generate_mips(4);
        assert_eq!(vxl.mip_count(), 4);
        for mip in 0..4u32 {
            let dim = (2048u32 >> mip) as usize;
            assert_eq!(
                vxl.column_offset_for_mip(mip).len(),
                dim * dim + 1,
                "mip-{mip} offset table length"
            );
        }
        // Mip generation must never disturb the base level's bytes.
        assert_eq!(&vxl.data[..mip0_data_len], &mip0_data_snapshot[..]);
    }

    #[test]
    fn slng_single_slab_with_one_floor_voxel() {
        let slab = [0u8, 10, 10, 0, 0xff, 0, 0, 0];
        assert_eq!(slng(&slab), 8);
    }

    #[test]
    fn slng_single_slab_with_three_floor_voxels() {
        let slab = [0u8, 10, 12, 0, 1, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0, 0];
        assert_eq!(slng(&slab), 16);
    }

    #[test]
    fn slng_single_slab_empty_floor() {
        // z1c < z1: zero floor records, header only.
        let slab = [0u8, 10, 9, 0];
        assert_eq!(slng(&slab), 4);
    }

    #[test]
    fn slng_two_slab_chain() {
        let slab = [
            2u8, 10, 11, 0, 0xaa, 0, 0, 0, 0, 20, 22, 12, 0xbb, 0, 0, 0, 0xcc, 0, 0, 0, 0xdd,
            0, 0, 0,
        ];
        assert_eq!(slng(&slab), 24);
    }

    #[test]
    fn reserve_edit_capacity_grows_data_and_marks_existing() {
        let mut vxl = build_synthetic_2x2([0xaa, 0xbb, 0xcc, 0xdd]);
        let original_len = vxl.data.len();
        assert_eq!(original_len, 32);
        vxl.reserve_edit_capacity(64);
        assert_eq!(vxl.data.len(), 32 + 64);
        assert_eq!(vxl.data[0..4], [0, 10, 10, 0]);
        // 32 bytes of existing data = 8 dwords = low 8 bits set.
        assert_eq!(vxl.vbit.len(), 1);
        assert_eq!(vxl.vbit[0], 0xff);
        assert_eq!(vxl.vbiti, 0);
    }

    #[test]
    fn reserve_edit_capacity_aligns_headroom_up_to_dword() {
        let mut vxl = build_synthetic_2x2([1, 2, 3, 4]);
        vxl.reserve_edit_capacity(5);
        // 5 rounds up to 8 bytes of headroom.
        assert_eq!(vxl.data.len(), 32 + 8);
    }

    #[test]
    fn voxalloc_returns_offset_in_headroom() {
        let mut vxl = build_synthetic_2x2([1, 2, 3, 4]);
        vxl.reserve_edit_capacity(64);
        let off = vxl.voxalloc(8);
        // First free dword is right after the 32 bytes of column data.
        assert_eq!(off, 32);
        assert_eq!(vxl.vbit[0] & ((1 << 8) | (1 << 9)), (1 << 8) | (1 << 9));
    }

    #[test]
    fn voxalloc_successive_returns_non_overlapping() {
        let mut vxl = build_synthetic_2x2([1, 2, 3, 4]);
        vxl.reserve_edit_capacity(128);
        let a = vxl.voxalloc(8);
        let b = vxl.voxalloc(8);
        let c = vxl.voxalloc(16);
        assert_eq!(a, 32);
        assert_eq!(b, 40);
        assert_eq!(c, 48);
    }

    #[test]
    fn voxdealloc_clears_bits() {
        let mut vxl = build_synthetic_2x2([1, 2, 3, 4]);
        vxl.reserve_edit_capacity(128);
        let off = vxl.voxalloc(8);
        assert_eq!((vxl.vbit[0] >> 8) & 1, 1);
        assert_eq!((vxl.vbit[0] >> 9) & 1, 1);
        // Write a valid slab so voxdealloc can measure its length via slng.
        vxl.data[off as usize..off as usize + 8]
            .copy_from_slice(&[0, 5, 5, 0, 0xa1, 0xa2, 0xa3, 0xa4]);
        vxl.voxdealloc(off);
        assert_eq!((vxl.vbit[0] >> 8) & 1, 0);
        assert_eq!((vxl.vbit[0] >> 9) & 1, 0);
    }

    #[test]
    fn voxdealloc_freed_region_reused_after_full_scan() {
        let mut vxl = build_synthetic_2x2([1, 2, 3, 4]);
        vxl.reserve_edit_capacity(32);
        let a = vxl.voxalloc(8);
        let _b = vxl.voxalloc(8);
        let _c = vxl.voxalloc(8);
        vxl.data[a as usize..a as usize + 8].copy_from_slice(&[0, 5, 5, 0, 0xa1, 0xa2, 0xa3, 0xa4]);
        vxl.voxdealloc(a);
        // The cursor has moved past `a`; only the rewind pass can find it.
        let reused = vxl.voxalloc(8);
        assert_eq!(reused, a, "freed region should be reused on rescan");
    }

    #[test]
    fn voxdealloc_cross_word_boundary() {
        let mut vxl = build_synthetic_2x2([1, 2, 3, 4]);
        vxl.reserve_edit_capacity(192);
        let _ = vxl.voxalloc(96);
        assert_eq!(vxl.vbiti, 32);
        let off = vxl.voxalloc(16);
        assert_eq!(off, 32 * 4);
        vxl.data[off as usize..off as usize + 16]
            .copy_from_slice(&[0, 0, 2, 0, 0xa, 0, 0, 0, 0xb, 0, 0, 0, 0xc, 0, 0, 0]);
        // Second map: pad so the next allocation straddles dwords 30..34,
        // i.e. crosses the 32-bit word boundary of vbit.
        let mut vxl2 = build_synthetic_2x2([1, 2, 3, 4]);
        vxl2.reserve_edit_capacity(192);
        let pad = vxl2.voxalloc(22 * 4);
        assert_eq!(pad, 32);
        let cross = vxl2.voxalloc(16);
        assert_eq!(cross, 30 * 4);
        assert!(
            (vxl2.vbit[0] >> 30) & 1 == 1
                && (vxl2.vbit[0] >> 31) & 1 == 1
                && vxl2.vbit[1] & 1 == 1
                && (vxl2.vbit[1] >> 1) & 1 == 1,
            "bits across word boundary should all be set"
        );
        vxl2.data[cross as usize..cross as usize + 16]
            .copy_from_slice(&[0, 0, 2, 0, 0xa, 0, 0, 0, 0xb, 0, 0, 0, 0xc, 0, 0, 0]);
        vxl2.voxdealloc(cross);
        assert_eq!((vxl2.vbit[0] >> 30) & 1, 0);
        assert_eq!((vxl2.vbit[0] >> 31) & 1, 0);
        assert_eq!(vxl2.vbit[1] & 1, 0);
        assert_eq!((vxl2.vbit[1] >> 1) & 1, 0);
    }

    #[test]
    #[should_panic(expected = "voxalloc requires reserve_edit_capacity")]
    fn voxalloc_panics_without_reserve() {
        let mut vxl = build_synthetic_2x2([1, 2, 3, 4]);
        let _ = vxl.voxalloc(8);
    }

    #[test]
    #[should_panic(expected = "voxalloc n_bytes must be a positive multiple of 4")]
    fn voxalloc_panics_on_bad_size() {
        let mut vxl = build_synthetic_2x2([1, 2, 3, 4]);
        vxl.reserve_edit_capacity(64);
        let _ = vxl.voxalloc(7);
    }

    #[test]
    #[should_panic(expected = "voxalloc: vbuf full")]
    fn voxalloc_panics_when_pool_full() {
        let mut vxl = build_synthetic_2x2([1, 2, 3, 4]);
        vxl.reserve_edit_capacity(16);
        let _ = vxl.voxalloc(8);
        let _ = vxl.voxalloc(8);
        let _ = vxl.voxalloc(8);
    }
}