roxlap_formats/vxl.rs
1//! `.vxl` voxel-map format (Voxlap world / heightmap + slab columns).
2//!
3//! Reference: voxlaptest's `loadvxl` / `savevxl` (`voxlap5.c:3828` /
4//! `:3887`) and the `vbuf` slab layout comment at
5//! `voxlap5.c:75`. File layout (all multi-byte fields are little-
6//! endian):
7//!
8//! ```text
9//! offset size description
10//! 0x00 u32 magic = 0x09072000
11//! 0x04 u32 xdim (must equal ydim — VSID)
12//! 0x08 u32 ydim
13//! 0x0c 24 bytes ipo: starting position (3 × f64)
14//! 0x24 24 bytes ist: right vector (3 × f64)
15//! 0x3c 24 bytes ihe: down vector (3 × f64)
16//! 0x54 24 bytes ifo: forward vector (3 × f64)
17//! 0x6c variable column slab data (`vsid * vsid` columns)
18//! ```
19//!
20//! Each column's slab data is a chain of slabs:
21//!
22//! ```text
23//! slab header (4 bytes):
24//! byte 0 nextptr — offset to next slab in dwords (== 0 for last slab)
25//! byte 1 z1 — top z of floor-colour list
26//! byte 2 z1c — bottom z of floor-colour list MINUS 1
27//! byte 3 z0 — ceiling z (additional slabs); dummy in the first
28//! followed by (per-voxel) 4-byte BGRA colour records.
29//! ```
30//!
31//! Walker semantics, copied from `loadvxl`:
32//!
33//! - Non-last slab: total bytes = `nextptr * 4`. Advance by `nextptr * 4`.
34//! - Last slab (`nextptr == 0`): total bytes = `4 + (z1c - z1 + 1) * 4`
35//! (header + floor colours; no ceiling colours).
36//!
37//! This module preserves column slab bytes verbatim in [`Vxl::data`] and
38//! exposes a per-column byte-offset table in [`Vxl::column_offset`].
39//! Iterating individual slabs (interpreting ceiling vs floor colour
40//! lists) is left for a follow-up — the world fixture is large enough
41//! that the test workload favours a flat byte representation, and the
42//! engine port (R4) walks the bytes directly anyway.
43
44use core::fmt;
45
46use crate::bytes::{Cursor, OutOfBounds};
47
48const MAGIC: u32 = 0x0907_2000;
49const HEADER_LEN: usize = 4 + 4 + 4 + 4 * 24;
50
/// Parsed `.vxl` map. Round-trips byte-equally via [`parse`] +
/// [`serialize`] — note this guarantee holds only until
/// [`Vxl::reserve_edit_capacity`] appends allocator headroom (see
/// its docs).
#[derive(Debug, Clone)]
pub struct Vxl {
    /// Square map dimension. xdim and ydim are both equal to this in
    /// the file format (the loader rejects non-square maps).
    pub vsid: u32,
    /// Starting camera position (`dpoint3d`).
    pub ipo: [f64; 3],
    /// Right vector (`dpoint3d`).
    pub ist: [f64; 3],
    /// Down vector (`dpoint3d`).
    pub ihe: [f64; 3],
    /// Forward vector (`dpoint3d`).
    pub ifo: [f64; 3],
    /// Concatenated raw slab data for all columns across every built
    /// mip level. `parse` returns mip-0 data only; [`Vxl::generate_mips`]
    /// appends mip-1..mip-N onto the tail.
    pub data: Box<[u8]>,
    /// Per-column byte offsets into [`Vxl::data`], concatenated across
    /// every built mip level. Mip-N's sub-table lives at indices
    /// `mip_base_offsets[N]..mip_base_offsets[N + 1]` and contains
    /// `(vsid >> N)² + 1` entries — the trailing sentinel equals the
    /// byte offset where mip-(N+1)'s data starts (or `data.len()`
    /// for the topmost mip). After `parse`, the table is just mip-0
    /// (`vsid² + 1` entries) and the layout matches the historical
    /// single-mip shape, so callers passing `&vxl.column_offset`
    /// straight into the rasterizer keep working.
    pub column_offset: Box<[u32]>,
    /// `mip_base_offsets[mip]` is the index in [`Vxl::column_offset`]
    /// where mip-`mip`'s sub-table begins. `len() == mip_count + 1`;
    /// the trailing sentinel equals `column_offset.len()`. Initial
    /// state after `parse`: `[0, vsid² + 1]` (one mip).
    pub mip_base_offsets: Box<[usize]>,
    /// Slab-pool allocation bitmap — 1 bit per dword in [`Vxl::data`].
    /// A set bit marks the dword as belonging to an allocated column
    /// or mip-segment; clear bits are free space available to
    /// [`Vxl::voxalloc`]. Voxlap's `vbit` (`voxlap5.c:72`).
    ///
    /// Empty after [`parse`] — read-only Vxls have no allocator
    /// state. Call [`Vxl::reserve_edit_capacity`] to upgrade to a
    /// mutation-ready slab pool.
    pub vbit: Box<[u32]>,
    /// Roving allocator index, in dwords. Voxlap's `vbiti`
    /// (`voxlap5.c:72`). Advances past each successful
    /// [`Vxl::voxalloc`] to amortise the search.
    pub vbiti: u32,
}
99
100impl Vxl {
101 /// Raw slab bytes for mip-0 column `idx` (`idx < vsid * vsid`).
102 /// Equivalent to `column_data_for_mip(0, idx)` — kept for the
103 /// pre-multi-mip call sites.
104 ///
105 /// Length is recovered by walking the slab chain via [`slng`].
106 /// This works both pre-edit (when columns are contiguous and
107 /// `column_offset[idx + 1]` would also bound the column) and
108 /// post-edit (when [`Vxl::voxalloc`] has scattered columns
109 /// across the vbuf pool).
110 #[must_use]
111 pub fn column_data(&self, idx: usize) -> &[u8] {
112 let start = self.column_offset[idx] as usize;
113 let end = start + slng(&self.data[start..]);
114 &self.data[start..end]
115 }
116
117 /// How many mip levels are currently built. Always `>= 1`
118 /// (mip-0 is the parsed file). [`Vxl::generate_mips`] grows this
119 /// up to its `max_mips` argument (capped by the world's
120 /// `vsid` halving).
121 ///
122 /// # Panics
123 ///
124 /// Cannot panic in practice — `mip_base_offsets.len() - 1`
125 /// fits in `u32` for any realistic `vsid`.
126 #[must_use]
127 pub fn mip_count(&self) -> u32 {
128 u32::try_from(self.mip_base_offsets.len() - 1).expect("mip count fits in u32")
129 }
130
131 /// Per-column byte-offset sub-table for mip `mip`. Length
132 /// `(vsid >> mip)² + 1`; the trailing sentinel is the byte
133 /// offset where this mip's data ends inside [`Vxl::data`].
134 ///
135 /// # Panics
136 ///
137 /// Panics if `mip >= mip_count()`.
138 #[must_use]
139 pub fn column_offset_for_mip(&self, mip: u32) -> &[u32] {
140 let mip_idx = mip as usize;
141 let lo = self.mip_base_offsets[mip_idx];
142 let hi = self.mip_base_offsets[mip_idx + 1];
143 &self.column_offset[lo..hi]
144 }
145
146 /// Raw slab bytes for column `idx` at mip `mip`. `idx` must be
147 /// `< (vsid >> mip)²`. Length recovered via [`slng`] (works
148 /// both pre-edit and after [`Vxl::voxalloc`]-driven scatter).
149 ///
150 /// # Panics
151 ///
152 /// Panics if `mip >= mip_count()` or `idx` is past this mip's
153 /// column count.
154 #[must_use]
155 pub fn column_data_for_mip(&self, mip: u32, idx: usize) -> &[u8] {
156 let table = self.column_offset_for_mip(mip);
157 let start = table[idx] as usize;
158 let end = start + slng(&self.data[start..]);
159 &self.data[start..end]
160 }
161
162 /// Drop any built mip-1+ data, returning the Vxl to its
163 /// post-`parse` single-mip shape. Cheap when already single-mip.
164 fn reset_to_single_mip(&mut self) {
165 let n_cols = (self.vsid as usize) * (self.vsid as usize);
166 if self.mip_base_offsets.len() <= 2 {
167 return;
168 }
169 let mip0_end_in_data = self.column_offset[n_cols] as usize;
170 self.data = self.data[..mip0_end_in_data].to_vec().into_boxed_slice();
171 self.column_offset = self.column_offset[..=n_cols].to_vec().into_boxed_slice();
172 self.mip_base_offsets = Box::new([0, n_cols + 1]);
173 }
174
    /// Build mip-1..mip-`max_mips` column data in place, mirroring
    /// voxlap's `genmipvxl` (`voxlap5.c:4710-4944`). Mip-0 is preserved.
    /// The loop halves dims each level and stops early when either
    /// dim drops to 1 or `max_mips` is reached.
    ///
    /// Calling this method more than once recomputes mips from
    /// scratch (matches voxlap's idempotent semantics — `genmipvxl`
    /// is invoked anywhere setcolumn-style mutations happen, and it
    /// always rebuilds against the current mip-0).
    ///
    /// # Panics
    ///
    /// Panics on a logic bug: the per-iteration `debug_assert_eq!`
    /// guards the invariant that the prior trailing
    /// `mip_base_offsets` entry equals the new sub-table's start.
    /// Production builds skip the assert.
    #[allow(clippy::missing_panics_doc)] // covered above
    pub fn generate_mips(&mut self, max_mips: u32) {
        self.reset_to_single_mip();
        if max_mips <= 1 {
            return;
        }

        // Outer mip loop. Voxlap5.c:4724-4932: while dims still halve
        // and we haven't reached `mipmax`, build mip-(mipnum) from
        // mip-(mipnum-1).
        let mut mipnum: u32 = 1;
        let mut src_vsid: u32 = self.vsid;
        let mut src_z_bound: i32 = MAXZDIM;
        while src_vsid > 1 && src_z_bound > 1 && mipnum < max_mips {
            let dst_vsid = src_vsid >> 1;
            let dst_z_bound = src_z_bound >> 1;

            // Snapshot the source mip's offsets/data before we mutate
            // self. The source mip is `mipnum - 1` (already built).
            let src_offsets_lo = self.mip_base_offsets[(mipnum - 1) as usize];
            let src_offsets_hi = self.mip_base_offsets[mipnum as usize];
            let src_offsets = self.column_offset[src_offsets_lo..src_offsets_hi].to_vec();

            // Build the new mip into fresh buffers; merge afterwards.
            let (new_data_segment, new_offsets) =
                build_mip_level(&self.data, &src_offsets, src_vsid, dst_vsid);

            // Splice into self. New offsets are returned in absolute
            // byte coords (the source-data prefix is unchanged so
            // they're already valid when treated as offsets into the
            // grown data buffer).
            let mut combined_data = self.data.to_vec();
            combined_data.extend_from_slice(&new_data_segment);
            self.data = combined_data.into_boxed_slice();

            let mut combined_offsets = self.column_offset.to_vec();
            combined_offsets.extend_from_slice(&new_offsets);
            self.column_offset = combined_offsets.into_boxed_slice();

            // The previous trailing entry was `src_offsets_hi` (the
            // mip-N sentinel and therefore the start of mip-N+1's
            // sub-table). Pushing the post-extension `column_offset`
            // length adds the mip-N+1 sentinel.
            debug_assert_eq!(
                *self
                    .mip_base_offsets
                    .last()
                    .expect("mip_base_offsets non-empty"),
                src_offsets_hi
            );
            let mut combined_mips = self.mip_base_offsets.to_vec();
            combined_mips.push(self.column_offset.len());
            self.mip_base_offsets = combined_mips.into_boxed_slice();

            // Next iteration reads from the level we just built.
            mipnum += 1;
            src_vsid = dst_vsid;
            src_z_bound = dst_z_bound;
        }
    }
250
251 // ---- slab allocator (CD.2 cave-demo edit API) ---------------------
252 //
253 // Voxlap's `vbuf`/`vbit`/`voxalloc`/`voxdealloc` (`voxlap5.c:64-862`)
254 // are a bitmap free-list allocator over the slab byte pool. Roxlap
255 // re-uses [`Vxl::data`] as that pool: existing column bytes stay
256 // put, headroom appended at the tail is the free region. Granularity
257 // is one bit per dword (matches voxlap; column data is dword-aligned
258 // by construction since slabs are 4-byte records).
259 //
260 // The allocator is opt-in. Read-only Vxls leave `vbit` empty and
261 // never call alloc/dealloc.
262
263 /// Upgrade this Vxl to a mutation-ready slab pool.
264 ///
265 /// Grows [`Vxl::data`] by `headroom_bytes` (rounded up to dword)
266 /// and initialises [`Vxl::vbit`] with one bit per dword: bits in
267 /// the prefix covered by every existing mip's columns are marked
268 /// allocated; the appended headroom is free space available to
269 /// [`Vxl::voxalloc`].
270 ///
271 /// Cost: O(total mip dwords) for the bit-marking pass, plus the
272 /// data grow (one allocation + memcpy of existing bytes).
273 ///
274 /// After this call, [`serialize`] no longer round-trips byte-equally
275 /// with the parsed file (the appended headroom is included in
276 /// `data` and would be emitted as trailing zeros). Post-edit save
277 /// requires walking columns in index order — landing in CD.2.5
278 /// when on-disk save matters.
279 ///
280 /// # Panics
281 ///
282 /// Panics if the new total byte length does not fit in `u32`
283 /// (would prevent column offsets from indexing into the pool).
284 pub fn reserve_edit_capacity(&mut self, headroom_bytes: usize) {
285 let headroom_aligned = (headroom_bytes + 3) & !3;
286 let old_len = self.data.len();
287 let new_len = old_len + headroom_aligned;
288 u32::try_from(new_len).expect("vbuf size fits in u32");
289
290 let mut new_data = Vec::with_capacity(new_len);
291 new_data.extend_from_slice(&self.data);
292 new_data.resize(new_len, 0);
293 self.data = new_data.into_boxed_slice();
294
295 let total_dwords = new_len / 4;
296 let n_words = total_dwords.div_ceil(32);
297 let mut vbit = vec![0u32; n_words].into_boxed_slice();
298
299 // Mark every column's dword range as allocated. Iterates over
300 // all built mips so mip-1+ tail data is preserved as
301 // "allocated" (never repurposed by voxalloc, which finds runs
302 // only in genuinely-clear bits).
303 for mip in 0..self.mip_count() {
304 let table = self.column_offset_for_mip(mip);
305 for window in table.windows(2) {
306 let lo = (window[0] / 4) as usize;
307 let hi = (window[1] / 4) as usize;
308 for d in lo..hi {
309 vbit[d >> 5] |= 1u32 << (d & 31);
310 }
311 }
312 }
313
314 self.vbit = vbit;
315 self.vbiti = 0;
316 }
317
    /// Allocate `n_bytes` of slab pool, returning a byte offset into
    /// [`Vxl::data`]. Voxlap's `voxalloc` (`voxlap5.c:841`).
    ///
    /// `n_bytes` MUST be a positive multiple of 4 (slab records are
    /// dword-aligned).
    ///
    /// # Panics
    ///
    /// - Panics if [`Vxl::reserve_edit_capacity`] was not called first
    ///   (`vbit` is empty).
    /// - Panics if `n_bytes == 0` or not a multiple of 4.
    /// - Panics if the pool is full (after two scan passes from
    ///   `vbiti` and from 0). Callers should size the headroom in
    ///   `reserve_edit_capacity` to absorb expected churn.
    pub fn voxalloc(&mut self, n_bytes: u32) -> u32 {
        assert!(
            !self.vbit.is_empty(),
            "voxalloc requires reserve_edit_capacity"
        );
        assert!(
            n_bytes > 0 && n_bytes % 4 == 0,
            "voxalloc n_bytes must be a positive multiple of 4 (got {n_bytes})"
        );

        // Request size in dwords; `vend` is the highest dword index a
        // `danum`-dword run may start at and still fit in the pool.
        let danum = n_bytes / 4;
        let total_dwords = u32::try_from(self.data.len() / 4).expect("pool dwords fit in u32");
        assert!(
            danum <= total_dwords,
            "voxalloc: requested span > pool size"
        );
        let vend = total_dwords - danum;

        // Two scan passes: first from the roving index, then (after
        // rewinding `vbiti`) from the start of the pool.
        for _badcnt in 0..2 {
            while self.vbiti < vend {
                if vbit_is_set(&self.vbit, self.vbiti) {
                    self.vbiti += danum;
                    continue;
                }
                // Walk back to the first dword of the surrounding
                // free run.
                let mut p0 = self.vbiti;
                while p0 > 0 && !vbit_is_set(&self.vbit, p0 - 1) {
                    p0 -= 1;
                }
                // Verify [p0, p0+danum) is entirely free by probing
                // backward from p0+danum-1 down to vbiti+1 (the
                // [p0..=vbiti] half is already proven free by the walk
                // above, plus vbiti's own bit which we know is clear).
                let mut p1 = p0 + danum - 1;
                let mut found = true;
                while p1 > self.vbiti {
                    if vbit_is_set(&self.vbit, p1) {
                        found = false;
                        break;
                    }
                    p1 -= 1;
                }
                if !found {
                    self.vbiti += danum;
                    continue;
                }

                // Claim the run: advance the roving index past it
                // and mark each dword allocated.
                self.vbiti = p0 + danum;
                for k in p0..self.vbiti {
                    self.vbit[(k >> 5) as usize] |= 1u32 << (k & 31);
                }
                return p0 * 4;
            }
            self.vbiti = 0;
        }
        panic!("voxalloc: vbuf full (cannot allocate {n_bytes} bytes)");
    }
390
391 /// Free the slab at `byte_offset` (which must point at the start
392 /// of a slab chain previously returned by [`Vxl::voxalloc`] or
393 /// recorded in a column table). Voxlap's `voxdealloc`
394 /// (`voxlap5.c:822`).
395 ///
396 /// Length is recovered by walking the slab chain via [`slng`] —
397 /// the caller does not pass it.
398 ///
399 /// # Panics
400 ///
401 /// Panics if [`Vxl::reserve_edit_capacity`] was not called first.
402 pub fn voxdealloc(&mut self, byte_offset: u32) {
403 assert!(
404 !self.vbit.is_empty(),
405 "voxdealloc requires reserve_edit_capacity"
406 );
407 let len_bytes = u32::try_from(slng(&self.data[byte_offset as usize..]))
408 .expect("slab length fits in u32");
409 let i = byte_offset / 4;
410 let j = (byte_offset + len_bytes) / 4;
411 let i_word = (i >> 5) as usize;
412 let j_word = (j >> 5) as usize;
413 let i_bit = i & 31;
414 let j_bit = j & 31;
415
416 if i_word == j_word {
417 // Range fits in a single word.
418 let mask = p2m(j_bit) ^ p2m(i_bit);
419 self.vbit[i_word] &= !mask;
420 } else {
421 self.vbit[i_word] &= p2m(i_bit);
422 self.vbit[j_word] &= !p2m(j_bit);
423 for w in (i_word + 1)..j_word {
424 self.vbit[w] = 0;
425 }
426 }
427 }
428}
429
/// Slab-chain length helper — voxlap's `slng` (`voxlap5.c:814`).
/// Follows the next-slab pointers starting at `slab[0]` until the
/// terminating slab (`nextptr == 0`), then adds that slab's 4-byte
/// header plus its floor-colour payload (`(z1c - z1 + 1) * 4`,
/// clamped at zero). Returns the column's total byte count.
///
/// # Panics
///
/// Panics when a malformed chain walks past the end of `slab`, or
/// when the terminal header reaches outside the slice. Both are
/// caller errors — `slab` must be a valid voxlap slab chain rooted
/// at `slab[0]`.
#[must_use]
pub fn slng(slab: &[u8]) -> usize {
    // Hop over non-terminal slabs; each header byte 0 is the slab's
    // total size in dwords.
    let mut off = 0usize;
    loop {
        let nextptr = slab[off];
        if nextptr == 0 {
            break;
        }
        off += usize::from(nextptr) * 4;
    }
    // Terminal slab: 4-byte header + floor colours z1..=z1c
    // (possibly an empty list when z1c < z1).
    let z1 = i32::from(slab[off + 1]);
    let z1c = i32::from(slab[off + 2]);
    let n_floor = usize::try_from((z1c - z1 + 1).max(0)).expect("clamped non-negative");
    off + 4 + n_floor * 4
}
453
/// `p2m[k]` — bitmask with the low `k` bits set (`(1 << k) - 1`).
/// Voxlap's `p2m` table (a 32-entry static array). `k` MUST be in
/// `0..=31`.
///
/// No `k == 0` special case is needed: `1u32 << 0 == 1`, so the
/// general expression already yields 0 there (the previous branch
/// was redundant).
#[inline]
fn p2m(k: u32) -> u32 {
    debug_assert!(k <= 31, "p2m takes 0..=31 (got {k})");
    (1u32 << k) - 1
}
466
/// True when the allocation bit for dword `dword_idx` is set in the
/// packed bitmap `vbit` (32 dwords tracked per `u32` word).
#[inline]
fn vbit_is_set(vbit: &[u32], dword_idx: u32) -> bool {
    let (word, bit) = ((dword_idx / 32) as usize, dword_idx % 32);
    vbit[word] & (1u32 << bit) != 0
}
473
474// ---------- multi-mip generation -----------------------------------------
475//
476// `build_mip_level` and friends below are a dense, cast-heavy port of
477// voxlap5.c:4710-4944. The pedantic-cast lints fire on every line
478// that mirrors a C `int32_t`/`char *` interaction; they're allowed
479// scoped to each function rather than module-wide so the parser
480// keeps its full lint coverage.
481
/// Maximum z-extent of a column — voxlap's `MAXZDIM` from
/// `voxlap5.h:10`. Each mip level halves this bound.
const MAXZDIM: i32 = 256;

/// Number of z-buckets in the per-cell colour-mixing accumulator.
/// Mip-N+1's z range is `MAXZDIM >> (N+1)`, so the largest first-
/// transition (mip-0 → mip-1) needs `MAXZDIM/2 = 128` buckets.
/// Sized once; later mips use a prefix.
const MIXC_BUCKETS: usize = (MAXZDIM as usize) >> 1;

/// Up to 8 source colours per bucket: 4 source columns × 2 source
/// z values per `(z >> 1)` bucket = 8.
const MIXC_LANES: usize = 8;

/// Voxlap5.c:4703-4707 (`qmulmip`). Multiplier table for averaging
/// `n` colour bytes after `*2 + 1`, then `>> 16`. Originally a
/// 4-lane packed `int64` (e.g. `0x7fff7fff7fff7fff`); we only need
/// the low 16 bits because the scalar fallback at
/// `voxlap5.c:4815-4837` reads the bottom u16 and broadcasts it.
const QMULMIP: [u32; 8] = [
    0x7fff, 0x4000, 0x2aaa, 0x2000, 0x1999, 0x1555, 0x1249, 0x1000,
];

/// Average up to 8 packed BGRA colours (scalar translation of
/// voxlap5.c:4815-4837). `n` is `1..=8`; `lanes[..n]` are the source
/// `int32_t` BGRA records.
#[allow(clippy::cast_sign_loss, clippy::cast_possible_wrap)]
fn average_packed_colours(lanes: &[i32], n: usize) -> i32 {
    debug_assert!((1..=MIXC_LANES).contains(&n));
    let mul = QMULMIP[n - 1];
    // Accumulate per-channel byte sums (B, G, R, A).
    let mut channel_sum = [0u32; 4];
    for &packed in &lanes[..n] {
        let bits = packed as u32;
        for (ch, s) in channel_sum.iter_mut().enumerate() {
            *s += (bits >> (ch * 8)) & 0xff;
        }
    }
    // Voxlap's rounding: `(sum*2 + 1) * mul >> 16`, saturated to
    // `0..=255`. Unsigned arithmetic can't go negative, so only the
    // upper clamp is reachable.
    let mut packed_out = 0u32;
    for (ch, &s) in channel_sum.iter().enumerate() {
        let scaled = s.wrapping_mul(2).wrapping_add(1).wrapping_mul(mul) >> 16;
        packed_out |= scaled.min(255) << (ch * 8);
    }
    packed_out as i32
}
531
/// Build mip-N+1 column data + offsets from mip-N source. `data` is
/// the global byte buffer (mip-0..mip-N concatenated). `src_offsets`
/// is mip-N's per-column offset sub-table (length `src_vsid² + 1`).
///
/// Returns `(new_segment_bytes, new_offsets)`. `new_offsets` is sized
/// `dst_vsid² + 1` and gives ABSOLUTE byte offsets into the post-
/// extension data buffer (i.e. starts at `data.len()` and grows from
/// there).
///
/// Dense, cast-heavy port of `genmipvxl`'s inner loops
/// (voxlap5.c:4710-4944); statement order is load-bearing, so the
/// body deliberately mirrors the C line-for-line.
#[allow(
    clippy::cast_sign_loss,
    clippy::cast_possible_truncation,
    clippy::cast_possible_wrap,
    clippy::similar_names,
    clippy::too_many_lines,
    clippy::unnecessary_cast,
    clippy::needless_range_loop
)]
fn build_mip_level(
    data: &[u8],
    src_offsets: &[u32],
    src_vsid: u32,
    dst_vsid: u32,
) -> (Vec<u8>, Vec<u32>) {
    let src_vsid_us = src_vsid as usize;
    let dst_vsid_us = dst_vsid as usize;
    debug_assert_eq!(src_offsets.len(), src_vsid_us * src_vsid_us + 1);

    let dst_n_cols = dst_vsid_us * dst_vsid_us;
    let mut new_data: Vec<u8> = Vec::with_capacity(dst_n_cols * 8);
    let mut new_offsets: Vec<u32> = Vec::with_capacity(dst_n_cols + 1);
    let data_base = u32::try_from(data.len()).expect("data offset within u32");

    // Per-cell scratch reused across all (x, y) at this mip level.
    let mut mixc: Vec<i32> = vec![0; MIXC_BUCKETS * MIXC_LANES];
    let mut mixn: Vec<u8> = vec![0; MIXC_BUCKETS];
    // tbuf: per-column slab byte stream. Voxlap caps at MAXCSIZ=1028;
    // we grow as needed.
    let mut tbuf: Vec<u8> = Vec::with_capacity(1028);

    for y in 0..dst_vsid_us {
        for x in 0..dst_vsid_us {
            // Reset per-cell scratch.
            mixn.fill(0);
            tbuf.clear();
            tbuf.resize(4, 0); // header placeholder (voxlap5.c:4779: tbuf[3] = 0; n = 4)

            // 4 source-column byte offsets at (2x, 2y), (2x+1, 2y),
            // (2x, 2y+1), (2x+1, 2y+1). Voxlap's `oysiz`/`oxsiz` are
            // equal at every mip (square world).
            let src_idx = [
                (2 * y) * src_vsid_us + (2 * x),
                (2 * y) * src_vsid_us + (2 * x + 1),
                (2 * y + 1) * src_vsid_us + (2 * x),
                (2 * y + 1) * src_vsid_us + (2 * x + 1),
            ];
            let mut v_offset = [0usize; 4]; // current slab offset per source col
            for k in 0..4 {
                v_offset[k] = src_offsets[src_idx[k]] as usize;
            }

            // ---- Phase 1: flatten each source column's voxels into
            // `mixc`/`mixn` keyed on `z >> 1`. Voxlap5.c:4754-4778.
            let mut curz = [0i32; 4];
            let mut curzn = [[0i32; 4]; 4];
            for i in 0..4 {
                let mut tv = v_offset[i];
                // Initial state: top of floor and end-of-floor + 1.
                curz[i] = i32::from(data[tv + 1]);
                curzn[i][0] = curz[i];
                curzn[i][1] = i32::from(data[tv + 2]) + 1;

                loop {
                    let oz = i32::from(data[tv + 1]);
                    let z1c = i32::from(data[tv + 2]);
                    // Floor records at z = oz..=z1c.
                    let mut z = oz;
                    while z <= z1c {
                        let nz = (z >> 1) as usize;
                        let rec_off = tv + (((z - oz) << 2) + 4) as usize;
                        let rec = i32::from_le_bytes([
                            data[rec_off],
                            data[rec_off + 1],
                            data[rec_off + 2],
                            data[rec_off + 3],
                        ]);
                        let n_lane = mixn[nz] as usize;
                        mixc[nz * MIXC_LANES + n_lane] = rec;
                        mixn[nz] += 1;
                        z += 1;
                    }
                    // Carry-over for the post-advance ceiling loop.
                    let nextptr = i32::from(data[tv]);
                    let mut z_carry = (z - oz) - (nextptr - 1);
                    if nextptr == 0 {
                        break;
                    }
                    tv += (nextptr as usize) << 2;
                    let oz_new = i32::from(data[tv + 3]);
                    while z_carry < 0 {
                        let nz = ((z_carry + oz_new) >> 1) as usize;
                        // Read backwards: tv[z_carry * 4] sits inside
                        // the previous slab's tail, where voxlap
                        // stores the new slab's ceiling records.
                        let signed_off = (z_carry << 2) as isize;
                        let rec_off = (tv as isize + signed_off) as usize;
                        let rec = i32::from_le_bytes([
                            data[rec_off],
                            data[rec_off + 1],
                            data[rec_off + 2],
                            data[rec_off + 3],
                        ]);
                        let n_lane = mixn[nz] as usize;
                        mixc[nz * MIXC_LANES + n_lane] = rec;
                        mixn[nz] += 1;
                        z_carry += 1;
                    }
                    v_offset[i] = tv;
                }
                // After the flatten, restore v_offset[i] to the
                // FIRST slab — phase 2 walks v[besti] from the top
                // independently of phase 1's tv cursor.
                v_offset[i] = src_offsets[src_idx[i]] as usize;
            }

            // ---- Phase 2: 4-way z-merge → emit mip-N+1 slab bytes
            // into `tbuf`. Voxlap5.c:4779-4918.
            let mut cstat: i32 = 0;
            let mut oldn: usize = 0;
            let mut n: usize = 4;
            let mut z: i32 = i32::MIN; // 0x80000000 sentinel (voxlap5.c:4779)
            let mut cz: i32 = -1;

            loop {
                let oz = z;

                // Min of curz[0..4] using voxlap's branchless dance
                // (line 4785-4787).
                let mut besti = (((curz[1].wrapping_sub(curz[0])) as u32) >> 31) as i32;
                let i_alt =
                    ((((curz[3].wrapping_sub(curz[2])) as u32) >> 31) as i32).wrapping_add(2);
                let delta = curz[i_alt as usize].wrapping_sub(curz[besti as usize]);
                besti = besti.wrapping_add((delta >> 31) & (i_alt - besti));
                z = curz[besti as usize];
                if z >= MAXZDIM {
                    break;
                }

                // Maybe begin a new slab in tbuf.
                if cstat == 0 && (z >> 1) >= ((oz + 1) >> 1) {
                    if oz >= 0 {
                        tbuf[oldn] = ((n - oldn) >> 2) as u8;
                        tbuf[oldn + 2] = tbuf[oldn + 2].wrapping_sub(1);
                        // tbuf[n+3] = (oz + 1) >> 1 — z0 of the slab
                        // we're ABOUT to write next.
                        ensure_capacity(&mut tbuf, n + 4);
                        tbuf[n + 3] = (((oz + 1) >> 1) & 0xff) as u8;
                        oldn = n;
                        n += 4;
                    }
                    ensure_capacity(&mut tbuf, oldn + 4);
                    tbuf[oldn] = 0;
                    let initial = ((z >> 1) & 0xff) as u8;
                    tbuf[oldn + 1] = initial;
                    tbuf[oldn + 2] = initial;
                    cz = -1;
                }

                // 0x1111 tests the low bit of each column's 2-bit
                // state nibble (states are packed 4 bits apart; see
                // the update below).
                if cstat & 0x1111 != 0 {
                    let tbuf_z1c = i32::from(tbuf[oldn + 2]);
                    if (tbuf_z1c << 1) + 1 >= oz && cz < 0 {
                        // Continue the floor list: emit averaged
                        // colours per zz until we catch up.
                        while (i32::from(tbuf[oldn + 2]) << 1) < z {
                            let zz = i32::from(tbuf[oldn + 2]) as usize;
                            let n_vox = mixn[zz] as usize;
                            // Voxlap requires n_vox >= 1 here. If it
                            // somehow lands at 0, write zero — keeps
                            // the slab walker invariants intact.
                            let avg = if n_vox == 0 {
                                0
                            } else {
                                let lo = zz * MIXC_LANES;
                                average_packed_colours(&mixc[lo..lo + n_vox], n_vox)
                            };
                            mixn[zz] = 0;
                            ensure_capacity(&mut tbuf, n + 4);
                            tbuf[n..n + 4].copy_from_slice(&avg.to_le_bytes());
                            tbuf[oldn + 2] = tbuf[oldn + 2].wrapping_add(1);
                            n += 4;
                        }
                    } else {
                        if cz < 0 {
                            cz = oz >> 1;
                        } else if (cz << 1) + 1 < oz {
                            // Insert fake (single-voxel?) slab boundary.
                            tbuf[oldn] = ((n - oldn) >> 2) as u8;
                            tbuf[oldn + 2] = tbuf[oldn + 2].wrapping_sub(1);
                            ensure_capacity(&mut tbuf, n + 4);
                            tbuf[n] = 0;
                            let cz_byte = (cz & 0xff) as u8;
                            tbuf[n + 1] = cz_byte;
                            tbuf[n + 2] = cz_byte;
                            tbuf[n + 3] = cz_byte;
                            oldn = n;
                            n += 4;
                            cz = oz >> 1;
                        }
                        while (cz << 1) < z {
                            let zz = cz as usize;
                            let n_vox = mixn[zz] as usize;
                            let avg = if n_vox == 0 {
                                0
                            } else {
                                let lo = zz * MIXC_LANES;
                                average_packed_colours(&mixc[lo..lo + n_vox], n_vox)
                            };
                            mixn[zz] = 0;
                            ensure_capacity(&mut tbuf, n + 4);
                            tbuf[n..n + 4].copy_from_slice(&avg.to_le_bytes());
                            cz += 1;
                            n += 4;
                        }
                    }
                }

                // State machine update for besti (voxlap5.c:4887-4908):
                // a 2-bit counter per source column, one nibble apart,
                // masked by 0x3333.
                let bit_pos = (besti << 2) as i32;
                cstat = ((1i32 << bit_pos).wrapping_add(cstat)) & 0x3333;
                let state = (cstat >> bit_pos) & 3;
                let bi = besti as usize;
                match state {
                    0 => curz[bi] = curzn[bi][0],
                    1 => curz[bi] = curzn[bi][1],
                    2 => {
                        // Advance this source column to its next slab
                        // (or park it at MAXZDIM when exhausted).
                        let tv = v_offset[bi];
                        if data[tv] == 0 {
                            curz[bi] = MAXZDIM;
                        } else {
                            let n_floor = i32::from(data[tv + 2]) - i32::from(data[tv + 1]) + 1;
                            let i_carry = n_floor - (i32::from(data[tv]) - 1);
                            let new_tv = tv + ((i32::from(data[tv]) as usize) << 2);
                            curz[bi] = i32::from(data[new_tv + 3]) + i_carry;
                            curzn[bi][3] = i32::from(data[new_tv + 3]);
                            curzn[bi][0] = i32::from(data[new_tv + 1]);
                            curzn[bi][1] = i32::from(data[new_tv + 2]) + 1;
                            v_offset[bi] = new_tv;
                        }
                    }
                    3 => curz[bi] = curzn[bi][3],
                    _ => unreachable!("state is masked to 0..=3"),
                }
            }

            // After loop: emit the final slab tail (voxlap5.c:4910-4918).
            tbuf[oldn + 2] = tbuf[oldn + 2].wrapping_sub(1);
            if cz >= 0 {
                tbuf[oldn] = ((n - oldn) >> 2) as u8;
                ensure_capacity(&mut tbuf, n + 4);
                tbuf[n] = 0;
                let cz_byte = (cz & 0xff) as u8;
                tbuf[n + 1] = cz_byte;
                tbuf[n + 2] = (cz - 1) as u8;
                tbuf[n + 3] = cz_byte;
                n += 4;
            }

            // Commit this column's slab bytes to new_data.
            let col_start = data_base
                + u32::try_from(new_data.len()).expect("mip data fits in u32 byte addressing");
            new_offsets.push(col_start);
            new_data.extend_from_slice(&tbuf[..n]);
        }
    }
    // Trailing sentinel: absolute byte offset where this mip's data
    // ends (== where the next mip would begin).
    new_offsets.push(
        data_base + u32::try_from(new_data.len()).expect("mip data fits in u32 byte addressing"),
    );

    (new_data, new_offsets)
}
811
/// Grow `tbuf` (zero-filling) so that every index in
/// `0..len_inclusive` is a valid write; no-op when the buffer is
/// already long enough. Never shrinks.
fn ensure_capacity(tbuf: &mut Vec<u8>, len_inclusive: usize) {
    tbuf.resize(tbuf.len().max(len_inclusive), 0);
}
818
/// Errors returned by [`parse`].
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum ParseError {
    /// File too small to even contain the 108-byte header.
    TooSmall { got: usize },
    /// Magic bytes are not `0x09072000`.
    BadMagic { got: u32 },
    /// xdim and ydim disagree (file format requires square maps).
    NonSquareVsid { x: u32, y: u32 },
    /// A read of `need` bytes at offset `at` would run past EOF.
    Truncated { at: usize, need: usize },
    /// While walking column `idx`'s slab chain, the cursor at offset
    /// `at` would have run past the end of the column data region.
    BadColumn { idx: u32, at: usize },
    /// File total size > `u32::MAX`. The internal `column_offset`
    /// table uses `u32` because realistic maps fit comfortably.
    FileTooLarge { got: usize },
}

impl fmt::Display for ParseError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Render the variant into one message, then emit it with a
        // single write; the text is identical for every variant.
        let msg = match self {
            Self::TooSmall { got } => {
                format!("vxl file too small ({got} bytes; need at least 108 byte header)")
            }
            Self::BadMagic { got } => {
                format!("vxl bad magic: got {got:#010x}, expected 0x09072000")
            }
            Self::NonSquareVsid { x, y } => {
                format!("vxl non-square dimensions: xdim={x}, ydim={y} (must be equal)")
            }
            Self::Truncated { at, need } => {
                format!("vxl truncated: need {need} bytes at offset {at}")
            }
            Self::BadColumn { idx, at } => {
                format!("vxl column {idx}: slab walker overran data region at offset {at}")
            }
            Self::FileTooLarge { got } => format!(
                "vxl file size {got} exceeds {} bytes that this parser handles",
                u32::MAX
            ),
        };
        f.write_str(&msg)
    }
}

impl std::error::Error for ParseError {}
869
870impl From<OutOfBounds> for ParseError {
871 fn from(e: OutOfBounds) -> Self {
872 Self::Truncated {
873 at: e.at,
874 need: e.need,
875 }
876 }
877}
878
879/// Parse a `.vxl` file's bytes into a [`Vxl`].
880///
881/// # Errors
882///
883/// Returns [`ParseError`] if `bytes` is shorter than the 108-byte
884/// header, if the magic mismatches, if xdim ≠ ydim, if the file size
885/// exceeds `u32::MAX` bytes, if a header field would run past EOF, or
886/// if any column's slab chain runs off the end of the data region.
887///
888/// # Panics
889///
890/// Cannot panic on valid input: `pos` is bounded by `data.len()` which
891/// the [`ParseError::FileTooLarge`] gate at the top of the function
892/// proves fits in `u32`. The internal `expect` calls would only fire
893/// on a logic bug in the walker.
894///
895/// # Examples
896///
897/// ```no_run
898/// use roxlap_formats::vxl;
899///
900/// let bytes = std::fs::read("oracle.vxl")?;
901/// let world = vxl::parse(&bytes)?;
902/// println!(
903/// "{}×{} VSID, {} mip levels, camera at {:?}",
904/// world.vsid,
905/// world.vsid,
906/// world.mip_base_offsets.len() - 1,
907/// world.ipo,
908/// );
909/// # Ok::<(), Box<dyn std::error::Error>>(())
910/// ```
911pub fn parse(bytes: &[u8]) -> Result<Vxl, ParseError> {
912 if bytes.len() < HEADER_LEN {
913 return Err(ParseError::TooSmall { got: bytes.len() });
914 }
915 if u32::try_from(bytes.len()).is_err() {
916 return Err(ParseError::FileTooLarge { got: bytes.len() });
917 }
918
919 let mut cur = Cursor::new(bytes);
920 let magic = cur.read_u32()?;
921 if magic != MAGIC {
922 return Err(ParseError::BadMagic { got: magic });
923 }
924 let xdim = cur.read_u32()?;
925 let ydim = cur.read_u32()?;
926 if xdim != ydim {
927 return Err(ParseError::NonSquareVsid { x: xdim, y: ydim });
928 }
929 let vsid = xdim;
930
931 let ipo = read_dpoint3d(&mut cur)?;
932 let ist = read_dpoint3d(&mut cur)?;
933 let ihe = read_dpoint3d(&mut cur)?;
934 let ifo = read_dpoint3d(&mut cur)?;
935
936 // Column data begins immediately after the header and runs to EOF.
937 let data_start = cur.pos;
938 let data: Box<[u8]> = bytes[data_start..].to_vec().into_boxed_slice();
939
940 let n_cols = (vsid as usize) * (vsid as usize);
941 let mut column_offset = Vec::with_capacity(n_cols + 1);
942 let mut pos = 0usize;
943 for i in 0..n_cols {
944 column_offset.push(u32::try_from(pos).expect("data offset within u32"));
945 loop {
946 if pos + 4 > data.len() {
947 return Err(ParseError::BadColumn {
948 idx: u32::try_from(i).unwrap_or(u32::MAX),
949 at: pos,
950 });
951 }
952 let nextptr = data[pos];
953 if nextptr == 0 {
954 // Last slab. Length = 4 + n_floor * 4 where
955 // n_floor = max(0, z1c - z1 + 1).
956 let z1 = data[pos + 1];
957 let z1c = data[pos + 2];
958 // n_floor = max(0, z1c - z1 + 1). Promote to i32 to
959 // avoid u8 underflow when z1c == z1 - 1 (the "no floor
960 // colours" case voxlap allows).
961 let n_floor_signed = i32::from(z1c) - i32::from(z1) + 1;
962 let n_floor = usize::try_from(n_floor_signed.max(0))
963 .expect("n_floor non-negative after .max(0)");
964 let last_size = 4 + n_floor * 4;
965 if pos + last_size > data.len() {
966 return Err(ParseError::BadColumn {
967 idx: u32::try_from(i).unwrap_or(u32::MAX),
968 at: pos,
969 });
970 }
971 pos += last_size;
972 break;
973 }
974 let advance = usize::from(nextptr) * 4;
975 // Guard against `nextptr * 4 < 4` which would loop forever.
976 if advance < 4 {
977 return Err(ParseError::BadColumn {
978 idx: u32::try_from(i).unwrap_or(u32::MAX),
979 at: pos,
980 });
981 }
982 pos += advance;
983 }
984 }
985 column_offset.push(u32::try_from(pos).expect("data offset within u32"));
986
987 let mip_base_offsets = Box::new([0usize, n_cols + 1]);
988 Ok(Vxl {
989 vsid,
990 ipo,
991 ist,
992 ihe,
993 ifo,
994 data,
995 column_offset: column_offset.into_boxed_slice(),
996 mip_base_offsets,
997 vbit: Box::new([]),
998 vbiti: 0,
999 })
1000}
1001
1002/// Serialise a [`Vxl`] back to bytes. Round-trips byte-equally with
1003/// the input that produced this `Vxl` via [`parse`].
1004#[must_use]
1005pub fn serialize(vxl: &Vxl) -> Vec<u8> {
1006 let mut out = Vec::with_capacity(HEADER_LEN + vxl.data.len());
1007 out.extend_from_slice(&MAGIC.to_le_bytes());
1008 out.extend_from_slice(&vxl.vsid.to_le_bytes());
1009 out.extend_from_slice(&vxl.vsid.to_le_bytes());
1010 write_dpoint3d(&mut out, &vxl.ipo);
1011 write_dpoint3d(&mut out, &vxl.ist);
1012 write_dpoint3d(&mut out, &vxl.ihe);
1013 write_dpoint3d(&mut out, &vxl.ifo);
1014 out.extend_from_slice(&vxl.data);
1015 out
1016}
1017
1018fn read_dpoint3d(cur: &mut Cursor<'_>) -> Result<[f64; 3], OutOfBounds> {
1019 let mut out = [0.0f64; 3];
1020 for v in &mut out {
1021 let buf = cur.read_bytes(8)?;
1022 *v = f64::from_bits(u64::from_le_bytes([
1023 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], buf[7],
1024 ]));
1025 }
1026 Ok(out)
1027}
1028
/// Append a voxlap `dpoint3d` — three `f64`s — as little-endian bytes.
fn write_dpoint3d(out: &mut Vec<u8>, p: &[f64; 3]) {
    // Flatten each component's 8 LE bytes into the output stream.
    out.extend(p.iter().flat_map(|v| v.to_bits().to_le_bytes()));
}
1034
1035// --- tests --------------------------------------------------------------
1036
#[cfg(test)]
mod tests {
    use std::io::Read;

    use flate2::read::GzDecoder;

    use super::*;

    /// Gzipped `oracle.vxl` produced by voxlaptest's oracle binary
    /// (run with `ROXLAP_SAVE_VXL=oracle.vxl`). At VSID = 2048 the raw
    /// file is ~37 MB but compresses to ~200 KB thanks to the largely
    /// uniform solid block surrounding the 448³ playable carve.
    const ORACLE_VXL_GZ: &[u8] = include_bytes!("../../../assets/oracle.vxl.gz");

    /// Decompress the bundled fixture into raw `.vxl` bytes.
    fn decode_oracle() -> Vec<u8> {
        let mut decoder = GzDecoder::new(ORACLE_VXL_GZ);
        // Pre-size for the ~37 MB decompressed fixture to avoid regrowth.
        let mut out = Vec::with_capacity(40 * 1024 * 1024);
        decoder.read_to_end(&mut out).expect("ungzip oracle.vxl.gz");
        out
    }

    #[test]
    fn parse_oracle_header() {
        let bytes = decode_oracle();
        let vxl = parse(&bytes).expect("parse oracle.vxl");
        // voxlaptest's fork uses VSID = 2048.
        assert_eq!(vxl.vsid, 2048);
        // The placeholder camera vectors written by oracle.c match the
        // values we set in tests/oracle/oracle.c's savevxl call. Compare
        // bit patterns to dodge clippy::float_cmp — these are exact
        // integer-valued doubles so the comparison is well-defined.
        let bits = |a: [f64; 3]| a.map(f64::to_bits);
        assert_eq!(bits(vxl.ipo), bits([1024.0, 1024.0, 128.0]));
        assert_eq!(bits(vxl.ist), bits([1.0, 0.0, 0.0]));
        assert_eq!(bits(vxl.ihe), bits([0.0, 0.0, 1.0]));
        assert_eq!(bits(vxl.ifo), bits([0.0, 1.0, 0.0]));
        // 2048 * 2048 = 4_194_304 columns; column_offset has one extra entry.
        assert_eq!(vxl.column_offset.len(), 4_194_304 + 1);
    }

    #[test]
    fn oracle_columns_partition_data_exactly() {
        let bytes = decode_oracle();
        let vxl = parse(&bytes).expect("parse oracle.vxl");
        // First column starts at offset 0, last sentinel equals data.len().
        assert_eq!(vxl.column_offset[0], 0);
        assert_eq!(
            vxl.column_offset[vxl.column_offset.len() - 1] as usize,
            vxl.data.len()
        );
        // Every column has at least one 4-byte slab header.
        let n_cols = (vxl.vsid as usize) * (vxl.vsid as usize);
        let min_col_len = (0..n_cols)
            .map(|i| vxl.column_data(i).len())
            .min()
            .expect("at least one column");
        assert!(min_col_len >= 4);
    }

    #[test]
    fn oracle_solid_corner_column_has_minimal_slab() {
        let bytes = decode_oracle();
        let vxl = parse(&bytes).expect("parse oracle.vxl");
        // The carve in tests/oracle/oracle.c is at x=800..1248, y=800..1248.
        // Column (0, 0) is well outside that range — solid block all the
        // way down, so its slab list should be the minimal one-slab form.
        let col = vxl.column_data(0);
        // Last slab nextptr == 0 must occur somewhere; the simplest valid
        // column is exactly one slab (header only or header + a few colours).
        // We assert column length is small (≤ 32 bytes — much less than a
        // carved column would be).
        assert!(
            col.len() <= 32,
            "solid corner column should be tiny; got {} bytes",
            col.len()
        );
    }

    #[test]
    fn oracle_roundtrips_byte_equal() {
        // parse → serialize must reproduce the input exactly.
        let bytes = decode_oracle();
        let vxl = parse(&bytes).expect("parse oracle.vxl");
        let out = serialize(&vxl);
        assert_eq!(out.len(), bytes.len(), "length differs");
        assert_eq!(out, bytes, "byte content differs");
    }

    #[test]
    fn parse_truncated_header_fails() {
        // 32 bytes is far short of the 108-byte fixed header.
        let r = parse(&[0u8; 32]);
        assert!(matches!(r, Err(ParseError::TooSmall { .. })));
    }

    #[test]
    fn parse_bad_magic_fails() {
        // Corrupt one byte of the magic; everything else stays valid.
        let mut bad = decode_oracle();
        bad[0] ^= 0xff;
        let r = parse(&bad);
        assert!(matches!(r, Err(ParseError::BadMagic { .. })));
    }

    /// Minimal valid Vxl with `vsid = 2`, four columns, each one slab
    /// with a single floor voxel at z = 10. Every column carries a
    /// distinct BGRA colour.
    fn build_synthetic_2x2(colours: [u32; 4]) -> Vxl {
        // Per-column slab bytes: header [0, 10, 10, 0] + colour record
        // (4 bytes) = 8 bytes per column. 4 columns = 32 bytes total.
        let mut data = Vec::with_capacity(32);
        for col_colour in colours {
            data.extend_from_slice(&[0, 10, 10, 0]);
            data.extend_from_slice(&col_colour.to_le_bytes());
        }
        // 4 columns at 8 bytes each, plus the data.len() sentinel.
        let column_offset: Box<[u32]> = vec![0u32, 8, 16, 24, 32].into_boxed_slice();
        Vxl {
            vsid: 2,
            ipo: [0.0; 3],
            ist: [1.0, 0.0, 0.0],
            ihe: [0.0, 0.0, 1.0],
            ifo: [0.0, 1.0, 0.0],
            data: data.into_boxed_slice(),
            column_offset,
            mip_base_offsets: Box::new([0, 5]),
            vbit: Box::new([]),
            vbiti: 0,
        }
    }

    #[test]
    fn generate_mips_skips_when_max_le_1() {
        // max_mips of 0 or 1 means "mip-0 only" — state must be untouched.
        let mut vxl = build_synthetic_2x2([1, 2, 3, 4]);
        let before_data_len = vxl.data.len();
        vxl.generate_mips(0);
        vxl.generate_mips(1);
        assert_eq!(vxl.mip_count(), 1);
        assert_eq!(vxl.data.len(), before_data_len);
        assert_eq!(vxl.mip_base_offsets.as_ref(), &[0usize, 5]);
    }

    #[test]
    fn generate_mips_2x2_produces_one_voxel_at_z5() {
        // Source: 4 columns, each one floor voxel at z = 10 with a
        // unique BGRA colour. Mip-1 collapses them to one column with
        // a single voxel at z = 5 (= 10 >> 1) coloured by the average.
        let colours = [
            0x0001_0101u32,
            0x0002_0202u32,
            0x0003_0303u32,
            0x0004_0404u32,
        ];
        let mut vxl = build_synthetic_2x2(colours);
        vxl.generate_mips(2);

        assert_eq!(vxl.mip_count(), 2);
        // Mip-0 sub-table preserved.
        assert_eq!(vxl.column_offset_for_mip(0).len(), 5);
        // Mip-1 has 1 column + sentinel = 2 entries.
        assert_eq!(vxl.column_offset_for_mip(1).len(), 2);

        // Single mip-1 column: header + 1 voxel = 8 bytes.
        let col = vxl.column_data_for_mip(1, 0);
        assert_eq!(col.len(), 8, "mip-1 column bytes: {col:?}");
        // Header: nextptr=0 (last slab), z1=z1c=5, z0=0 (dummy).
        assert_eq!(col[0], 0);
        assert_eq!(col[1], 5);
        assert_eq!(col[2], 5);
        assert_eq!(col[3], 0);

        // Voxel colour: average of inputs through voxlap's QMULMIP[3]
        // kernel. Sum of B/G/R is 1+2+3+4 = 10 per channel; sum of A
        // bytes is 0. avg(B/G/R) = ((10*2+1) * 0x2000) >> 16 = 2;
        // avg(A) = ((0*2+1) * 0x2000) >> 16 = 0.
        assert_eq!(col[4], 2, "B");
        assert_eq!(col[5], 2, "G");
        assert_eq!(col[6], 2, "R");
        assert_eq!(col[7], 0, "A");
    }

    #[test]
    fn generate_mips_idempotent_across_calls() {
        // Second invocation should yield bit-identical state.
        let colours = [0x10u32, 0x20, 0x30, 0x40];
        let mut a = build_synthetic_2x2(colours);
        let mut b = build_synthetic_2x2(colours);
        a.generate_mips(2);
        b.generate_mips(2);
        b.generate_mips(2);
        assert_eq!(a.data, b.data);
        assert_eq!(a.column_offset, b.column_offset);
        assert_eq!(a.mip_base_offsets, b.mip_base_offsets);
    }

    #[test]
    fn generate_mips_oracle_full_depth() {
        // Smoke test: oracle.vxl is 2048×2048, so 4 mips fit
        // (2048 → 1024 → 512 → 256). Verify each mip's sub-table
        // sizing and that mip-0 stays untouched.
        let bytes = decode_oracle();
        let mut vxl = parse(&bytes).expect("parse oracle.vxl");
        let mip0_data_len = vxl.column_offset[(2048 * 2048) as usize] as usize;
        let mip0_data_snapshot = vxl.data[..mip0_data_len].to_vec();

        vxl.generate_mips(4);
        assert_eq!(vxl.mip_count(), 4);
        for mip in 0..4u32 {
            let dim = (2048u32 >> mip) as usize;
            assert_eq!(
                vxl.column_offset_for_mip(mip).len(),
                dim * dim + 1,
                "mip-{mip} offset table length"
            );
        }
        // Mip-0 byte data must be untouched (multi-mip layout appends).
        assert_eq!(&vxl.data[..mip0_data_len], &mip0_data_snapshot[..]);
    }

    // ---- slab allocator (CD.2.0/2.1/2.2) ------------------------------

    #[test]
    fn slng_single_slab_with_one_floor_voxel() {
        // [nextptr=0, z1=10, z1c=10, z0=0] + 4-byte colour = 8 bytes.
        let slab = [0u8, 10, 10, 0, 0xff, 0, 0, 0];
        assert_eq!(slng(&slab), 8);
    }

    #[test]
    fn slng_single_slab_with_three_floor_voxels() {
        // [nextptr=0, z1=10, z1c=12, z0=0] + 3 colours = 16 bytes.
        let slab = [0u8, 10, 12, 0, 1, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0, 0];
        assert_eq!(slng(&slab), 16);
    }

    #[test]
    fn slng_single_slab_empty_floor() {
        // z1c < z1 → no floor colours; voxlap5.c stores n_floor as 0.
        // [nextptr=0, z1=10, z1c=9, z0=0] = 4 bytes only.
        let slab = [0u8, 10, 9, 0];
        assert_eq!(slng(&slab), 4);
    }

    #[test]
    fn slng_two_slab_chain() {
        // Slab 0: nextptr=2 (8 bytes) + 1 ceiling colour record + ...
        //   [2, 10, 11, 0, c0, c0, c0, c0]
        // Slab 1 (last): [0, 20, 22, 12] + 3 floor colours = 16 bytes
        // total = 8 + 16 = 24.
        let slab = [
            2u8, 10, 11, 0, // slab 0 header
            0xaa, 0, 0, 0, // ceiling colour
            0, 20, 22, 12, // slab 1 (last) header
            0xbb, 0, 0, 0, // floor 0
            0xcc, 0, 0, 0, // floor 1
            0xdd, 0, 0, 0, // floor 2
        ];
        assert_eq!(slng(&slab), 24);
    }

    #[test]
    fn reserve_edit_capacity_grows_data_and_marks_existing() {
        let mut vxl = build_synthetic_2x2([0xaa, 0xbb, 0xcc, 0xdd]);
        let original_len = vxl.data.len();
        assert_eq!(original_len, 32);

        vxl.reserve_edit_capacity(64);

        // Data grew by aligned headroom.
        assert_eq!(vxl.data.len(), 32 + 64);
        // Existing bytes unchanged.
        assert_eq!(vxl.data[0..4], [0, 10, 10, 0]);
        // vbit sized = ceil(96 / 4 / 32) = 1.
        assert_eq!(vxl.vbit.len(), 1);
        // First 8 dwords (32 bytes / 4) marked allocated.
        // bits 0..=7 set → 0xff.
        assert_eq!(vxl.vbit[0], 0xff);
        // vbiti reset.
        assert_eq!(vxl.vbiti, 0);
    }

    #[test]
    fn reserve_edit_capacity_aligns_headroom_up_to_dword() {
        let mut vxl = build_synthetic_2x2([1, 2, 3, 4]);
        // Asking for 5 bytes should round up to 8.
        vxl.reserve_edit_capacity(5);
        assert_eq!(vxl.data.len(), 32 + 8);
    }

    #[test]
    fn voxalloc_returns_offset_in_headroom() {
        let mut vxl = build_synthetic_2x2([1, 2, 3, 4]);
        vxl.reserve_edit_capacity(64);
        let off = vxl.voxalloc(8);
        // First free dword is at byte offset 32 (just past the
        // 8 packed columns).
        assert_eq!(off, 32);
        // Bits 8 and 9 (dwords past existing data) now set.
        assert_eq!(vxl.vbit[0] & ((1 << 8) | (1 << 9)), (1 << 8) | (1 << 9));
    }

    #[test]
    fn voxalloc_successive_returns_non_overlapping() {
        // Sequential allocations pack tightly: 8 + 8 + 16 bytes.
        let mut vxl = build_synthetic_2x2([1, 2, 3, 4]);
        vxl.reserve_edit_capacity(128);
        let a = vxl.voxalloc(8);
        let b = vxl.voxalloc(8);
        let c = vxl.voxalloc(16);
        assert_eq!(a, 32);
        assert_eq!(b, 40);
        assert_eq!(c, 48);
    }

    #[test]
    fn voxdealloc_clears_bits() {
        let mut vxl = build_synthetic_2x2([1, 2, 3, 4]);
        vxl.reserve_edit_capacity(128);
        let off = vxl.voxalloc(8);
        // off=32 → dword 8, len=8 → dwords 8..10 marked allocated.
        assert_eq!((vxl.vbit[0] >> 8) & 1, 1);
        assert_eq!((vxl.vbit[0] >> 9) & 1, 1);
        // Plant a valid slab at off so slng can recover the length.
        // [nextptr=0, z1=5, z1c=5, z0=0] + 1 colour = 8 bytes.
        vxl.data[off as usize..off as usize + 8]
            .copy_from_slice(&[0, 5, 5, 0, 0xa1, 0xa2, 0xa3, 0xa4]);
        vxl.voxdealloc(off);
        assert_eq!((vxl.vbit[0] >> 8) & 1, 0);
        assert_eq!((vxl.vbit[0] >> 9) & 1, 0);
    }

    #[test]
    fn voxdealloc_freed_region_reused_after_full_scan() {
        // Pool: 32 (existing) + 32 headroom = 64 bytes = 16 dwords.
        // Three 8-byte allocs fill dwords 8..14; vbiti reaches vend=14.
        // After freeing alloc #1, the next voxalloc resets vbiti to 0
        // and finds the freed dwords on the second pass.
        let mut vxl = build_synthetic_2x2([1, 2, 3, 4]);
        vxl.reserve_edit_capacity(32);
        let a = vxl.voxalloc(8);
        let _b = vxl.voxalloc(8);
        let _c = vxl.voxalloc(8);
        vxl.data[a as usize..a as usize + 8].copy_from_slice(&[0, 5, 5, 0, 0xa1, 0xa2, 0xa3, 0xa4]);
        vxl.voxdealloc(a);
        let reused = vxl.voxalloc(8);
        assert_eq!(reused, a, "freed region should be reused on rescan");
    }

    #[test]
    fn voxdealloc_cross_word_boundary() {
        let mut vxl = build_synthetic_2x2([1, 2, 3, 4]);
        // Build a ~200-byte pool: 32 (existing) + 192 headroom = 224 bytes
        // = 56 dwords. Words 0..=1 cover this — dword 31 is bit 31 of word 0,
        // dword 32 is bit 0 of word 1.
        vxl.reserve_edit_capacity(192);
        // Force vbiti up so successive allocs cross word boundary 31/32.
        // At this point bits 0..=7 are set (existing columns).
        // Allocate 24 dwords (96 bytes) starting at dword 8 → covers
        // dwords 8..=31 (still in word 0).
        let _ = vxl.voxalloc(96);
        assert_eq!(vxl.vbiti, 32);
        // Allocate another 16 bytes (4 dwords) — covers dwords 32..=35
        // in word 1.
        let off = vxl.voxalloc(16);
        assert_eq!(off, 32 * 4); // dword 32 → byte 128
        // Place a slab whose slng = 16 at off.
        // [nextptr=0, z1=0, z1c=2, z0=0] + 3 colours = 16 bytes.
        vxl.data[off as usize..off as usize + 16]
            .copy_from_slice(&[0, 0, 2, 0, 0xa, 0, 0, 0, 0xb, 0, 0, 0, 0xc, 0, 0, 0]);
        // The dword range to clear is [32, 36) — but it's within
        // word 1 entirely, so single-word path. Construct a different
        // case: free a span that crosses 31/32.
        // We need an allocation that spans dwords 30..=33 (cross word).
        // Reset: do a fresh upgrade.
        let mut vxl2 = build_synthetic_2x2([1, 2, 3, 4]);
        vxl2.reserve_edit_capacity(192);
        // Skip past existing 8 dwords with a 22-dword pad alloc.
        let pad = vxl2.voxalloc(22 * 4);
        assert_eq!(pad, 32);
        // Now allocate 16 bytes (4 dwords) at dword 30 — crosses 31/32.
        let cross = vxl2.voxalloc(16);
        assert_eq!(cross, 30 * 4);
        // Verify bits 30, 31 in word 0 and bits 0, 1 in word 1 set.
        assert!(
            (vxl2.vbit[0] >> 30) & 1 == 1
                && (vxl2.vbit[0] >> 31) & 1 == 1
                && vxl2.vbit[1] & 1 == 1
                && (vxl2.vbit[1] >> 1) & 1 == 1,
            "bits across word boundary should all be set"
        );
        // Plant a 16-byte slab so slng works.
        vxl2.data[cross as usize..cross as usize + 16]
            .copy_from_slice(&[0, 0, 2, 0, 0xa, 0, 0, 0, 0xb, 0, 0, 0, 0xc, 0, 0, 0]);
        vxl2.voxdealloc(cross);
        // Cross-boundary bits cleared.
        assert_eq!((vxl2.vbit[0] >> 30) & 1, 0);
        assert_eq!((vxl2.vbit[0] >> 31) & 1, 0);
        assert_eq!(vxl2.vbit[1] & 1, 0);
        assert_eq!((vxl2.vbit[1] >> 1) & 1, 0);
    }

    #[test]
    #[should_panic(expected = "voxalloc requires reserve_edit_capacity")]
    fn voxalloc_panics_without_reserve() {
        // No reserve_edit_capacity call → no edit pool to allocate from.
        let mut vxl = build_synthetic_2x2([1, 2, 3, 4]);
        let _ = vxl.voxalloc(8);
    }

    #[test]
    #[should_panic(expected = "voxalloc n_bytes must be a positive multiple of 4")]
    fn voxalloc_panics_on_bad_size() {
        // 7 is not dword-aligned.
        let mut vxl = build_synthetic_2x2([1, 2, 3, 4]);
        vxl.reserve_edit_capacity(64);
        let _ = vxl.voxalloc(7);
    }

    #[test]
    #[should_panic(expected = "voxalloc: vbuf full")]
    fn voxalloc_panics_when_pool_full() {
        let mut vxl = build_synthetic_2x2([1, 2, 3, 4]);
        // Headroom = 16 bytes = 4 dwords. Alloc 8 bytes twice → 4 dwords used,
        // 0 left. Third alloc must fail.
        vxl.reserve_edit_capacity(16);
        let _ = vxl.voxalloc(8);
        let _ = vxl.voxalloc(8);
        let _ = vxl.voxalloc(8); // panics
    }
}
1459}