// herkos_runtime/memory.rs

//! WebAssembly linear memory — `IsolatedMemory<const MAX_PAGES: usize>`.
//!
//! The backing array is `[[u8; PAGE_SIZE]; MAX_PAGES]` — a 2D array that
//! is contiguous in memory. We use `as_flattened()` (stable since Rust 1.80)
//! to get a flat `&[u8]` view for the inner functions.
//!
//! Note: the spec shows `[u8; MAX_PAGES * PAGE_SIZE]` but that requires the
//! unstable `generic_const_exprs` feature. The 2D array achieves identical
//! layout on stable Rust.
//!
//! Load/store operations use the **outline pattern** (§13.3): the generic
//! wrapper delegates to a non-generic inner function so that only one copy
//! of the actual bounds-checking logic exists in the binary.

15use crate::{WasmResult, WasmTrap, PAGE_SIZE};
16
17/// Isolated linear memory for a single Wasm module.
18///
19/// `MAX_PAGES` is the compile-time maximum (from the Wasm module's declared
20/// maximum or a CLI override). The backing array is fully pre-allocated.
21pub struct IsolatedMemory<const MAX_PAGES: usize> {
22    /// Backing storage — `MAX_PAGES` pages of `PAGE_SIZE` bytes each.
23    /// Contiguous in memory, identical layout to `[u8; MAX_PAGES * PAGE_SIZE]`.
24    pages: [[u8; PAGE_SIZE]; MAX_PAGES],
25    /// Number of currently active pages. Starts at `initial_pages`,
26    /// incremented by `grow`. Accesses beyond `active_pages * PAGE_SIZE`
27    /// are out-of-bounds traps.
28    active_pages: usize,
29}
30
31impl<const MAX_PAGES: usize> IsolatedMemory<MAX_PAGES> {
32    /// Create a new `IsolatedMemory` with `initial_pages` active.
33    ///
34    /// # Errors
35    /// Returns `ConstructionError::MemoryInitialPagesExceedsMax` if `initial_pages > MAX_PAGES`.
36    #[inline(never)]
37    pub fn try_new(initial_pages: usize) -> Result<Self, crate::ConstructionError> {
38        if initial_pages > MAX_PAGES {
39            return Err(crate::ConstructionError::MemoryInitialPagesExceedsMax {
40                initial: initial_pages,
41                max: MAX_PAGES,
42            });
43        }
44        Ok(Self {
45            pages: [[0u8; PAGE_SIZE]; MAX_PAGES],
46            active_pages: initial_pages,
47        })
48    }
49
50    /// Initialize an `IsolatedMemory` in-place within a caller-provided slot.
51    ///
52    /// Unlike `try_new`, this writes directly into `slot` without ever creating
53    /// a large `Result<Self, E>` on the call stack. Use this when `MAX_PAGES`
54    /// is large, to avoid stack overflow in debug builds.
55    ///
56    /// # Errors
57    /// Returns `ConstructionError::MemoryInitialPagesExceedsMax` if `initial_pages > MAX_PAGES`.
58    #[inline(never)]
59    pub fn try_init(
60        slot: &mut core::mem::MaybeUninit<Self>,
61        initial_pages: usize,
62    ) -> Result<(), crate::ConstructionError> {
63        if initial_pages > MAX_PAGES {
64            return Err(crate::ConstructionError::MemoryInitialPagesExceedsMax {
65                initial: initial_pages,
66                max: MAX_PAGES,
67            });
68        }
69        let ptr = slot.as_mut_ptr();
70        // SAFETY: ptr comes from MaybeUninit so it is valid for writes and
71        // correctly aligned. Both fields are written before the caller can
72        // call assume_init on the slot.
73        unsafe {
74            // Zero the entire pages array in-place — the compiler emits a
75            // single memset; no large stack temporary is created.
76            core::ptr::addr_of_mut!((*ptr).pages).write_bytes(0, 1);
77            core::ptr::addr_of_mut!((*ptr).active_pages).write(initial_pages);
78        }
79        Ok(())
80    }
81
82    /// Current number of active pages.
83    #[inline(always)]
84    pub fn page_count(&self) -> usize {
85        self.active_pages
86    }
87
88    /// Current active size in bytes.
89    #[inline(always)]
90    pub fn active_size(&self) -> usize {
91        self.active_pages * PAGE_SIZE
92    }
93
94    /// Wasm `memory.grow` — returns previous page count, or -1 on failure.
95    /// No allocation occurs: the backing array is already sized to `MAX_PAGES`.
96    pub fn grow(&mut self, delta: u32) -> i32 {
97        let old = self.active_pages;
98        let new = old.wrapping_add(delta as usize);
99        if new > MAX_PAGES {
100            return -1;
101        }
102        // Zero-init the new pages (Wasm spec requires it).
103        for page in &mut self.pages[old..new] {
104            page.fill(0);
105        }
106        self.active_pages = new;
107        old as i32
108    }
109
110    /// Wasm `memory.size` — returns current page count.
111    #[inline(always)]
112    pub fn size(&self) -> i32 {
113        self.active_pages as i32
114    }
115
116    /// Flat read-only view of the full backing memory.
117    #[inline(always)]
118    fn flat(&self) -> &[u8] {
119        self.pages.as_flattened()
120    }
121
122    /// Flat mutable view of the full backing memory.
123    #[inline(always)]
124    fn flat_mut(&mut self) -> &mut [u8] {
125        self.pages.as_flattened_mut()
126    }
127
128    // ── Bulk memory operations ────────────────────────────────────────
129
130    /// Wasm `memory.copy` — copy `len` bytes from `src` to `dst`.
131    ///
132    /// Semantics match `memmove`: overlapping source and destination regions
133    /// are handled correctly. Traps (`OutOfBounds`) if either region extends
134    /// beyond the current active memory.
135    pub fn memory_copy(&mut self, dst: u32, src: u32, len: u32) -> WasmResult<()> {
136        let active = self.active_size();
137        let dst = dst as usize;
138        let src = src as usize;
139        let len = len as usize;
140        if src.checked_add(len).is_none_or(|end| end > active)
141            || dst.checked_add(len).is_none_or(|end| end > active)
142        {
143            return Err(WasmTrap::OutOfBounds);
144        }
145        self.flat_mut().copy_within(src..src + len, dst);
146        Ok(())
147    }
148
149    // ── Bounds-checked (safe) load/store ──────────────────────────────
150
151    /// Load an i32 from linear memory with bounds checking.
152    #[inline(always)]
153    pub fn load_i32(&self, offset: usize) -> WasmResult<i32> {
154        load_i32_inner(self.flat(), self.active_size(), offset)
155    }
156
157    /// Load an i64 from linear memory with bounds checking.
158    #[inline(always)]
159    pub fn load_i64(&self, offset: usize) -> WasmResult<i64> {
160        load_i64_inner(self.flat(), self.active_size(), offset)
161    }
162
163    /// Load a u8 (i32.load8_u) from linear memory with bounds checking.
164    #[inline(always)]
165    pub fn load_u8(&self, offset: usize) -> WasmResult<u8> {
166        load_u8_inner(self.flat(), self.active_size(), offset)
167    }
168
169    /// Load a u16 (i32.load16_u) from linear memory with bounds checking.
170    #[inline(always)]
171    pub fn load_u16(&self, offset: usize) -> WasmResult<u16> {
172        load_u16_inner(self.flat(), self.active_size(), offset)
173    }
174
175    /// Load an f32 from linear memory with bounds checking.
176    #[inline(always)]
177    pub fn load_f32(&self, offset: usize) -> WasmResult<f32> {
178        load_f32_inner(self.flat(), self.active_size(), offset)
179    }
180
181    /// Load an f64 from linear memory with bounds checking.
182    #[inline(always)]
183    pub fn load_f64(&self, offset: usize) -> WasmResult<f64> {
184        load_f64_inner(self.flat(), self.active_size(), offset)
185    }
186
187    /// Store an i32 into linear memory with bounds checking.
188    #[inline(always)]
189    pub fn store_i32(&mut self, offset: usize, value: i32) -> WasmResult<()> {
190        let active = self.active_size();
191        store_i32_inner(self.flat_mut(), active, offset, value)
192    }
193
194    /// Store an i64 into linear memory with bounds checking.
195    #[inline(always)]
196    pub fn store_i64(&mut self, offset: usize, value: i64) -> WasmResult<()> {
197        let active = self.active_size();
198        store_i64_inner(self.flat_mut(), active, offset, value)
199    }
200
201    /// Store a u8 (i32.store8) into linear memory with bounds checking.
202    #[inline(always)]
203    pub fn store_u8(&mut self, offset: usize, value: u8) -> WasmResult<()> {
204        let active = self.active_size();
205        store_u8_inner(self.flat_mut(), active, offset, value)
206    }
207
208    /// Store a u16 (i32.store16) into linear memory with bounds checking.
209    #[inline(always)]
210    pub fn store_u16(&mut self, offset: usize, value: u16) -> WasmResult<()> {
211        let active = self.active_size();
212        store_u16_inner(self.flat_mut(), active, offset, value)
213    }
214
215    /// Store an f32 into linear memory with bounds checking.
216    #[inline(always)]
217    pub fn store_f32(&mut self, offset: usize, value: f32) -> WasmResult<()> {
218        let active = self.active_size();
219        store_f32_inner(self.flat_mut(), active, offset, value)
220    }
221
222    /// Store an f64 into linear memory with bounds checking.
223    #[inline(always)]
224    pub fn store_f64(&mut self, offset: usize, value: f64) -> WasmResult<()> {
225        let active = self.active_size();
226        store_f64_inner(self.flat_mut(), active, offset, value)
227    }
228
229    /// Initialize a region of memory from a byte slice (Wasm data segment).
230    ///
231    /// Copies `data` into linear memory starting at `offset`. Equivalent to
232    /// calling `store_u8` for each byte, but avoids emitting N separate
233    /// function calls in generated code.
234    ///
235    /// # Errors
236    /// Returns `Err(WasmTrap::OutOfBounds)` if `offset + data.len()` exceeds
237    /// `active_pages * PAGE_SIZE`.
238    #[inline(always)]
239    pub fn init_data(&mut self, offset: usize, data: &[u8]) -> WasmResult<()> {
240        let active = self.active_size();
241        init_data_inner(self.flat_mut(), active, offset, data)
242    }
243
244    // ── Unchecked (verified) load/store ───────────────────────────────
245    //
246    // These skip bounds checking entirely. The caller MUST guarantee that
247    // the access is in-bounds, justified by a formal proof.
248
249    /// Load i32 without bounds checking.
250    ///
251    /// # Safety
252    /// Caller must guarantee `offset + 3 < active_size()`.
253    #[inline(always)]
254    pub unsafe fn load_i32_unchecked(&self, offset: usize) -> i32 {
255        load_i32_unchecked_inner(self.flat(), offset)
256    }
257
258    /// Load i64 without bounds checking.
259    ///
260    /// # Safety
261    /// Caller must guarantee `offset + 7 < active_size()`.
262    #[inline(always)]
263    pub unsafe fn load_i64_unchecked(&self, offset: usize) -> i64 {
264        load_i64_unchecked_inner(self.flat(), offset)
265    }
266
267    /// Store i32 without bounds checking.
268    ///
269    /// # Safety
270    /// Caller must guarantee `offset + 3 < active_size()`.
271    #[inline(always)]
272    pub unsafe fn store_i32_unchecked(&mut self, offset: usize, value: i32) {
273        store_i32_unchecked_inner(self.flat_mut(), offset, value)
274    }
275
276    /// Store i64 without bounds checking.
277    ///
278    /// # Safety
279    /// Caller must guarantee `offset + 7 < active_size()`.
280    #[inline(always)]
281    pub unsafe fn store_i64_unchecked(&mut self, offset: usize, value: i64) {
282        store_i64_unchecked_inner(self.flat_mut(), offset, value)
283    }
284
285    /// Read-only access to the active memory region.
286    #[inline(always)]
287    pub fn as_slice(&self) -> &[u8] {
288        &self.flat()[..self.active_size()]
289    }
290
291    /// Mutable access to the active memory region.
292    #[inline(always)]
293    pub fn as_mut_slice(&mut self) -> &mut [u8] {
294        let size = self.active_size();
295        &mut self.flat_mut()[..size]
296    }
297}
298
// ── Helpers ───────────────────────────────────────────────────────────
300
301/// Bounds-check and return the sub-slice `memory[offset..offset+N]`.
302/// Returns `Err(OutOfBounds)` on overflow or out-of-range — never panics.
303#[inline(always)]
304fn checked_slice(
305    memory: &[u8],
306    active_bytes: usize,
307    offset: usize,
308    len: usize,
309) -> WasmResult<&[u8]> {
310    let end = offset.checked_add(len).ok_or(WasmTrap::OutOfBounds)?;
311    if end > active_bytes {
312        return Err(WasmTrap::OutOfBounds);
313    }
314    // SAFETY: we just verified end <= active_bytes <= memory.len().
315    // get() would also work but returns Option, adding another branch.
316    // This is safe because the bounds are proven above.
317    memory.get(offset..end).ok_or(WasmTrap::OutOfBounds)
318}
319
320/// Mutable variant of `checked_slice`.
321#[inline(always)]
322fn checked_slice_mut(
323    memory: &mut [u8],
324    active_bytes: usize,
325    offset: usize,
326    len: usize,
327) -> WasmResult<&mut [u8]> {
328    let end = offset.checked_add(len).ok_or(WasmTrap::OutOfBounds)?;
329    if end > active_bytes {
330        return Err(WasmTrap::OutOfBounds);
331    }
332    memory.get_mut(offset..end).ok_or(WasmTrap::OutOfBounds)
333}
334
335/// Convert a slice to a fixed-size array. Returns `Err(OutOfBounds)` if
336/// the length doesn't match — never panics.
337#[inline(always)]
338fn to_array<const N: usize>(slice: &[u8]) -> WasmResult<[u8; N]> {
339    slice.try_into().map_err(|_| WasmTrap::OutOfBounds)
340}
341
// ── Non-generic inner functions (outline pattern, §13.3) ─────────────
//
// ONE copy of each function in the binary, regardless of how many
// `MAX_PAGES` instantiations exist. The generic wrappers above compile
// to a single call instruction each.
//
// No unwrap(), no indexing, no panic paths.
349
350#[inline(never)]
351fn load_i32_inner(memory: &[u8], active_bytes: usize, offset: usize) -> WasmResult<i32> {
352    let s = checked_slice(memory, active_bytes, offset, 4)?;
353    Ok(i32::from_le_bytes(to_array(s)?))
354}
355
356#[inline(never)]
357fn load_i64_inner(memory: &[u8], active_bytes: usize, offset: usize) -> WasmResult<i64> {
358    let s = checked_slice(memory, active_bytes, offset, 8)?;
359    Ok(i64::from_le_bytes(to_array(s)?))
360}
361
362#[inline(never)]
363fn load_u8_inner(memory: &[u8], active_bytes: usize, offset: usize) -> WasmResult<u8> {
364    let s = checked_slice(memory, active_bytes, offset, 1)?;
365    Ok(s[0])
366}
367
368#[inline(never)]
369fn load_u16_inner(memory: &[u8], active_bytes: usize, offset: usize) -> WasmResult<u16> {
370    let s = checked_slice(memory, active_bytes, offset, 2)?;
371    Ok(u16::from_le_bytes(to_array(s)?))
372}
373
374#[inline(never)]
375fn load_f32_inner(memory: &[u8], active_bytes: usize, offset: usize) -> WasmResult<f32> {
376    let s = checked_slice(memory, active_bytes, offset, 4)?;
377    Ok(f32::from_le_bytes(to_array(s)?))
378}
379
380#[inline(never)]
381fn load_f64_inner(memory: &[u8], active_bytes: usize, offset: usize) -> WasmResult<f64> {
382    let s = checked_slice(memory, active_bytes, offset, 8)?;
383    Ok(f64::from_le_bytes(to_array(s)?))
384}
385
386#[inline(never)]
387fn store_i32_inner(
388    memory: &mut [u8],
389    active_bytes: usize,
390    offset: usize,
391    value: i32,
392) -> WasmResult<()> {
393    let s = checked_slice_mut(memory, active_bytes, offset, 4)?;
394    s.copy_from_slice(&value.to_le_bytes());
395    Ok(())
396}
397
398#[inline(never)]
399fn store_i64_inner(
400    memory: &mut [u8],
401    active_bytes: usize,
402    offset: usize,
403    value: i64,
404) -> WasmResult<()> {
405    let s = checked_slice_mut(memory, active_bytes, offset, 8)?;
406    s.copy_from_slice(&value.to_le_bytes());
407    Ok(())
408}
409
410#[inline(never)]
411fn store_u8_inner(
412    memory: &mut [u8],
413    active_bytes: usize,
414    offset: usize,
415    value: u8,
416) -> WasmResult<()> {
417    let s = checked_slice_mut(memory, active_bytes, offset, 1)?;
418    s[0] = value;
419    Ok(())
420}
421
422#[inline(never)]
423fn store_u16_inner(
424    memory: &mut [u8],
425    active_bytes: usize,
426    offset: usize,
427    value: u16,
428) -> WasmResult<()> {
429    let s = checked_slice_mut(memory, active_bytes, offset, 2)?;
430    s.copy_from_slice(&value.to_le_bytes());
431    Ok(())
432}
433
434#[inline(never)]
435fn store_f32_inner(
436    memory: &mut [u8],
437    active_bytes: usize,
438    offset: usize,
439    value: f32,
440) -> WasmResult<()> {
441    let s = checked_slice_mut(memory, active_bytes, offset, 4)?;
442    s.copy_from_slice(&value.to_le_bytes());
443    Ok(())
444}
445
446#[inline(never)]
447fn store_f64_inner(
448    memory: &mut [u8],
449    active_bytes: usize,
450    offset: usize,
451    value: f64,
452) -> WasmResult<()> {
453    let s = checked_slice_mut(memory, active_bytes, offset, 8)?;
454    s.copy_from_slice(&value.to_le_bytes());
455    Ok(())
456}
457
458#[inline(never)]
459fn init_data_inner(
460    memory: &mut [u8],
461    active_bytes: usize,
462    offset: usize,
463    data: &[u8],
464) -> WasmResult<()> {
465    let dst = checked_slice_mut(memory, active_bytes, offset, data.len())?;
466    dst.copy_from_slice(data);
467    Ok(())
468}
469
// ── Unchecked inner functions ─────────────────────────────────────────
//
// SAFETY: the caller (verified backend) guarantees the offset is in-bounds,
// justified by a formal proof. These use raw pointer arithmetic
// (no bounds check) and read/write_unaligned (no alignment requirement,
// matching Wasm's unaligned memory semantics).
476
/// Unchecked little-endian i32 load via unaligned raw-pointer read.
///
/// # Safety
/// Caller must guarantee `offset + 4 <= memory.len()`.
#[inline(never)]
unsafe fn load_i32_unchecked_inner(memory: &[u8], offset: usize) -> i32 {
    let ptr = memory.as_ptr().add(offset) as *const i32;
    i32::from_le(ptr.read_unaligned())
}
482
/// Unchecked little-endian i64 load via unaligned raw-pointer read.
///
/// # Safety
/// Caller must guarantee `offset + 8 <= memory.len()`.
#[inline(never)]
unsafe fn load_i64_unchecked_inner(memory: &[u8], offset: usize) -> i64 {
    let ptr = memory.as_ptr().add(offset) as *const i64;
    i64::from_le(ptr.read_unaligned())
}
488
/// Unchecked little-endian i32 store via unaligned raw-pointer write.
///
/// # Safety
/// Caller must guarantee `offset + 4 <= memory.len()`.
#[inline(never)]
unsafe fn store_i32_unchecked_inner(memory: &mut [u8], offset: usize, value: i32) {
    let ptr = memory.as_mut_ptr().add(offset) as *mut i32;
    ptr.write_unaligned(value.to_le());
}
494
/// Unchecked little-endian i64 store via unaligned raw-pointer write.
///
/// # Safety
/// Caller must guarantee `offset + 8 <= memory.len()`.
#[inline(never)]
unsafe fn store_i64_unchecked_inner(memory: &mut [u8], offset: usize, value: i64) {
    let ptr = memory.as_mut_ptr().add(offset) as *mut i64;
    ptr.write_unaligned(value.to_le());
}
500
#[cfg(test)]
mod tests {
    use super::*;

    // Use MAX_PAGES=1 for tests — 1 page = 64 KiB, fits on stack in test.
    type Mem = IsolatedMemory<1>;

    #[test]
    fn new_initializes_to_zero() {
        let mem = Mem::try_new(1).unwrap();
        assert_eq!(mem.page_count(), 1);
        assert_eq!(mem.active_size(), PAGE_SIZE);
        assert!(mem.as_slice().iter().all(|&b| b == 0));
    }

    #[test]
    fn try_new_fails_if_initial_exceeds_max() {
        let result = Mem::try_new(2);
        assert!(result.is_err());
        assert!(matches!(
            result,
            Err(crate::ConstructionError::MemoryInitialPagesExceedsMax { initial: 2, max: 1 })
        ));
    }

    // ── grow ──

    #[test]
    fn grow_success() {
        let mut mem = IsolatedMemory::<4>::try_new(1).unwrap();
        assert_eq!(mem.grow(2), 1); // old page count
        assert_eq!(mem.page_count(), 3);
    }

    #[test]
    fn grow_to_max() {
        let mut mem = IsolatedMemory::<4>::try_new(1).unwrap();
        assert_eq!(mem.grow(3), 1);
        assert_eq!(mem.page_count(), 4);
    }

    #[test]
    fn grow_beyond_max_fails() {
        let mut mem = IsolatedMemory::<4>::try_new(1).unwrap();
        assert_eq!(mem.grow(4), -1); // would be 5 pages > 4
        assert_eq!(mem.page_count(), 1); // unchanged
    }

    #[test]
    fn grow_huge_delta_fails() {
        let mut mem = Mem::try_new(1).unwrap();
        // `1 + u32::MAX` pages can never fit in MAX_PAGES — must fail
        // cleanly with -1, never wrap or panic.
        assert_eq!(mem.grow(u32::MAX), -1);
        assert_eq!(mem.page_count(), 1);
    }

    #[test]
    fn grow_zero_is_noop() {
        let mut mem = Mem::try_new(1).unwrap();
        assert_eq!(mem.grow(0), 1);
        assert_eq!(mem.page_count(), 1);
    }

    #[test]
    fn grow_zeroes_new_pages() {
        let mut mem = IsolatedMemory::<2>::try_new(1).unwrap();
        assert_eq!(mem.grow(1), 1);
        // Verify new page is zero via flat view
        let flat = mem.flat();
        let new_start = PAGE_SIZE;
        let new_end = 2 * PAGE_SIZE;
        assert!(flat[new_start..new_end].iter().all(|&b| b == 0));
    }

    #[test]
    fn size_returns_page_count() {
        let mem = IsolatedMemory::<4>::try_new(2).unwrap();
        assert_eq!(mem.size(), 2);
    }

    // ── load/store i32 ──

    #[test]
    fn store_load_i32_roundtrip() {
        let mut mem = Mem::try_new(1).unwrap();
        mem.store_i32(100, 0x12345678).unwrap();
        assert_eq!(mem.load_i32(100), Ok(0x12345678));
    }

    #[test]
    fn load_i32_out_of_bounds() {
        let mem = Mem::try_new(1).unwrap();
        // Last valid offset for i32: PAGE_SIZE - 4
        assert!(mem.load_i32(PAGE_SIZE - 4).is_ok());
        assert_eq!(mem.load_i32(PAGE_SIZE - 3), Err(WasmTrap::OutOfBounds));
        assert_eq!(mem.load_i32(PAGE_SIZE), Err(WasmTrap::OutOfBounds));
    }

    #[test]
    fn store_i32_out_of_bounds() {
        let mut mem = Mem::try_new(1).unwrap();
        assert!(mem.store_i32(PAGE_SIZE - 4, 42).is_ok());
        assert_eq!(mem.store_i32(PAGE_SIZE - 3, 42), Err(WasmTrap::OutOfBounds));
    }

    #[test]
    fn load_i32_offset_overflow() {
        let mem = Mem::try_new(1).unwrap();
        assert_eq!(mem.load_i32(usize::MAX), Err(WasmTrap::OutOfBounds));
    }

    // ── load/store i64 ──

    #[test]
    fn store_load_i64_roundtrip() {
        let mut mem = Mem::try_new(1).unwrap();
        mem.store_i64(200, 0x0102030405060708i64).unwrap();
        assert_eq!(mem.load_i64(200), Ok(0x0102030405060708i64));
    }

    #[test]
    fn load_i64_out_of_bounds() {
        let mem = Mem::try_new(1).unwrap();
        assert!(mem.load_i64(PAGE_SIZE - 8).is_ok());
        assert_eq!(mem.load_i64(PAGE_SIZE - 7), Err(WasmTrap::OutOfBounds));
    }

    // ── load/store u8 ──

    #[test]
    fn store_load_u8_roundtrip() {
        let mut mem = Mem::try_new(1).unwrap();
        mem.store_u8(0, 0xFF).unwrap();
        assert_eq!(mem.load_u8(0), Ok(0xFF));
    }

    #[test]
    fn load_u8_out_of_bounds() {
        let mem = Mem::try_new(1).unwrap();
        assert!(mem.load_u8(PAGE_SIZE - 1).is_ok());
        assert_eq!(mem.load_u8(PAGE_SIZE), Err(WasmTrap::OutOfBounds));
    }

    // ── load/store u16 ──

    #[test]
    fn store_load_u16_roundtrip() {
        let mut mem = Mem::try_new(1).unwrap();
        mem.store_u16(50, 0xBEEF).unwrap();
        assert_eq!(mem.load_u16(50), Ok(0xBEEF));
    }

    // ── load/store f32 ──

    #[test]
    fn store_load_f32_roundtrip() {
        let mut mem = Mem::try_new(1).unwrap();
        mem.store_f32(300, core::f32::consts::PI).unwrap();
        assert_eq!(mem.load_f32(300), Ok(core::f32::consts::PI));
    }

    // ── load/store f64 ──

    #[test]
    fn store_load_f64_roundtrip() {
        let mut mem = Mem::try_new(1).unwrap();
        mem.store_f64(400, core::f64::consts::E).unwrap();
        assert_eq!(mem.load_f64(400), Ok(core::f64::consts::E));
    }

    // ── unchecked variants ──

    #[test]
    fn unchecked_i32_roundtrip() {
        let mut mem = Mem::try_new(1).unwrap();
        unsafe {
            mem.store_i32_unchecked(100, 42);
            assert_eq!(mem.load_i32_unchecked(100), 42);
        }
    }

    #[test]
    fn unchecked_i64_roundtrip() {
        let mut mem = Mem::try_new(1).unwrap();
        unsafe {
            mem.store_i64_unchecked(200, -1i64);
            assert_eq!(mem.load_i64_unchecked(200), -1i64);
        }
    }

    // ── active_pages boundary ──

    #[test]
    fn access_beyond_active_pages_traps() {
        // MAX_PAGES=2 but only 1 page active
        let mem = IsolatedMemory::<2>::try_new(1).unwrap();
        // Within active region: OK
        assert!(mem.load_i32(0).is_ok());
        // Beyond active_pages but within backing array: still OOB
        assert_eq!(mem.load_i32(PAGE_SIZE), Err(WasmTrap::OutOfBounds));
    }

    #[test]
    fn grow_then_access_new_region() {
        let mut mem = IsolatedMemory::<2>::try_new(1).unwrap();
        assert_eq!(mem.load_i32(PAGE_SIZE), Err(WasmTrap::OutOfBounds));
        mem.grow(1);
        // Now page 2 is active — access succeeds
        assert!(mem.load_i32(PAGE_SIZE).is_ok());
        mem.store_i32(PAGE_SIZE, 99).unwrap();
        assert_eq!(mem.load_i32(PAGE_SIZE), Ok(99));
    }

    // ── init_data ──

    #[test]
    fn init_data_writes_bytes() {
        let mut mem = Mem::try_new(1).unwrap();
        mem.init_data(10, &[1u8, 2, 3, 4]).unwrap();
        assert_eq!(mem.load_u8(10).unwrap(), 1);
        assert_eq!(mem.load_u8(11).unwrap(), 2);
        assert_eq!(mem.load_u8(12).unwrap(), 3);
        assert_eq!(mem.load_u8(13).unwrap(), 4);
    }

    #[test]
    fn init_data_empty_slice_is_noop() {
        let mut mem = Mem::try_new(1).unwrap();
        assert!(mem.init_data(0, &[]).is_ok());
    }

    #[test]
    fn init_data_out_of_bounds() {
        let mut mem = Mem::try_new(1).unwrap();
        let data = [0u8; 10];
        assert_eq!(
            mem.init_data(PAGE_SIZE - 5, &data),
            Err(WasmTrap::OutOfBounds)
        );
    }

    #[test]
    fn init_data_at_boundary() {
        let mut mem = Mem::try_new(1).unwrap();
        let data = [42u8; 4];
        assert!(mem.init_data(PAGE_SIZE - 4, &data).is_ok());
        assert_eq!(mem.load_u8(PAGE_SIZE - 1).unwrap(), 42);
    }

    #[test]
    fn init_data_overwrites_existing() {
        let mut mem = Mem::try_new(1).unwrap();
        mem.store_u8(5, 0xFF).unwrap();
        mem.init_data(5, &[0xABu8]).unwrap();
        assert_eq!(mem.load_u8(5).unwrap(), 0xAB);
    }

    // ── little-endian encoding ──

    #[test]
    fn i32_is_little_endian() {
        let mut mem = Mem::try_new(1).unwrap();
        mem.store_i32(0, 0x04030201).unwrap();
        assert_eq!(mem.load_u8(0), Ok(0x01));
        assert_eq!(mem.load_u8(1), Ok(0x02));
        assert_eq!(mem.load_u8(2), Ok(0x03));
        assert_eq!(mem.load_u8(3), Ok(0x04));
    }
}
762
// ── Kani Formal Verification Proofs ──────────────────────────────────────
//
// These proof harnesses exhaustively verify core invariants of IsolatedMemory
// using Kani's bounded model checker. Run with: cargo kani -p herkos-runtime
//
// The proofs establish that:
// - All load/store operations either succeed or return Err (never panic)
// - grow respects MAX_PAGES and zero-initializes new pages
// - Store/load roundtrips preserve values
// - Offset overflow is handled correctly
// - active_pages never exceeds MAX_PAGES
775#[cfg(kani)]
776mod proofs {
777    use super::*;
778
779    /// Proof: load_i32 never panics, only returns Ok or Err(OutOfBounds).
780    /// Verifies bounds checking correctness for all possible offsets.
781    #[kani::proof]
782    #[kani::unwind(1)]
783    fn load_i32_never_panics() {
784        let mem = IsolatedMemory::<4>::new(1); // 1 page active = 64 KiB
785        let offset: usize = kani::any();
786
787        // Should return Ok or Err(OutOfBounds), never panic
788        let result = mem.load_i32(offset);
789
790        // If successful, offset must be in valid range
791        if result.is_ok() {
792            kani::assert(
793                offset.checked_add(4).is_some(),
794                "successful load must not overflow",
795            );
796            kani::assert(
797                offset + 4 <= mem.active_size(),
798                "successful load must be within active region",
799            );
800        }
801    }
802
803    /// Proof: load_i64 never panics for any offset.
804    #[kani::proof]
805    #[kani::unwind(1)]
806    fn load_i64_never_panics() {
807        let mem = IsolatedMemory::<4>::new(2);
808        let offset: usize = kani::any();
809        let _ = mem.load_i64(offset);
810        // Just checking it doesn't panic - Kani verifies this exhaustively
811    }
812
813    /// Proof: load_u8 never panics for any offset.
814    #[kani::proof]
815    #[kani::unwind(1)]
816    fn load_u8_never_panics() {
817        let mem = IsolatedMemory::<2>::try_new(1).unwrap();
818        let offset: usize = kani::any();
819        let _ = mem.load_u8(offset);
820    }
821
822    /// Proof: load_u16 never panics for any offset.
823    #[kani::proof]
824    #[kani::unwind(1)]
825    fn load_u16_never_panics() {
826        let mem = IsolatedMemory::<2>::try_new(1).unwrap();
827        let offset: usize = kani::any();
828        let _ = mem.load_u16(offset);
829    }
830
831    /// Proof: load_f32 never panics for any offset.
832    #[kani::proof]
833    #[kani::unwind(1)]
834    fn load_f32_never_panics() {
835        let mem = IsolatedMemory::<2>::try_new(1).unwrap();
836        let offset: usize = kani::any();
837        let _ = mem.load_f32(offset);
838    }
839
840    /// Proof: load_f64 never panics for any offset.
841    #[kani::proof]
842    #[kani::unwind(1)]
843    fn load_f64_never_panics() {
844        let mem = IsolatedMemory::<2>::try_new(1).unwrap();
845        let offset: usize = kani::any();
846        let _ = mem.load_f64(offset);
847    }
848
849    /// Proof: store_i32 never panics for any offset and value.
850    #[kani::proof]
851    #[kani::unwind(1)]
852    fn store_i32_never_panics() {
853        let mut mem = IsolatedMemory::<4>::new(1);
854        let offset: usize = kani::any();
855        let value: i32 = kani::any();
856        let _ = mem.store_i32(offset, value);
857    }
858
859    /// Proof: store_i64 never panics for any offset and value.
860    #[kani::proof]
861    #[kani::unwind(1)]
862    fn store_i64_never_panics() {
863        let mut mem = IsolatedMemory::<4>::new(2);
864        let offset: usize = kani::any();
865        let value: i64 = kani::any();
866        let _ = mem.store_i64(offset, value);
867    }
868
869    /// Proof: store_u8 never panics for any offset and value.
870    #[kani::proof]
871    #[kani::unwind(1)]
872    fn store_u8_never_panics() {
873        let mut mem = IsolatedMemory::<2>::try_new(1).unwrap();
874        let offset: usize = kani::any();
875        let value: u8 = kani::any();
876        let _ = mem.store_u8(offset, value);
877    }
878
879    /// Proof: store_u16 never panics for any offset and value.
880    #[kani::proof]
881    #[kani::unwind(1)]
882    fn store_u16_never_panics() {
883        let mut mem = IsolatedMemory::<2>::try_new(1).unwrap();
884        let offset: usize = kani::any();
885        let value: u16 = kani::any();
886        let _ = mem.store_u16(offset, value);
887    }
888
889    /// Proof: store_f32 never panics for any offset and value.
890    #[kani::proof]
891    #[kani::unwind(1)]
892    fn store_f32_never_panics() {
893        let mut mem = IsolatedMemory::<2>::try_new(1).unwrap();
894        let offset: usize = kani::any();
895        let value: f32 = kani::any();
896        let _ = mem.store_f32(offset, value);
897    }
898
899    /// Proof: store_f64 never panics for any offset and value.
900    #[kani::proof]
901    #[kani::unwind(1)]
902    fn store_f64_never_panics() {
903        let mut mem = IsolatedMemory::<2>::try_new(1).unwrap();
904        let offset: usize = kani::any();
905        let value: f64 = kani::any();
906        let _ = mem.store_f64(offset, value);
907    }
908
909    /// Proof: grow respects MAX_PAGES — active_pages never exceeds it.
910    #[kani::proof]
911    #[kani::unwind(5)]
912    fn grow_respects_max_pages() {
913        let mut mem = IsolatedMemory::<4>::new(1);
914        let delta: u32 = kani::any();
915
916        let old_pages = mem.page_count();
917        let result = mem.grow(delta);
918
919        // active_pages must never exceed MAX_PAGES
920        kani::assert(
921            mem.page_count() <= 4,
922            "active_pages must not exceed MAX_PAGES",
923        );
924
925        // If grow succeeded, result should be old page count
926        if result >= 0 {
927            kani::assert(result == old_pages as i32, "grow returns old page count");
928            // New page count is old + delta (if it fit)
929            let new_expected = old_pages as u64 + delta as u64;
930            if new_expected <= 4 {
931                kani::assert(
932                    mem.page_count() == new_expected as usize,
933                    "grow updates active_pages correctly",
934                );
935            }
936        } else {
937            // If grow failed, active_pages unchanged
938            kani::assert(
939                mem.page_count() == old_pages,
940                "failed grow leaves active_pages unchanged",
941            );
942        }
943    }
944
945    /// Proof: grow returns -1 (failure) if new size would exceed MAX_PAGES.
946    #[kani::proof]
947    #[kani::unwind(4)]
948    fn grow_fails_beyond_max() {
949        let mut mem = IsolatedMemory::<4>::new(2);
950        // Try to grow by 3 pages: 2 + 3 = 5 > 4 (MAX_PAGES)
951        let result = mem.grow(3);
952        kani::assert(result == -1, "grow beyond MAX_PAGES returns -1");
953        kani::assert(mem.page_count() == 2, "failed grow leaves pages unchanged");
954    }
955
956    /// Proof: store followed by load returns the same value (i32).
957    #[kani::proof]
958    #[kani::unwind(1)]
959    fn store_load_roundtrip_i32() {
960        let mut mem = IsolatedMemory::<1>::try_new(1).unwrap();
961        let offset: usize = kani::any();
962        let value: i32 = kani::any();
963
964        // If store succeeds, load at the same offset must return the same value
965        if mem.store_i32(offset, value).is_ok() {
966            let loaded = mem.load_i32(offset);
967            kani::assert(loaded.is_ok(), "load succeeds after successful store");
968            kani::assert(loaded.unwrap() == value, "load returns the stored value");
969        }
970    }
971
972    /// Proof: store followed by load returns the same value (i64).
973    #[kani::proof]
974    #[kani::unwind(1)]
975    fn store_load_roundtrip_i64() {
976        let mut mem = IsolatedMemory::<1>::try_new(1).unwrap();
977        let offset: usize = kani::any();
978        let value: i64 = kani::any();
979
980        if mem.store_i64(offset, value).is_ok() {
981            kani::assert(
982                mem.load_i64(offset) == Ok(value),
983                "i64 roundtrip preserves value",
984            );
985        }
986    }
987
988    /// Proof: store followed by load returns the same value (u8).
989    #[kani::proof]
990    #[kani::unwind(1)]
991    fn store_load_roundtrip_u8() {
992        let mut mem = IsolatedMemory::<1>::try_new(1).unwrap();
993        let offset: usize = kani::any();
994        let value: u8 = kani::any();
995
996        if mem.store_u8(offset, value).is_ok() {
997            kani::assert(
998                mem.load_u8(offset) == Ok(value),
999                "u8 roundtrip preserves value",
1000            );
1001        }
1002    }
1003
1004    /// Proof: store followed by load returns the same value (u16).
1005    #[kani::proof]
1006    #[kani::unwind(1)]
1007    fn store_load_roundtrip_u16() {
1008        let mut mem = IsolatedMemory::<1>::try_new(1).unwrap();
1009        let offset: usize = kani::any();
1010        let value: u16 = kani::any();
1011
1012        if mem.store_u16(offset, value).is_ok() {
1013            kani::assert(
1014                mem.load_u16(offset) == Ok(value),
1015                "u16 roundtrip preserves value",
1016            );
1017        }
1018    }
1019
1020    /// Proof: grow zero-initializes new pages.
1021    #[kani::proof]
1022    #[kani::unwind(2)]
1023    fn grow_zeroes_new_pages() {
1024        let mut mem = IsolatedMemory::<2>::try_new(1).unwrap();
1025
1026        let result = mem.grow(1);
1027
1028        if result >= 0 {
1029            // After grow, the new page should be zero
1030            // Read a value from the newly activated page
1031            let value = mem.load_i32(PAGE_SIZE);
1032            if value.is_ok() {
1033                kani::assert(value.unwrap() == 0, "newly grown page is zero-initialized");
1034            }
1035        }
1036    }
1037
1038    /// Proof: offset overflow is handled safely (no panic, returns OutOfBounds).
1039    #[kani::proof]
1040    #[kani::unwind(1)]
1041    fn offset_overflow_handled() {
1042        let mem = IsolatedMemory::<1>::try_new(1).unwrap();
1043        // Try to load at maximum possible offset (will overflow when adding size)
1044        let result = mem.load_i32(usize::MAX);
1045        kani::assert(
1046            result == Err(WasmTrap::OutOfBounds),
1047            "overflow offset returns OutOfBounds",
1048        );
1049    }
1050
1051    /// Proof: accesses beyond active_pages (but within MAX_PAGES) are rejected.
1052    #[kani::proof]
1053    #[kani::unwind(1)]
1054    fn access_beyond_active_pages_rejected() {
1055        // MAX_PAGES=2 but only 1 active
1056        let mem = IsolatedMemory::<2>::try_new(1).unwrap();
1057
1058        // Access in first page: should succeed
1059        let result1 = mem.load_i32(0);
1060        kani::assert(result1.is_ok(), "access within active pages succeeds");
1061
1062        // Access in second page (not active yet): should fail
1063        let result2 = mem.load_i32(PAGE_SIZE);
1064        kani::assert(
1065            result2 == Err(WasmTrap::OutOfBounds),
1066            "access beyond active_pages is rejected",
1067        );
1068    }
1069
1070    /// Proof: active_size always equals active_pages * PAGE_SIZE.
1071    #[kani::proof]
1072    #[kani::unwind(1)]
1073    fn active_size_invariant() {
1074        let mem = IsolatedMemory::<4>::new(2);
1075        kani::assert(
1076            mem.active_size() == mem.page_count() * PAGE_SIZE,
1077            "active_size = active_pages * PAGE_SIZE",
1078        );
1079    }
1080
1081    /// Proof: size() returns active_pages as i32.
1082    #[kani::proof]
1083    #[kani::unwind(1)]
1084    fn size_returns_page_count() {
1085        let mem = IsolatedMemory::<4>::new(3);
1086        kani::assert(
1087            mem.size() == mem.page_count() as i32,
1088            "size() returns active_pages",
1089        );
1090    }
1091
1092    /// Proof: successful load requires offset + type_size <= active_size.
1093    #[kani::proof]
1094    #[kani::unwind(1)]
1095    fn load_success_implies_valid_range() {
1096        let mem = IsolatedMemory::<1>::try_new(1).unwrap();
1097        let offset: usize = kani::any();
1098
1099        let result = mem.load_i32(offset);
1100
1101        if result.is_ok() {
1102            // Success implies: offset + 4 <= active_size and no overflow
1103            let end = offset.checked_add(4);
1104            kani::assert(end.is_some(), "successful load offset does not overflow");
1105            kani::assert(
1106                end.unwrap() <= mem.active_size(),
1107                "successful load is within bounds",
1108            );
1109        }
1110    }
1111
1112    /// Proof: successful store requires offset + type_size <= active_size.
1113    #[kani::proof]
1114    #[kani::unwind(1)]
1115    fn store_success_implies_valid_range() {
1116        let mut mem = IsolatedMemory::<1>::try_new(1).unwrap();
1117        let offset: usize = kani::any();
1118        let value: i64 = kani::any();
1119
1120        let result = mem.store_i64(offset, value);
1121
1122        if result.is_ok() {
1123            let end = offset.checked_add(8);
1124            kani::assert(end.is_some(), "successful store offset does not overflow");
1125            kani::assert(
1126                end.unwrap() <= mem.active_size(),
1127                "successful store is within bounds",
1128            );
1129        }
1130    }
1131
1132    /// Proof: as_slice returns a slice of exactly active_size bytes.
1133    #[kani::proof]
1134    #[kani::unwind(1)]
1135    fn as_slice_length_correct() {
1136        let mem = IsolatedMemory::<4>::new(2);
1137        let slice = mem.as_slice();
1138        kani::assert(
1139            slice.len() == mem.active_size(),
1140            "as_slice length equals active_size",
1141        );
1142    }
1143
1144    /// Proof: as_mut_slice returns a slice of exactly active_size bytes.
1145    #[kani::proof]
1146    #[kani::unwind(1)]
1147    fn as_mut_slice_length_correct() {
1148        let mut mem = IsolatedMemory::<4>::new(2);
1149        let slice = mem.as_mut_slice();
1150        kani::assert(
1151            slice.len() == mem.active_size(),
1152            "as_mut_slice length equals active_size",
1153        );
1154    }
1155}