// herkos_runtime/memory.rs

//! WebAssembly linear memory — `IsolatedMemory<const MAX_PAGES: usize>`.
//!
//! The backing array is `[[u8; PAGE_SIZE]; MAX_PAGES]` — a 2D array that
//! is contiguous in memory. We use `as_flattened()` (stable since Rust 1.80)
//! to get a flat `&[u8]` view for the inner functions.
//!
//! Note: the spec shows `[u8; MAX_PAGES * PAGE_SIZE]` but that requires the
//! unstable `generic_const_exprs` feature. The 2D array achieves identical
//! layout on stable Rust.
//!
//! Load/store operations use the **outline pattern** (§13.3): the generic
//! wrapper delegates to a non-generic inner function so that only one copy
//! of the actual bounds-checking logic exists in the binary.
15use crate::{WasmResult, WasmTrap, PAGE_SIZE};
16
/// Isolated linear memory for a single Wasm module.
///
/// `MAX_PAGES` is the compile-time maximum (from the Wasm module's declared
/// maximum or a CLI override). The backing array is fully pre-allocated, so
/// no heap allocation ever occurs after construction.
pub struct IsolatedMemory<const MAX_PAGES: usize> {
    /// Backing storage — `MAX_PAGES` pages of `PAGE_SIZE` bytes each.
    /// Contiguous in memory, identical layout to `[u8; MAX_PAGES * PAGE_SIZE]`.
    pages: [[u8; PAGE_SIZE]; MAX_PAGES],
    /// Number of currently active pages. Starts at `initial_pages`,
    /// incremented by `grow`. Accesses beyond `active_pages * PAGE_SIZE`
    /// are out-of-bounds traps.
    /// Invariant: `active_pages <= MAX_PAGES` (upheld by `try_new`,
    /// `try_init`, and `grow`).
    active_pages: usize,
}
30
31impl<const MAX_PAGES: usize> IsolatedMemory<MAX_PAGES> {
32    /// Create a new `IsolatedMemory` with `initial_pages` active.
33    ///
34    /// # Errors
35    /// Returns `ConstructionError::MemoryInitialPagesExceedsMax` if `initial_pages > MAX_PAGES`.
36    #[inline(never)]
37    pub fn try_new(initial_pages: usize) -> Result<Self, crate::ConstructionError> {
38        if initial_pages > MAX_PAGES {
39            return Err(crate::ConstructionError::MemoryInitialPagesExceedsMax {
40                initial: initial_pages,
41                max: MAX_PAGES,
42            });
43        }
44        Ok(Self {
45            pages: [[0u8; PAGE_SIZE]; MAX_PAGES],
46            active_pages: initial_pages,
47        })
48    }
49
    /// Initialize an `IsolatedMemory` in-place within a caller-provided slot.
    ///
    /// Unlike `try_new`, this writes directly into `slot` without ever creating
    /// a large `Result<Self, E>` on the call stack. Use this when `MAX_PAGES`
    /// is large, to avoid stack overflow in debug builds.
    ///
    /// On success the slot is fully initialized and the caller may call
    /// `slot.assume_init()`. On error the slot is left untouched.
    ///
    /// # Errors
    /// Returns `ConstructionError::MemoryInitialPagesExceedsMax` if `initial_pages > MAX_PAGES`.
    #[inline(never)]
    pub fn try_init(
        slot: &mut core::mem::MaybeUninit<Self>,
        initial_pages: usize,
    ) -> Result<(), crate::ConstructionError> {
        if initial_pages > MAX_PAGES {
            return Err(crate::ConstructionError::MemoryInitialPagesExceedsMax {
                initial: initial_pages,
                max: MAX_PAGES,
            });
        }
        let ptr = slot.as_mut_ptr();
        // SAFETY: ptr comes from MaybeUninit so it is valid for writes and
        // correctly aligned. Both fields are written before the caller can
        // call assume_init on the slot.
        unsafe {
            // Zero the entire pages array in-place — `write_bytes(0, 1)`
            // writes `1 * size_of::<[[u8; PAGE_SIZE]; MAX_PAGES]>()` zero
            // bytes (the count is in units of the pointee type), which the
            // compiler emits as a single memset; no large stack temporary
            // is created.
            core::ptr::addr_of_mut!((*ptr).pages).write_bytes(0, 1);
            core::ptr::addr_of_mut!((*ptr).active_pages).write(initial_pages);
        }
        Ok(())
    }
81
82    /// Current number of active pages.
83    #[inline(always)]
84    pub fn page_count(&self) -> usize {
85        self.active_pages
86    }
87
88    /// Current active size in bytes.
89    #[inline(always)]
90    pub fn active_size(&self) -> usize {
91        self.active_pages * PAGE_SIZE
92    }
93
94    /// Wasm `memory.grow` — returns previous page count, or -1 on failure.
95    /// No allocation occurs: the backing array is already sized to `MAX_PAGES`.
96    pub fn grow(&mut self, delta: u32) -> i32 {
97        let old = self.active_pages;
98        let new = old.wrapping_add(delta as usize);
99        if new > MAX_PAGES {
100            return -1;
101        }
102        // Zero-init the new pages (Wasm spec requires it).
103        for page in &mut self.pages[old..new] {
104            page.fill(0);
105        }
106        self.active_pages = new;
107        old as i32
108    }
109
110    /// Wasm `memory.size` — returns current page count.
111    #[inline(always)]
112    pub fn size(&self) -> i32 {
113        self.active_pages as i32
114    }
115
116    /// Flat read-only view of the full backing memory.
117    #[inline(always)]
118    fn flat(&self) -> &[u8] {
119        self.pages.as_flattened()
120    }
121
122    /// Flat mutable view of the full backing memory.
123    #[inline(always)]
124    fn flat_mut(&mut self) -> &mut [u8] {
125        self.pages.as_flattened_mut()
126    }
127
128    // ── Bulk memory operations ────────────────────────────────────────
129
130    /// Wasm `memory.copy` — copy `len` bytes from `src` to `dst`.
131    ///
132    /// Semantics match `memmove`: overlapping source and destination regions
133    /// are handled correctly. Traps (`OutOfBounds`) if either region extends
134    /// beyond the current active memory.
135    pub fn memory_copy(&mut self, dst: u32, src: u32, len: u32) -> WasmResult<()> {
136        let active = self.active_size();
137        let dst = dst as usize;
138        let src = src as usize;
139        let len = len as usize;
140        if src.checked_add(len).is_none_or(|end| end > active)
141            || dst.checked_add(len).is_none_or(|end| end > active)
142        {
143            return Err(WasmTrap::OutOfBounds);
144        }
145        self.flat_mut().copy_within(src..src + len, dst);
146        Ok(())
147    }
148
149    /// Wasm `memory.fill` — fill `len` bytes starting at `dst` with `val`.
150    ///
151    /// Only the low 8 bits of `val` are used (Wasm spec). Traps (`OutOfBounds`)
152    /// if the region extends beyond the current active memory.
153    pub fn fill(&mut self, dst: usize, val: u8, len: usize) -> WasmResult<()> {
154        let active = self.active_size();
155        fill_inner(self.flat_mut(), active, dst, val, len)
156    }
157
158    /// Wasm `memory.init` — copy `len` bytes from `data[src_offset..]` into
159    /// linear memory at `dst`.
160    ///
161    /// Unlike `init_data` (which copies an entire slice), this copies a
162    /// sub-range of a passive data segment. Traps (`OutOfBounds`) if either
163    /// the source range extends beyond `data` or the destination region extends
164    /// beyond active memory.
165    pub fn init_data_partial(
166        &mut self,
167        dst: usize,
168        data: &[u8],
169        src_offset: usize,
170        len: usize,
171    ) -> WasmResult<()> {
172        let active = self.active_size();
173        init_data_partial_inner(self.flat_mut(), active, dst, data, src_offset, len)
174    }
175
176    // ── Bounds-checked (safe) load/store ──────────────────────────────
177
178    /// Load an i32 from linear memory with bounds checking.
179    #[inline(always)]
180    pub fn load_i32(&self, offset: usize) -> WasmResult<i32> {
181        load_i32_inner(self.flat(), self.active_size(), offset)
182    }
183
184    /// Load an i64 from linear memory with bounds checking.
185    #[inline(always)]
186    pub fn load_i64(&self, offset: usize) -> WasmResult<i64> {
187        load_i64_inner(self.flat(), self.active_size(), offset)
188    }
189
190    /// Load a u8 (i32.load8_u) from linear memory with bounds checking.
191    #[inline(always)]
192    pub fn load_u8(&self, offset: usize) -> WasmResult<u8> {
193        load_u8_inner(self.flat(), self.active_size(), offset)
194    }
195
196    /// Load a u16 (i32.load16_u) from linear memory with bounds checking.
197    #[inline(always)]
198    pub fn load_u16(&self, offset: usize) -> WasmResult<u16> {
199        load_u16_inner(self.flat(), self.active_size(), offset)
200    }
201
202    /// Load an f32 from linear memory with bounds checking.
203    #[inline(always)]
204    pub fn load_f32(&self, offset: usize) -> WasmResult<f32> {
205        load_f32_inner(self.flat(), self.active_size(), offset)
206    }
207
208    /// Load an f64 from linear memory with bounds checking.
209    #[inline(always)]
210    pub fn load_f64(&self, offset: usize) -> WasmResult<f64> {
211        load_f64_inner(self.flat(), self.active_size(), offset)
212    }
213
214    /// Store an i32 into linear memory with bounds checking.
215    #[inline(always)]
216    pub fn store_i32(&mut self, offset: usize, value: i32) -> WasmResult<()> {
217        let active = self.active_size();
218        store_i32_inner(self.flat_mut(), active, offset, value)
219    }
220
221    /// Store an i64 into linear memory with bounds checking.
222    #[inline(always)]
223    pub fn store_i64(&mut self, offset: usize, value: i64) -> WasmResult<()> {
224        let active = self.active_size();
225        store_i64_inner(self.flat_mut(), active, offset, value)
226    }
227
228    /// Store a u8 (i32.store8) into linear memory with bounds checking.
229    #[inline(always)]
230    pub fn store_u8(&mut self, offset: usize, value: u8) -> WasmResult<()> {
231        let active = self.active_size();
232        store_u8_inner(self.flat_mut(), active, offset, value)
233    }
234
235    /// Store a u16 (i32.store16) into linear memory with bounds checking.
236    #[inline(always)]
237    pub fn store_u16(&mut self, offset: usize, value: u16) -> WasmResult<()> {
238        let active = self.active_size();
239        store_u16_inner(self.flat_mut(), active, offset, value)
240    }
241
242    /// Store an f32 into linear memory with bounds checking.
243    #[inline(always)]
244    pub fn store_f32(&mut self, offset: usize, value: f32) -> WasmResult<()> {
245        let active = self.active_size();
246        store_f32_inner(self.flat_mut(), active, offset, value)
247    }
248
249    /// Store an f64 into linear memory with bounds checking.
250    #[inline(always)]
251    pub fn store_f64(&mut self, offset: usize, value: f64) -> WasmResult<()> {
252        let active = self.active_size();
253        store_f64_inner(self.flat_mut(), active, offset, value)
254    }
255
256    /// Initialize a region of memory from a byte slice (Wasm data segment).
257    ///
258    /// Copies `data` into linear memory starting at `offset`. Equivalent to
259    /// calling `store_u8` for each byte, but avoids emitting N separate
260    /// function calls in generated code.
261    ///
262    /// # Errors
263    /// Returns `Err(WasmTrap::OutOfBounds)` if `offset + data.len()` exceeds
264    /// `active_pages * PAGE_SIZE`.
265    #[inline(always)]
266    pub fn init_data(&mut self, offset: usize, data: &[u8]) -> WasmResult<()> {
267        let active = self.active_size();
268        init_data_inner(self.flat_mut(), active, offset, data)
269    }
270
    // ── Unchecked (verified) load/store ───────────────────────────────
    //
    // These skip bounds checking entirely. The caller MUST guarantee that
    // the access is in-bounds, justified by a formal proof. Each wrapper is
    // a thin delegation; the actual unchecked access lives in the outlined
    // inner functions at the bottom of this file.

    /// Load i32 without bounds checking.
    ///
    /// # Safety
    /// Caller must guarantee `offset + 3 < active_size()`
    /// (equivalently `offset + 4 <= active_size()`).
    #[inline(always)]
    pub unsafe fn load_i32_unchecked(&self, offset: usize) -> i32 {
        load_i32_unchecked_inner(self.flat(), offset)
    }

    /// Load i64 without bounds checking.
    ///
    /// # Safety
    /// Caller must guarantee `offset + 7 < active_size()`
    /// (equivalently `offset + 8 <= active_size()`).
    #[inline(always)]
    pub unsafe fn load_i64_unchecked(&self, offset: usize) -> i64 {
        load_i64_unchecked_inner(self.flat(), offset)
    }

    /// Store i32 without bounds checking.
    ///
    /// # Safety
    /// Caller must guarantee `offset + 3 < active_size()`.
    #[inline(always)]
    pub unsafe fn store_i32_unchecked(&mut self, offset: usize, value: i32) {
        store_i32_unchecked_inner(self.flat_mut(), offset, value)
    }

    /// Store i64 without bounds checking.
    ///
    /// # Safety
    /// Caller must guarantee `offset + 7 < active_size()`.
    #[inline(always)]
    pub unsafe fn store_i64_unchecked(&mut self, offset: usize, value: i64) {
        store_i64_unchecked_inner(self.flat_mut(), offset, value)
    }
311
312    /// Read-only access to the active memory region.
313    #[inline(always)]
314    pub fn as_slice(&self) -> &[u8] {
315        &self.flat()[..self.active_size()]
316    }
317
318    /// Mutable access to the active memory region.
319    #[inline(always)]
320    pub fn as_mut_slice(&mut self) -> &mut [u8] {
321        let size = self.active_size();
322        &mut self.flat_mut()[..size]
323    }
324}
325
326// ── Helpers ───────────────────────────────────────────────────────────
327
/// Bounds-check and return the sub-slice `memory[offset..offset + len]`.
/// Returns `Err(OutOfBounds)` on overflow or out-of-range — never panics.
///
/// Callers pass `active_bytes = active_pages * PAGE_SIZE`, which never
/// exceeds `memory.len()` (the full backing array).
#[inline(always)]
fn checked_slice(
    memory: &[u8],
    active_bytes: usize,
    offset: usize,
    len: usize,
) -> WasmResult<&[u8]> {
    // checked_add guards against `offset + len` wrapping around usize.
    let end = offset.checked_add(len).ok_or(WasmTrap::OutOfBounds)?;
    if end > active_bytes {
        return Err(WasmTrap::OutOfBounds);
    }
    // No unsafe here: `get` returns Option instead of panicking. Given the
    // checks above (and active_bytes <= memory.len()), the lookup always
    // succeeds, so the trailing ok_or is unreachable belt-and-braces that
    // keeps this function free of panic paths.
    memory.get(offset..end).ok_or(WasmTrap::OutOfBounds)
}
346
347/// Mutable variant of `checked_slice`.
348#[inline(always)]
349fn checked_slice_mut(
350    memory: &mut [u8],
351    active_bytes: usize,
352    offset: usize,
353    len: usize,
354) -> WasmResult<&mut [u8]> {
355    let end = offset.checked_add(len).ok_or(WasmTrap::OutOfBounds)?;
356    if end > active_bytes {
357        return Err(WasmTrap::OutOfBounds);
358    }
359    memory.get_mut(offset..end).ok_or(WasmTrap::OutOfBounds)
360}
361
362/// Convert a slice to a fixed-size array. Returns `Err(OutOfBounds)` if
363/// the length doesn't match — never panics.
364#[inline(always)]
365fn to_array<const N: usize>(slice: &[u8]) -> WasmResult<[u8; N]> {
366    slice.try_into().map_err(|_| WasmTrap::OutOfBounds)
367}
368
369// ── Non-generic inner functions (outline pattern, §13.3) ─────────────
370//
371// ONE copy of each function in the binary, regardless of how many
372// `MAX_PAGES` instantiations exist. The generic wrappers above compile
373// to a single call instruction each.
374//
375// No unwrap(), no indexing, no panic paths.
376
377#[inline(never)]
378fn load_i32_inner(memory: &[u8], active_bytes: usize, offset: usize) -> WasmResult<i32> {
379    let s = checked_slice(memory, active_bytes, offset, 4)?;
380    Ok(i32::from_le_bytes(to_array(s)?))
381}
382
383#[inline(never)]
384fn load_i64_inner(memory: &[u8], active_bytes: usize, offset: usize) -> WasmResult<i64> {
385    let s = checked_slice(memory, active_bytes, offset, 8)?;
386    Ok(i64::from_le_bytes(to_array(s)?))
387}
388
389#[inline(never)]
390fn load_u8_inner(memory: &[u8], active_bytes: usize, offset: usize) -> WasmResult<u8> {
391    let s = checked_slice(memory, active_bytes, offset, 1)?;
392    Ok(s[0])
393}
394
395#[inline(never)]
396fn load_u16_inner(memory: &[u8], active_bytes: usize, offset: usize) -> WasmResult<u16> {
397    let s = checked_slice(memory, active_bytes, offset, 2)?;
398    Ok(u16::from_le_bytes(to_array(s)?))
399}
400
401#[inline(never)]
402fn load_f32_inner(memory: &[u8], active_bytes: usize, offset: usize) -> WasmResult<f32> {
403    let s = checked_slice(memory, active_bytes, offset, 4)?;
404    Ok(f32::from_le_bytes(to_array(s)?))
405}
406
407#[inline(never)]
408fn load_f64_inner(memory: &[u8], active_bytes: usize, offset: usize) -> WasmResult<f64> {
409    let s = checked_slice(memory, active_bytes, offset, 8)?;
410    Ok(f64::from_le_bytes(to_array(s)?))
411}
412
413#[inline(never)]
414fn store_i32_inner(
415    memory: &mut [u8],
416    active_bytes: usize,
417    offset: usize,
418    value: i32,
419) -> WasmResult<()> {
420    let s = checked_slice_mut(memory, active_bytes, offset, 4)?;
421    s.copy_from_slice(&value.to_le_bytes());
422    Ok(())
423}
424
425#[inline(never)]
426fn store_i64_inner(
427    memory: &mut [u8],
428    active_bytes: usize,
429    offset: usize,
430    value: i64,
431) -> WasmResult<()> {
432    let s = checked_slice_mut(memory, active_bytes, offset, 8)?;
433    s.copy_from_slice(&value.to_le_bytes());
434    Ok(())
435}
436
437#[inline(never)]
438fn store_u8_inner(
439    memory: &mut [u8],
440    active_bytes: usize,
441    offset: usize,
442    value: u8,
443) -> WasmResult<()> {
444    let s = checked_slice_mut(memory, active_bytes, offset, 1)?;
445    s[0] = value;
446    Ok(())
447}
448
449#[inline(never)]
450fn store_u16_inner(
451    memory: &mut [u8],
452    active_bytes: usize,
453    offset: usize,
454    value: u16,
455) -> WasmResult<()> {
456    let s = checked_slice_mut(memory, active_bytes, offset, 2)?;
457    s.copy_from_slice(&value.to_le_bytes());
458    Ok(())
459}
460
461#[inline(never)]
462fn store_f32_inner(
463    memory: &mut [u8],
464    active_bytes: usize,
465    offset: usize,
466    value: f32,
467) -> WasmResult<()> {
468    let s = checked_slice_mut(memory, active_bytes, offset, 4)?;
469    s.copy_from_slice(&value.to_le_bytes());
470    Ok(())
471}
472
473#[inline(never)]
474fn store_f64_inner(
475    memory: &mut [u8],
476    active_bytes: usize,
477    offset: usize,
478    value: f64,
479) -> WasmResult<()> {
480    let s = checked_slice_mut(memory, active_bytes, offset, 8)?;
481    s.copy_from_slice(&value.to_le_bytes());
482    Ok(())
483}
484
485#[inline(never)]
486fn init_data_inner(
487    memory: &mut [u8],
488    active_bytes: usize,
489    offset: usize,
490    data: &[u8],
491) -> WasmResult<()> {
492    let dst = checked_slice_mut(memory, active_bytes, offset, data.len())?;
493    dst.copy_from_slice(data);
494    Ok(())
495}
496
497#[inline(never)]
498fn fill_inner(
499    memory: &mut [u8],
500    active_bytes: usize,
501    dst: usize,
502    val: u8,
503    len: usize,
504) -> WasmResult<()> {
505    let region = checked_slice_mut(memory, active_bytes, dst, len)?;
506    region.fill(val);
507    Ok(())
508}
509
510#[inline(never)]
511fn init_data_partial_inner(
512    memory: &mut [u8],
513    active_bytes: usize,
514    dst: usize,
515    data: &[u8],
516    src_offset: usize,
517    len: usize,
518) -> WasmResult<()> {
519    let src_end = src_offset.checked_add(len).ok_or(WasmTrap::OutOfBounds)?;
520    if src_end > data.len() {
521        return Err(WasmTrap::OutOfBounds);
522    }
523    let src = &data[src_offset..src_end];
524    let dst_region = checked_slice_mut(memory, active_bytes, dst, len)?;
525    dst_region.copy_from_slice(src);
526    Ok(())
527}
528
// ── Unchecked inner functions ─────────────────────────────────────────
//
// SAFETY: the caller (verified backend) guarantees the offset is in-bounds,
// justified by a formal proof. These use raw pointer arithmetic
// (no bounds check) and read_unaligned/write_unaligned (no alignment
// requirement, matching Wasm's unaligned memory semantics). from_le/to_le
// keep the byte order little-endian regardless of host endianness, matching
// the checked from_le_bytes/to_le_bytes paths.

/// # Safety
/// Caller must guarantee `offset + 4 <= memory.len()`.
#[inline(never)]
unsafe fn load_i32_unchecked_inner(memory: &[u8], offset: usize) -> i32 {
    let ptr = memory.as_ptr().add(offset) as *const i32;
    i32::from_le(ptr.read_unaligned())
}

/// # Safety
/// Caller must guarantee `offset + 8 <= memory.len()`.
#[inline(never)]
unsafe fn load_i64_unchecked_inner(memory: &[u8], offset: usize) -> i64 {
    let ptr = memory.as_ptr().add(offset) as *const i64;
    i64::from_le(ptr.read_unaligned())
}

/// # Safety
/// Caller must guarantee `offset + 4 <= memory.len()`.
#[inline(never)]
unsafe fn store_i32_unchecked_inner(memory: &mut [u8], offset: usize, value: i32) {
    let ptr = memory.as_mut_ptr().add(offset) as *mut i32;
    ptr.write_unaligned(value.to_le());
}

/// # Safety
/// Caller must guarantee `offset + 8 <= memory.len()`.
#[inline(never)]
unsafe fn store_i64_unchecked_inner(memory: &mut [u8], offset: usize, value: i64) {
    let ptr = memory.as_mut_ptr().add(offset) as *mut i64;
    ptr.write_unaligned(value.to_le());
}
559
//! (unit tests) Exercise construction, grow, every checked load/store width,
//! the unchecked variants, bulk operations, and little-endian encoding.
#[cfg(test)]
mod tests {
    use super::*;

    // Use MAX_PAGES=1 for tests — 1 page = 64 KiB, fits on stack in test.
    type Mem = IsolatedMemory<1>;

    // ── construction ──

    #[test]
    fn new_initializes_to_zero() {
        let mem = Mem::try_new(1).unwrap();
        assert_eq!(mem.page_count(), 1);
        assert_eq!(mem.active_size(), PAGE_SIZE);
        assert!(mem.as_slice().iter().all(|&b| b == 0));
    }

    #[test]
    fn try_new_fails_if_initial_exceeds_max() {
        let result = Mem::try_new(2);
        assert!(result.is_err());
        assert!(matches!(
            result,
            Err(crate::ConstructionError::MemoryInitialPagesExceedsMax { initial: 2, max: 1 })
        ));
    }

    // ── grow ──

    #[test]
    fn grow_success() {
        let mut mem = IsolatedMemory::<4>::try_new(1).unwrap();
        assert_eq!(mem.grow(2), 1); // old page count
        assert_eq!(mem.page_count(), 3);
    }

    #[test]
    fn grow_to_max() {
        let mut mem = IsolatedMemory::<4>::try_new(1).unwrap();
        assert_eq!(mem.grow(3), 1);
        assert_eq!(mem.page_count(), 4);
    }

    #[test]
    fn grow_beyond_max_fails() {
        let mut mem = IsolatedMemory::<4>::try_new(1).unwrap();
        assert_eq!(mem.grow(4), -1); // would be 5 pages > 4
        assert_eq!(mem.page_count(), 1); // unchanged
    }

    #[test]
    fn grow_zero_is_noop() {
        let mut mem = Mem::try_new(1).unwrap();
        assert_eq!(mem.grow(0), 1);
        assert_eq!(mem.page_count(), 1);
    }

    #[test]
    fn grow_zeroes_new_pages() {
        let mut mem = IsolatedMemory::<2>::try_new(1).unwrap();
        assert_eq!(mem.grow(1), 1);
        // Verify new page is zero via flat view
        let flat = mem.flat();
        let new_start = PAGE_SIZE;
        let new_end = 2 * PAGE_SIZE;
        assert!(flat[new_start..new_end].iter().all(|&b| b == 0));
    }

    #[test]
    fn size_returns_page_count() {
        let mem = IsolatedMemory::<4>::try_new(2).unwrap();
        assert_eq!(mem.size(), 2);
    }

    // ── load/store i32 ──

    #[test]
    fn store_load_i32_roundtrip() {
        let mut mem = Mem::try_new(1).unwrap();
        mem.store_i32(100, 0x12345678).unwrap();
        assert_eq!(mem.load_i32(100), Ok(0x12345678));
    }

    #[test]
    fn load_i32_out_of_bounds() {
        let mem = Mem::try_new(1).unwrap();
        // Last valid offset for i32: PAGE_SIZE - 4
        assert!(mem.load_i32(PAGE_SIZE - 4).is_ok());
        assert_eq!(mem.load_i32(PAGE_SIZE - 3), Err(WasmTrap::OutOfBounds));
        assert_eq!(mem.load_i32(PAGE_SIZE), Err(WasmTrap::OutOfBounds));
    }

    #[test]
    fn store_i32_out_of_bounds() {
        let mut mem = Mem::try_new(1).unwrap();
        assert!(mem.store_i32(PAGE_SIZE - 4, 42).is_ok());
        assert_eq!(mem.store_i32(PAGE_SIZE - 3, 42), Err(WasmTrap::OutOfBounds));
    }

    #[test]
    fn load_i32_offset_overflow() {
        // usize::MAX + 4 would wrap; checked_add must catch it.
        let mem = Mem::try_new(1).unwrap();
        assert_eq!(mem.load_i32(usize::MAX), Err(WasmTrap::OutOfBounds));
    }

    // ── load/store i64 ──

    #[test]
    fn store_load_i64_roundtrip() {
        let mut mem = Mem::try_new(1).unwrap();
        mem.store_i64(200, 0x0102030405060708i64).unwrap();
        assert_eq!(mem.load_i64(200), Ok(0x0102030405060708i64));
    }

    #[test]
    fn load_i64_out_of_bounds() {
        let mem = Mem::try_new(1).unwrap();
        assert!(mem.load_i64(PAGE_SIZE - 8).is_ok());
        assert_eq!(mem.load_i64(PAGE_SIZE - 7), Err(WasmTrap::OutOfBounds));
    }

    // ── load/store u8 ──

    #[test]
    fn store_load_u8_roundtrip() {
        let mut mem = Mem::try_new(1).unwrap();
        mem.store_u8(0, 0xFF).unwrap();
        assert_eq!(mem.load_u8(0), Ok(0xFF));
    }

    #[test]
    fn load_u8_out_of_bounds() {
        let mem = Mem::try_new(1).unwrap();
        assert!(mem.load_u8(PAGE_SIZE - 1).is_ok());
        assert_eq!(mem.load_u8(PAGE_SIZE), Err(WasmTrap::OutOfBounds));
    }

    // ── load/store u16 ──

    #[test]
    fn store_load_u16_roundtrip() {
        let mut mem = Mem::try_new(1).unwrap();
        mem.store_u16(50, 0xBEEF).unwrap();
        assert_eq!(mem.load_u16(50), Ok(0xBEEF));
    }

    // ── load/store f32 ──

    #[test]
    fn store_load_f32_roundtrip() {
        let mut mem = Mem::try_new(1).unwrap();
        mem.store_f32(300, core::f32::consts::PI).unwrap();
        assert_eq!(mem.load_f32(300), Ok(core::f32::consts::PI));
    }

    // ── load/store f64 ──

    #[test]
    fn store_load_f64_roundtrip() {
        let mut mem = Mem::try_new(1).unwrap();
        mem.store_f64(400, core::f64::consts::E).unwrap();
        assert_eq!(mem.load_f64(400), Ok(core::f64::consts::E));
    }

    // ── unchecked variants ──

    #[test]
    fn unchecked_i32_roundtrip() {
        let mut mem = Mem::try_new(1).unwrap();
        // SAFETY: offsets are well inside the single active 64 KiB page.
        unsafe {
            mem.store_i32_unchecked(100, 42);
            assert_eq!(mem.load_i32_unchecked(100), 42);
        }
    }

    #[test]
    fn unchecked_i64_roundtrip() {
        let mut mem = Mem::try_new(1).unwrap();
        // SAFETY: offsets are well inside the single active 64 KiB page.
        unsafe {
            mem.store_i64_unchecked(200, -1i64);
            assert_eq!(mem.load_i64_unchecked(200), -1i64);
        }
    }

    // ── active_pages boundary ──

    #[test]
    fn access_beyond_active_pages_traps() {
        // MAX_PAGES=2 but only 1 page active
        let mem = IsolatedMemory::<2>::try_new(1).unwrap();
        // Within active region: OK
        assert!(mem.load_i32(0).is_ok());
        // Beyond active_pages but within backing array: still OOB
        assert_eq!(mem.load_i32(PAGE_SIZE), Err(WasmTrap::OutOfBounds));
    }

    #[test]
    fn grow_then_access_new_region() {
        let mut mem = IsolatedMemory::<2>::try_new(1).unwrap();
        assert_eq!(mem.load_i32(PAGE_SIZE), Err(WasmTrap::OutOfBounds));
        mem.grow(1);
        // Now page 2 is active — access succeeds
        assert!(mem.load_i32(PAGE_SIZE).is_ok());
        mem.store_i32(PAGE_SIZE, 99).unwrap();
        assert_eq!(mem.load_i32(PAGE_SIZE), Ok(99));
    }

    // ── init_data ──

    #[test]
    fn init_data_writes_bytes() {
        let mut mem = Mem::try_new(1).unwrap();
        mem.init_data(10, &[1u8, 2, 3, 4]).unwrap();
        assert_eq!(mem.load_u8(10).unwrap(), 1);
        assert_eq!(mem.load_u8(11).unwrap(), 2);
        assert_eq!(mem.load_u8(12).unwrap(), 3);
        assert_eq!(mem.load_u8(13).unwrap(), 4);
    }

    #[test]
    fn init_data_empty_slice_is_noop() {
        let mut mem = Mem::try_new(1).unwrap();
        assert!(mem.init_data(0, &[]).is_ok());
    }

    #[test]
    fn init_data_out_of_bounds() {
        let mut mem = Mem::try_new(1).unwrap();
        let data = [0u8; 10];
        assert_eq!(
            mem.init_data(PAGE_SIZE - 5, &data),
            Err(WasmTrap::OutOfBounds)
        );
    }

    #[test]
    fn init_data_at_boundary() {
        let mut mem = Mem::try_new(1).unwrap();
        let data = [42u8; 4];
        assert!(mem.init_data(PAGE_SIZE - 4, &data).is_ok());
        assert_eq!(mem.load_u8(PAGE_SIZE - 1).unwrap(), 42);
    }

    #[test]
    fn init_data_overwrites_existing() {
        let mut mem = Mem::try_new(1).unwrap();
        mem.store_u8(5, 0xFF).unwrap();
        mem.init_data(5, &[0xABu8]).unwrap();
        assert_eq!(mem.load_u8(5).unwrap(), 0xAB);
    }

    // ── fill ──

    #[test]
    fn fill_writes_byte_pattern() {
        let mut mem = Mem::try_new(1).unwrap();
        mem.fill(100, 0xAB, 5).unwrap();
        for i in 0..5usize {
            assert_eq!(mem.load_u8(100 + i).unwrap(), 0xAB);
        }
    }

    #[test]
    fn fill_zero_len_is_noop() {
        let mut mem = Mem::try_new(1).unwrap();
        assert!(mem.fill(0, 0xFF, 0).is_ok());
    }

    #[test]
    fn fill_out_of_bounds() {
        let mut mem = Mem::try_new(1).unwrap();
        assert_eq!(mem.fill(PAGE_SIZE - 3, 0, 10), Err(WasmTrap::OutOfBounds));
    }

    #[test]
    fn fill_at_boundary() {
        let mut mem = Mem::try_new(1).unwrap();
        assert!(mem.fill(PAGE_SIZE - 4, 0x42, 4).is_ok());
        assert_eq!(mem.load_u8(PAGE_SIZE - 1).unwrap(), 0x42);
    }

    // ── init_data_partial ──

    #[test]
    fn init_data_partial_copies_subrange() {
        let mut mem = Mem::try_new(1).unwrap();
        let data = b"Hello, World!";
        mem.init_data_partial(0, data, 7, 5).unwrap(); // "World"
        assert_eq!(mem.load_u8(0).unwrap(), b'W');
        assert_eq!(mem.load_u8(4).unwrap(), b'd');
    }

    #[test]
    fn init_data_partial_zero_len_is_noop() {
        let mut mem = Mem::try_new(1).unwrap();
        assert!(mem.init_data_partial(0, b"Hello", 0, 0).is_ok());
    }

    #[test]
    fn init_data_partial_full_segment() {
        let mut mem = Mem::try_new(1).unwrap();
        mem.init_data_partial(10, b"Hello", 0, 5).unwrap();
        assert_eq!(mem.load_u8(10).unwrap(), b'H');
        assert_eq!(mem.load_u8(14).unwrap(), b'o');
    }

    #[test]
    fn init_data_partial_src_out_of_bounds() {
        let mut mem = Mem::try_new(1).unwrap();
        // src_offset=3, len=5: 3+5=8 > 5 (data.len())
        assert_eq!(
            mem.init_data_partial(0, b"Hello", 3, 5),
            Err(WasmTrap::OutOfBounds)
        );
    }

    #[test]
    fn init_data_partial_dst_out_of_bounds() {
        let mut mem = Mem::try_new(1).unwrap();
        assert_eq!(
            mem.init_data_partial(PAGE_SIZE - 2, b"Hello", 0, 5),
            Err(WasmTrap::OutOfBounds)
        );
    }

    #[test]
    fn init_data_partial_src_offset_overflow() {
        let mut mem = Mem::try_new(1).unwrap();
        assert_eq!(
            mem.init_data_partial(0, b"Hello", usize::MAX, 1),
            Err(WasmTrap::OutOfBounds)
        );
    }

    // ── little-endian encoding ──

    #[test]
    fn i32_is_little_endian() {
        let mut mem = Mem::try_new(1).unwrap();
        mem.store_i32(0, 0x04030201).unwrap();
        assert_eq!(mem.load_u8(0), Ok(0x01));
        assert_eq!(mem.load_u8(1), Ok(0x02));
        assert_eq!(mem.load_u8(2), Ok(0x03));
        assert_eq!(mem.load_u8(3), Ok(0x04));
    }
}
904
905// ── Kani Formal Verification Proofs ──────────────────────────────────────
906//
907// These proof harnesses exhaustively verify core invariants of IsolatedMemory
908// using Kani's bounded model checker. Run with: cargo kani -p herkos-runtime
909//
910// The proofs establish that:
911// - All load/store operations either succeed or return Err (never panic)
912// - grow respects MAX_PAGES and zero-initializes new pages
913// - Store/load roundtrips preserve values
914// - Offset overflow is handled correctly
915// - active_pages never exceeds MAX_PAGES
916
#[cfg(kani)]
mod proofs {
    use super::*;

    // NOTE(review): the original harnesses mixed `IsolatedMemory::<N>::new(n)`
    // and `IsolatedMemory::<N>::try_new(n).unwrap()`. Only `try_new` is
    // visible in this file, and cfg(kani) code is never built by a normal
    // `cargo build`/`cargo test`, so a nonexistent `new` would only surface
    // when someone actually runs `cargo kani`. All harnesses are normalized
    // to `try_new(n).unwrap()` here; every initial page count is within
    // MAX_PAGES, so the unwrap cannot fire. (If an infallible `new` does
    // exist elsewhere, this change is behavior-identical — confirm.)

    /// Proof: load_i32 never panics, only returns Ok or Err(OutOfBounds).
    /// Verifies bounds checking correctness for all possible offsets.
    #[kani::proof]
    #[kani::unwind(1)]
    fn load_i32_never_panics() {
        let mem = IsolatedMemory::<4>::try_new(1).unwrap(); // 1 page active = 64 KiB
        let offset: usize = kani::any();

        // Should return Ok or Err(OutOfBounds), never panic.
        let result = mem.load_i32(offset);

        // If successful, offset must be in valid range.
        if result.is_ok() {
            kani::assert(
                offset.checked_add(4).is_some(),
                "successful load must not overflow",
            );
            kani::assert(
                offset + 4 <= mem.active_size(),
                "successful load must be within active region",
            );
        }
    }

    /// Proof: load_i64 never panics for any offset.
    #[kani::proof]
    #[kani::unwind(1)]
    fn load_i64_never_panics() {
        let mem = IsolatedMemory::<4>::try_new(2).unwrap();
        let offset: usize = kani::any();
        let _ = mem.load_i64(offset);
        // Just checking it doesn't panic — Kani verifies this exhaustively.
    }

    /// Proof: load_u8 never panics for any offset.
    #[kani::proof]
    #[kani::unwind(1)]
    fn load_u8_never_panics() {
        let mem = IsolatedMemory::<2>::try_new(1).unwrap();
        let offset: usize = kani::any();
        let _ = mem.load_u8(offset);
    }

    /// Proof: load_u16 never panics for any offset.
    #[kani::proof]
    #[kani::unwind(1)]
    fn load_u16_never_panics() {
        let mem = IsolatedMemory::<2>::try_new(1).unwrap();
        let offset: usize = kani::any();
        let _ = mem.load_u16(offset);
    }

    /// Proof: load_f32 never panics for any offset.
    #[kani::proof]
    #[kani::unwind(1)]
    fn load_f32_never_panics() {
        let mem = IsolatedMemory::<2>::try_new(1).unwrap();
        let offset: usize = kani::any();
        let _ = mem.load_f32(offset);
    }

    /// Proof: load_f64 never panics for any offset.
    #[kani::proof]
    #[kani::unwind(1)]
    fn load_f64_never_panics() {
        let mem = IsolatedMemory::<2>::try_new(1).unwrap();
        let offset: usize = kani::any();
        let _ = mem.load_f64(offset);
    }

    /// Proof: store_i32 never panics for any offset and value.
    #[kani::proof]
    #[kani::unwind(1)]
    fn store_i32_never_panics() {
        let mut mem = IsolatedMemory::<4>::try_new(1).unwrap();
        let offset: usize = kani::any();
        let value: i32 = kani::any();
        let _ = mem.store_i32(offset, value);
    }

    /// Proof: store_i64 never panics for any offset and value.
    #[kani::proof]
    #[kani::unwind(1)]
    fn store_i64_never_panics() {
        let mut mem = IsolatedMemory::<4>::try_new(2).unwrap();
        let offset: usize = kani::any();
        let value: i64 = kani::any();
        let _ = mem.store_i64(offset, value);
    }

    /// Proof: store_u8 never panics for any offset and value.
    #[kani::proof]
    #[kani::unwind(1)]
    fn store_u8_never_panics() {
        let mut mem = IsolatedMemory::<2>::try_new(1).unwrap();
        let offset: usize = kani::any();
        let value: u8 = kani::any();
        let _ = mem.store_u8(offset, value);
    }

    /// Proof: store_u16 never panics for any offset and value.
    #[kani::proof]
    #[kani::unwind(1)]
    fn store_u16_never_panics() {
        let mut mem = IsolatedMemory::<2>::try_new(1).unwrap();
        let offset: usize = kani::any();
        let value: u16 = kani::any();
        let _ = mem.store_u16(offset, value);
    }

    /// Proof: store_f32 never panics for any offset and value.
    #[kani::proof]
    #[kani::unwind(1)]
    fn store_f32_never_panics() {
        let mut mem = IsolatedMemory::<2>::try_new(1).unwrap();
        let offset: usize = kani::any();
        let value: f32 = kani::any();
        let _ = mem.store_f32(offset, value);
    }

    /// Proof: store_f64 never panics for any offset and value.
    #[kani::proof]
    #[kani::unwind(1)]
    fn store_f64_never_panics() {
        let mut mem = IsolatedMemory::<2>::try_new(1).unwrap();
        let offset: usize = kani::any();
        let value: f64 = kani::any();
        let _ = mem.store_f64(offset, value);
    }

    /// Proof: grow respects MAX_PAGES — active_pages never exceeds it.
    ///
    /// Also checks the Wasm `memory.grow` contract: on success the return
    /// value is the *old* page count and active_pages advances by `delta`;
    /// on failure (-1) the page count is unchanged.
    #[kani::proof]
    #[kani::unwind(5)]
    fn grow_respects_max_pages() {
        let mut mem = IsolatedMemory::<4>::try_new(1).unwrap();
        let delta: u32 = kani::any();

        let old_pages = mem.page_count();
        let result = mem.grow(delta);

        // active_pages must never exceed MAX_PAGES.
        kani::assert(
            mem.page_count() <= 4,
            "active_pages must not exceed MAX_PAGES",
        );

        // If grow succeeded, result should be the old page count.
        if result >= 0 {
            kani::assert(result == old_pages as i32, "grow returns old page count");
            // New page count is old + delta (widened to u64 so the sum
            // cannot itself overflow before the comparison).
            let new_expected = old_pages as u64 + delta as u64;
            if new_expected <= 4 {
                kani::assert(
                    mem.page_count() == new_expected as usize,
                    "grow updates active_pages correctly",
                );
            }
        } else {
            // If grow failed, active_pages is unchanged.
            kani::assert(
                mem.page_count() == old_pages,
                "failed grow leaves active_pages unchanged",
            );
        }
    }

    /// Proof: grow returns -1 (failure) if new size would exceed MAX_PAGES.
    #[kani::proof]
    #[kani::unwind(4)]
    fn grow_fails_beyond_max() {
        let mut mem = IsolatedMemory::<4>::try_new(2).unwrap();
        // Try to grow by 3 pages: 2 + 3 = 5 > 4 (MAX_PAGES).
        let result = mem.grow(3);
        kani::assert(result == -1, "grow beyond MAX_PAGES returns -1");
        kani::assert(mem.page_count() == 2, "failed grow leaves pages unchanged");
    }

    /// Proof: store followed by load returns the same value (i32).
    #[kani::proof]
    #[kani::unwind(1)]
    fn store_load_roundtrip_i32() {
        let mut mem = IsolatedMemory::<1>::try_new(1).unwrap();
        let offset: usize = kani::any();
        let value: i32 = kani::any();

        // If store succeeds, load at the same offset must return the same value.
        if mem.store_i32(offset, value).is_ok() {
            let loaded = mem.load_i32(offset);
            kani::assert(loaded.is_ok(), "load succeeds after successful store");
            kani::assert(loaded.unwrap() == value, "load returns the stored value");
        }
    }

    /// Proof: store followed by load returns the same value (i64).
    #[kani::proof]
    #[kani::unwind(1)]
    fn store_load_roundtrip_i64() {
        let mut mem = IsolatedMemory::<1>::try_new(1).unwrap();
        let offset: usize = kani::any();
        let value: i64 = kani::any();

        if mem.store_i64(offset, value).is_ok() {
            kani::assert(
                mem.load_i64(offset) == Ok(value),
                "i64 roundtrip preserves value",
            );
        }
    }

    /// Proof: store followed by load returns the same value (u8).
    #[kani::proof]
    #[kani::unwind(1)]
    fn store_load_roundtrip_u8() {
        let mut mem = IsolatedMemory::<1>::try_new(1).unwrap();
        let offset: usize = kani::any();
        let value: u8 = kani::any();

        if mem.store_u8(offset, value).is_ok() {
            kani::assert(
                mem.load_u8(offset) == Ok(value),
                "u8 roundtrip preserves value",
            );
        }
    }

    /// Proof: store followed by load returns the same value (u16).
    #[kani::proof]
    #[kani::unwind(1)]
    fn store_load_roundtrip_u16() {
        let mut mem = IsolatedMemory::<1>::try_new(1).unwrap();
        let offset: usize = kani::any();
        let value: u16 = kani::any();

        if mem.store_u16(offset, value).is_ok() {
            kani::assert(
                mem.load_u16(offset) == Ok(value),
                "u16 roundtrip preserves value",
            );
        }
    }

    /// Proof: grow zero-initializes new pages.
    #[kani::proof]
    #[kani::unwind(2)]
    fn grow_zeroes_new_pages() {
        let mut mem = IsolatedMemory::<2>::try_new(1).unwrap();

        let result = mem.grow(1);

        if result >= 0 {
            // After grow, the newly activated second page should be zero;
            // offset PAGE_SIZE is its first byte.
            let value = mem.load_i32(PAGE_SIZE);
            if value.is_ok() {
                kani::assert(value.unwrap() == 0, "newly grown page is zero-initialized");
            }
        }
    }

    /// Proof: offset overflow is handled safely (no panic, returns OutOfBounds).
    #[kani::proof]
    #[kani::unwind(1)]
    fn offset_overflow_handled() {
        let mem = IsolatedMemory::<1>::try_new(1).unwrap();
        // usize::MAX + 4 overflows; the bounds check must reject it cleanly.
        let result = mem.load_i32(usize::MAX);
        kani::assert(
            result == Err(WasmTrap::OutOfBounds),
            "overflow offset returns OutOfBounds",
        );
    }

    /// Proof: accesses beyond active_pages (but within MAX_PAGES) are rejected.
    #[kani::proof]
    #[kani::unwind(1)]
    fn access_beyond_active_pages_rejected() {
        // MAX_PAGES=2 but only 1 active.
        let mem = IsolatedMemory::<2>::try_new(1).unwrap();

        // Access in first page: should succeed.
        let result1 = mem.load_i32(0);
        kani::assert(result1.is_ok(), "access within active pages succeeds");

        // Access in second page (allocated but not active yet): should fail.
        let result2 = mem.load_i32(PAGE_SIZE);
        kani::assert(
            result2 == Err(WasmTrap::OutOfBounds),
            "access beyond active_pages is rejected",
        );
    }

    /// Proof: active_size always equals active_pages * PAGE_SIZE.
    #[kani::proof]
    #[kani::unwind(1)]
    fn active_size_invariant() {
        let mem = IsolatedMemory::<4>::try_new(2).unwrap();
        kani::assert(
            mem.active_size() == mem.page_count() * PAGE_SIZE,
            "active_size = active_pages * PAGE_SIZE",
        );
    }

    /// Proof: size() returns active_pages as i32.
    #[kani::proof]
    #[kani::unwind(1)]
    fn size_returns_page_count() {
        let mem = IsolatedMemory::<4>::try_new(3).unwrap();
        kani::assert(
            mem.size() == mem.page_count() as i32,
            "size() returns active_pages",
        );
    }

    /// Proof: successful load requires offset + type_size <= active_size.
    #[kani::proof]
    #[kani::unwind(1)]
    fn load_success_implies_valid_range() {
        let mem = IsolatedMemory::<1>::try_new(1).unwrap();
        let offset: usize = kani::any();

        let result = mem.load_i32(offset);

        if result.is_ok() {
            // Success implies: offset + 4 <= active_size and no overflow.
            let end = offset.checked_add(4);
            kani::assert(end.is_some(), "successful load offset does not overflow");
            kani::assert(
                end.unwrap() <= mem.active_size(),
                "successful load is within bounds",
            );
        }
    }

    /// Proof: successful store requires offset + type_size <= active_size.
    #[kani::proof]
    #[kani::unwind(1)]
    fn store_success_implies_valid_range() {
        let mut mem = IsolatedMemory::<1>::try_new(1).unwrap();
        let offset: usize = kani::any();
        let value: i64 = kani::any();

        let result = mem.store_i64(offset, value);

        if result.is_ok() {
            let end = offset.checked_add(8);
            kani::assert(end.is_some(), "successful store offset does not overflow");
            kani::assert(
                end.unwrap() <= mem.active_size(),
                "successful store is within bounds",
            );
        }
    }

    /// Proof: as_slice returns a slice of exactly active_size bytes.
    #[kani::proof]
    #[kani::unwind(1)]
    fn as_slice_length_correct() {
        let mem = IsolatedMemory::<4>::try_new(2).unwrap();
        let slice = mem.as_slice();
        kani::assert(
            slice.len() == mem.active_size(),
            "as_slice length equals active_size",
        );
    }

    /// Proof: as_mut_slice returns a slice of exactly active_size bytes.
    #[kani::proof]
    #[kani::unwind(1)]
    fn as_mut_slice_length_correct() {
        let mut mem = IsolatedMemory::<4>::try_new(2).unwrap();
        let slice = mem.as_mut_slice();
        kani::assert(
            slice.len() == mem.active_size(),
            "as_mut_slice length equals active_size",
        );
    }
}