// ic_stable_structures/lib.rs

1#![doc = include_str!("../README.md")]
2mod base_vec;
3pub mod btreemap;
4pub mod cell;
5pub use cell::{Cell as StableCell, Cell};
6pub mod btreeset;
7pub mod file_mem;
8#[cfg(target_arch = "wasm32")]
9mod ic0_memory; // Memory API for canisters.
10pub mod log;
11pub mod memory_manager;
12pub mod min_heap;
13pub mod reader;
14pub mod storable;
15#[cfg(test)]
16mod tests;
17mod types;
18pub mod vec;
19pub mod vec_mem;
20pub mod writer;
21
22pub use btreemap::{BTreeMap, BTreeMap as StableBTreeMap};
23pub use btreeset::{BTreeSet, BTreeSet as StableBTreeSet};
24pub use file_mem::FileMemory;
25#[cfg(target_arch = "wasm32")]
26pub use ic0_memory::Ic0StableMemory;
27pub use log::{Log as StableLog, Log};
28pub use min_heap::{MinHeap, MinHeap as StableMinHeap};
29use std::error;
30use std::fmt::{Display, Formatter};
31use std::mem::MaybeUninit;
32pub use storable::Storable;
33use types::Address;
34pub use vec::{Vec as StableVec, Vec};
35pub use vec_mem::VectorMemory;
36
/// On Wasm (canister) builds, the default memory is backed by the IC
/// stable-memory system API.
#[cfg(target_arch = "wasm32")]
pub type DefaultMemoryImpl = Ic0StableMemory;

/// On native (non-Wasm) builds, the default memory is an in-process
/// byte vector, useful for tests.
#[cfg(not(target_arch = "wasm32"))]
pub type DefaultMemoryImpl = VectorMemory;

// Size of a WebAssembly page in bytes (64 KiB).
const WASM_PAGE_SIZE: u64 = 65536;

/// The maximum number of stable memory pages a canister can address.
pub const MAX_PAGES: u64 = u64::MAX / WASM_PAGE_SIZE;
47
/// Abstraction over a WebAssembly-style linear memory (e.g., stable memory).
///
/// Implementations are expected to mirror WebAssembly semantics:
/// out-of-bounds accesses will cause a panic (in native) or trap (in Wasm).
pub trait Memory {
    /// Returns the current size of the memory in WebAssembly pages
    /// (one page is 64 KiB).
    fn size(&self) -> u64;

    /// Grows the memory by `pages` zero-filled pages.
    ///
    /// On success, returns the size (in pages) the memory had before
    /// growing; returns -1 if the memory could not be grown.
    fn grow(&self, pages: u64) -> i64;

    /// Fills `dst` with `dst.len()` bytes copied from memory starting at
    /// `offset`.
    ///
    /// Panics or traps if the read would go out of bounds.
    fn read(&self, offset: u64, dst: &mut [u8]);

    /// Unsafe variant of `read` for advanced use.
    ///
    /// Copies `count` bytes from memory starting at `offset` into the
    /// raw pointer `dst`. Initializes the destination before reading.
    ///
    /// # Safety
    ///
    /// Caller must ensure:
    /// - `dst` points to valid writable memory of at least `count` bytes.
    /// - The memory range `dst..dst+count` does not overlap with `self`.
    ///
    /// Panics or traps if the read would go out of bounds.
    #[inline]
    unsafe fn read_unsafe(&self, offset: u64, dst: *mut u8, count: usize) {
        // Zero-fill first so the raw buffer can legally be viewed as an
        // initialized slice, then delegate to the safe `read`.
        dst.write_bytes(0, count);
        self.read(offset, std::slice::from_raw_parts_mut(dst, count));
    }

    /// Copies `src.len()` bytes from `src` into memory starting at `offset`.
    ///
    /// Panics or traps if the write would go out of bounds.
    fn write(&self, offset: u64, src: &[u8]);
}
94
/// Copies `count` bytes of data starting from `addr` out of the stable memory into `dst`.
///
/// Callers are allowed to pass vectors in any state (e.g. empty vectors).
/// After the method returns, `dst.len() == count`.
/// This method is an alternative to `read` which does not require initializing a buffer and may
/// therefore be faster.
///
/// Panics or traps if the underlying read goes out of bounds.
#[inline]
fn read_to_vec<M: Memory>(m: &M, addr: Address, dst: &mut std::vec::Vec<u8>, count: usize) {
    // Discard any existing contents; existing capacity is reused and
    // topped up to exactly `count` bytes.
    dst.clear();
    dst.reserve_exact(count);
    unsafe {
        // Read directly into the vector's (uninitialized) spare capacity.
        m.read_unsafe(addr.get(), dst.as_mut_ptr(), count);
        // SAFETY: read_unsafe guarantees to initialize the first `count` bytes
        dst.set_len(count);
    }
}
111
112/// A helper function that reads a single 32bit integer encoded as
113/// little-endian from the specified memory at the specified offset.
114fn read_u32<M: Memory>(m: &M, addr: Address) -> u32 {
115    let mut buf: [u8; 4] = [0; 4];
116    m.read(addr.get(), &mut buf);
117    u32::from_le_bytes(buf)
118}
119
120/// A helper function that reads a single 64bit integer encoded as
121/// little-endian from the specified memory at the specified offset.
122fn read_u64<M: Memory>(m: &M, addr: Address) -> u64 {
123    let mut buf: [u8; 8] = [0; 8];
124    m.read(addr.get(), &mut buf);
125    u64::from_le_bytes(buf)
126}
127
128/// Reads `count` Address values with maximum performance using unsafe code.
129fn read_address_vec<M: Memory>(m: &M, addr: Address, count: usize) -> std::vec::Vec<Address> {
130    let chunk_size = Address::size().get() as usize;
131    assert_eq!(chunk_size, 8);
132    let byte_len = count * chunk_size;
133    let mut buf = std::vec::Vec::with_capacity(byte_len);
134    read_to_vec(m, addr, &mut buf, byte_len);
135
136    buf.chunks_exact(chunk_size)
137        .map(|chunk| {
138            // SAFETY: chunks_exact(chunk_size) ensures each chunk has exactly 8 bytes
139            let bytes: [u8; 8] = chunk.try_into().expect("chunk must be 8 bytes");
140            Address::from(u64::from_le_bytes(bytes))
141        })
142        .collect()
143}
144
145/// Writes a single 32-bit integer encoded as little-endian.
146fn write_u32<M: Memory>(m: &M, addr: Address, val: u32) {
147    write(m, addr.get(), &val.to_le_bytes());
148}
149
150/// Writes a single 64-bit integer encoded as little-endian.
151fn write_u64<M: Memory>(m: &M, addr: Address, val: u64) {
152    write(m, addr.get(), &val.to_le_bytes());
153}
154
/// Error returned by [safe_write] when the underlying memory refuses to
/// grow to the size required by a write.
#[derive(Debug, PartialEq, Eq)]
pub struct GrowFailed {
    /// Size of the memory, in pages, at the time the grow was attempted.
    current_size: u64,
    /// Number of additional pages that were requested.
    delta: u64,
}
160
161impl Display for GrowFailed {
162    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
163        write!(
164            f,
165            "Failed to grow memory: current size={}, delta={}",
166            self.current_size, self.delta
167        )
168    }
169}
170
171impl error::Error for GrowFailed {}
172
173/// Writes the bytes at the specified offset, growing the memory size if needed.
174fn safe_write<M: Memory>(memory: &M, offset: u64, bytes: &[u8]) -> Result<(), GrowFailed> {
175    let last_byte = offset
176        .checked_add(bytes.len() as u64)
177        .expect("Address space overflow");
178
179    let size_pages = memory.size();
180    let size_bytes = size_pages
181        .checked_mul(WASM_PAGE_SIZE)
182        .expect("Address space overflow");
183
184    if size_bytes < last_byte {
185        let diff_bytes = last_byte - size_bytes;
186        let diff_pages = diff_bytes
187            .checked_add(WASM_PAGE_SIZE - 1)
188            .expect("Address space overflow")
189            / WASM_PAGE_SIZE;
190        if memory.grow(diff_pages) == -1 {
191            return Err(GrowFailed {
192                current_size: size_pages,
193                delta: diff_pages,
194            });
195        }
196    }
197    memory.write(offset, bytes);
198    Ok(())
199}
200
201/// Like [safe_write], but panics if the memory.grow fails.
202fn write<M: Memory>(memory: &M, offset: u64, bytes: &[u8]) {
203    if let Err(GrowFailed {
204        current_size,
205        delta,
206    }) = safe_write(memory, offset, bytes)
207    {
208        panic!(
209            "Failed to grow memory from {} pages to {} pages (delta = {} pages).",
210            current_size,
211            current_size + delta,
212            delta
213        );
214    }
215}
216
/// Reads a struct from memory by reinterpreting `size_of::<T>()` raw bytes
/// stored at `addr` as a `T`.
///
/// NOTE(review): this is sound only if every byte pattern is a valid `T`
/// (plain-old-data; no `bool`/enum/reference fields). `read_unsafe`
/// initializes all bytes (including any padding), but invalid bit patterns
/// in `T` would make `assume_init` UB — confirm all callers use POD structs.
fn read_struct<T, M: Memory>(addr: Address, memory: &M) -> T {
    let mut value = MaybeUninit::<T>::uninit();
    unsafe {
        // Fill the uninitialized value byte-for-byte from memory.
        memory.read_unsafe(
            addr.get(),
            value.as_mut_ptr() as *mut u8,
            core::mem::size_of::<T>(),
        );
        // SAFETY: read_unsafe initialized all size_of::<T>() bytes above.
        value.assume_init()
    }
}
229
/// Writes a struct to memory by copying its raw in-memory representation.
///
/// NOTE(review): if `T` has padding, the padding bytes read here may be
/// uninitialized — confirm callers only use `#[repr(C)]`/packed POD types.
fn write_struct<T, M: Memory>(t: &T, addr: Address, memory: &M) {
    // View the struct as a byte slice of its exact size.
    let slice = unsafe {
        core::slice::from_raw_parts(t as *const _ as *const u8, core::mem::size_of::<T>())
    };

    write(memory, addr.get(), slice)
}
238
/// RestrictedMemory creates a limited view of another memory.  This
/// allows one to divide the main memory into non-intersecting ranges
/// and use different layouts in each region.
#[derive(Clone)]
pub struct RestrictedMemory<M: Memory> {
    /// The page range of `memory` this view exposes; offsets within the
    /// view are translated by `page_range.start` pages.
    page_range: core::ops::Range<u64>,
    /// The underlying memory being restricted.
    memory: M,
}
247
248impl<M: Memory> RestrictedMemory<M> {
249    pub fn new(memory: M, page_range: core::ops::Range<u64>) -> Self {
250        assert!(page_range.end <= MAX_PAGES);
251        Self { memory, page_range }
252    }
253}
254
impl<M: Memory> Memory for RestrictedMemory<M> {
    /// Size of the view: the number of base pages that fall inside
    /// `page_range`, clamped to the window's length.
    fn size(&self) -> u64 {
        let base_size = self.memory.size();
        if base_size < self.page_range.start {
            // Base memory has not yet grown into our window.
            0
        } else if base_size > self.page_range.end {
            // Base memory extends past our window; report the full window.
            self.page_range.end - self.page_range.start
        } else {
            base_size - self.page_range.start
        }
    }

    fn grow(&self, delta: u64) -> i64 {
        let base_size = self.memory.size();
        if base_size < self.page_range.start {
            // Base memory ends before our window starts: grow it up to the
            // window start plus `delta`. On success the view's previous size
            // was 0, hence `.min(0)`; a base failure (-1) passes through.
            self.memory
                .grow(self.page_range.start - base_size + delta)
                .min(0)
        } else if base_size >= self.page_range.end {
            // The window is already full; only a zero-page grow succeeds,
            // reporting the (unchanged) full window size.
            if delta == 0 {
                (self.page_range.end - self.page_range.start) as i64
            } else {
                -1
            }
        } else {
            // Partially grown into the window: honor the request only if it
            // fits in the remaining pages of the window.
            let pages_left = self.page_range.end - base_size;
            if pages_left < delta {
                -1
            } else {
                let r = self.memory.grow(delta);
                if r < 0 {
                    r
                } else {
                    // Translate the base memory's previous size into
                    // view-relative pages.
                    r - self.page_range.start as i64
                }
            }
        }
    }

    // The read/write/read_unsafe methods translate view-relative byte
    // offsets by the window's start page before delegating to the base.

    fn read(&self, offset: u64, dst: &mut [u8]) {
        self.memory
            .read(self.page_range.start * WASM_PAGE_SIZE + offset, dst)
    }

    unsafe fn read_unsafe(&self, offset: u64, dst: *mut u8, count: usize) {
        self.memory
            .read_unsafe(self.page_range.start * WASM_PAGE_SIZE + offset, dst, count)
    }

    fn write(&self, offset: u64, src: &[u8]) {
        self.memory
            .write(self.page_range.start * WASM_PAGE_SIZE + offset, src)
    }
}