ic_stable_structures/lib.rs

#![doc = include_str!("../README.md")]
mod base_vec;
pub mod btreemap;
pub mod cell;
pub use cell::{Cell as StableCell, Cell};
pub mod file_mem;
#[cfg(target_arch = "wasm32")]
mod ic0_memory; // Memory API for canisters.
pub mod log;
pub use log::{Log as StableLog, Log};
pub mod btreeset;
pub mod memory_manager;
pub mod min_heap;
pub mod reader;
pub mod storable;
#[cfg(test)]
mod tests;
mod types;
pub mod vec;
pub use min_heap::{MinHeap, MinHeap as StableMinHeap};
pub use vec::{Vec as StableVec, Vec};
pub mod vec_mem;
pub mod writer;
pub use btreemap::{BTreeMap, BTreeMap as StableBTreeMap};
pub use btreeset::{BTreeSet, BTreeSet as StableBTreeSet};
pub use file_mem::FileMemory;
#[cfg(target_arch = "wasm32")]
pub use ic0_memory::Ic0StableMemory;
use std::error;
use std::fmt::{Display, Formatter};
use std::mem::MaybeUninit;
pub use storable::Storable;
use types::Address;
pub use vec_mem::VectorMemory;

#[cfg(target_arch = "wasm32")]
pub type DefaultMemoryImpl = Ic0StableMemory;

#[cfg(not(target_arch = "wasm32"))]
pub type DefaultMemoryImpl = VectorMemory;
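
// A minimal usage sketch: `DefaultMemoryImpl` resolves to the canister's
// stable memory on wasm32 and to an in-process `VectorMemory` elsewhere, so
// the same data-structure code runs both in production and in native tests:
//
//     use ic_stable_structures::{DefaultMemoryImpl, StableBTreeMap};
//
//     let mut map: StableBTreeMap<u64, u64, DefaultMemoryImpl> =
//         StableBTreeMap::init(DefaultMemoryImpl::default());
//     map.insert(1, 10);
//     assert_eq!(map.get(&1), Some(10));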

const WASM_PAGE_SIZE: u64 = 65536;

/// The maximum number of stable memory pages a canister can address.
pub const MAX_PAGES: u64 = u64::MAX / WASM_PAGE_SIZE;

pub trait Memory {
    /// Returns the current size of the stable memory in WebAssembly
    /// pages. (One WebAssembly page is 64KiB.)
    fn size(&self) -> u64;

    /// Tries to grow the memory by `pages` pages filled with zeroes.
    /// If successful, returns the previous size of the memory (in pages).
    /// Otherwise, returns -1.
    fn grow(&self, pages: u64) -> i64;

    /// Reads `dst.len()` bytes starting at `offset` from the stable memory
    /// into `dst`.
    fn read(&self, offset: u64, dst: &mut [u8]);

    /// Copies `count` bytes starting from `offset` out of the stable memory
    /// into the buffer starting at `dst`.
    ///
    /// This method is an alternative to `read` that does not require
    /// initializing a buffer and may therefore be faster.
    ///
    /// # Safety
    ///
    /// Callers must guarantee that
    ///   * it is valid to write `count` bytes starting from `dst`,
    ///   * `dst..dst + count` does not overlap with `self`.
    ///
    /// Implementations must guarantee that before the method returns, `count`
    /// bytes starting from `dst` will be initialized.
    #[inline]
    unsafe fn read_unsafe(&self, offset: u64, dst: *mut u8, count: usize) {
        // Zero-initialize the buffer so that forming the slice is valid.
        std::ptr::write_bytes(dst, 0, count);
        let slice = std::slice::from_raw_parts_mut(dst, count);
        self.read(offset, slice)
    }

    /// Writes the bytes in `src` to the stable memory starting at `offset`.
    fn write(&self, offset: u64, src: &[u8]);
}
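
// A minimal sketch of the `Memory` contract, exercised through the in-process
// `VectorMemory` backend: memory starts at zero pages, `grow` returns the
// previous size in pages (or -1 on failure), and `read`/`write` move raw
// bytes at byte offsets:
//
//     use ic_stable_structures::{Memory, VectorMemory};
//
//     let memory = VectorMemory::default();
//     assert_eq!(memory.size(), 0);
//     assert_eq!(memory.grow(1), 0); // previous size was 0 pages
//     memory.write(0, b"hello");
//     let mut buf = [0u8; 5];
//     memory.read(0, &mut buf);
//     assert_eq!(&buf, b"hello");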

/// Copies `count` bytes of data starting from `addr` out of the stable memory
/// into `dst`.
///
/// Callers are allowed to pass vectors in any state (e.g. empty vectors).
/// After the method returns, `dst.len() == count`.
/// This method is an alternative to `read` which does not require initializing
/// a buffer and may therefore be faster.
#[inline]
fn read_to_vec<M: Memory>(m: &M, addr: Address, dst: &mut std::vec::Vec<u8>, count: usize) {
    dst.clear();
    dst.reserve_exact(count);
    unsafe {
        m.read_unsafe(addr.get(), dst.as_mut_ptr(), count);
        // SAFETY: read_unsafe guarantees to initialize the first `count` bytes
        dst.set_len(count);
    }
}
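
// A crate-internal usage sketch (hypothetical values; assumes `Address`
// converts from a `u64`, which is not shown in this file): reusing one
// scratch buffer across reads avoids zero-initializing it on every call:
//
//     let mut scratch = std::vec::Vec::new();
//     read_to_vec(&memory, Address::from(0u64), &mut scratch, 5);
//     assert_eq!(scratch.len(), 5);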

/// A helper function that reads a single 32-bit integer encoded as
/// little-endian from the specified memory at the specified address.
fn read_u32<M: Memory>(m: &M, addr: Address) -> u32 {
    let mut buf: [u8; 4] = [0; 4];
    m.read(addr.get(), &mut buf);
    u32::from_le_bytes(buf)
}

/// A helper function that reads a single 64-bit integer encoded as
/// little-endian from the specified memory at the specified address.
fn read_u64<M: Memory>(m: &M, addr: Address) -> u64 {
    let mut buf: [u8; 8] = [0; 8];
    m.read(addr.get(), &mut buf);
    u64::from_le_bytes(buf)
}

/// Writes a single 32-bit integer encoded as little-endian.
fn write_u32<M: Memory>(m: &M, addr: Address, val: u32) {
    write(m, addr.get(), &val.to_le_bytes());
}

/// Writes a single 64-bit integer encoded as little-endian.
fn write_u64<M: Memory>(m: &M, addr: Address, val: u64) {
    write(m, addr.get(), &val.to_le_bytes());
}

#[derive(Debug, PartialEq, Eq)]
pub struct GrowFailed {
    current_size: u64,
    delta: u64,
}

impl Display for GrowFailed {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        write!(
            f,
            "Failed to grow memory: current size={}, delta={}",
            self.current_size, self.delta
        )
    }
}

impl error::Error for GrowFailed {}

/// Writes the bytes at the specified offset, growing the memory size if needed.
fn safe_write<M: Memory>(memory: &M, offset: u64, bytes: &[u8]) -> Result<(), GrowFailed> {
    let last_byte = offset
        .checked_add(bytes.len() as u64)
        .expect("Address space overflow");

    let size_pages = memory.size();
    let size_bytes = size_pages
        .checked_mul(WASM_PAGE_SIZE)
        .expect("Address space overflow");

    if size_bytes < last_byte {
        let diff_bytes = last_byte - size_bytes;
        // Ceiling division: round the byte deficit up to whole pages.
        let diff_pages = diff_bytes
            .checked_add(WASM_PAGE_SIZE - 1)
            .expect("Address space overflow")
            / WASM_PAGE_SIZE;
        if memory.grow(diff_pages) == -1 {
            return Err(GrowFailed {
                current_size: size_pages,
                delta: diff_pages,
            });
        }
    }
    memory.write(offset, bytes);
    Ok(())
}
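
// A worked example of the growth arithmetic above: with a one-page memory
// (65536 bytes), writing 10 bytes at offset 65530 yields last_byte = 65540
// and diff_bytes = 4, so diff_pages = (4 + 65535) / 65536 = 1, i.e. the
// ceiling of diff_bytes / WASM_PAGE_SIZE, and exactly one page is grown.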

/// Like [safe_write], but panics if `Memory::grow` fails.
fn write<M: Memory>(memory: &M, offset: u64, bytes: &[u8]) {
    if let Err(GrowFailed {
        current_size,
        delta,
    }) = safe_write(memory, offset, bytes)
    {
        panic!(
            "Failed to grow memory from {} pages to {} pages (delta = {} pages).",
            current_size,
            current_size + delta,
            delta
        );
    }
}

/// Reads a struct from memory.
///
/// Note: the value is reconstructed from its raw byte representation, so this
/// is only sound for types that are valid for any bit pattern of the bytes
/// read back (plain-old-data layouts).
fn read_struct<T, M: Memory>(addr: Address, memory: &M) -> T {
    let mut value = MaybeUninit::<T>::uninit();
    unsafe {
        memory.read_unsafe(
            addr.get(),
            value.as_mut_ptr() as *mut u8,
            core::mem::size_of::<T>(),
        );
        value.assume_init()
    }
}

/// Writes a struct to memory as its raw byte representation.
fn write_struct<T, M: Memory>(t: &T, addr: Address, memory: &M) {
    let slice = unsafe {
        core::slice::from_raw_parts(t as *const _ as *const u8, core::mem::size_of::<T>())
    };

    write(memory, addr.get(), slice)
}
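
// A hedged round-trip sketch with a hypothetical `#[repr(C)]`, padding-free
// header type (not part of this crate); `Address: From<u64>` is assumed as
// in the sketch above:
//
//     #[repr(C)]
//     struct Header { magic: [u8; 3], version: u8 }
//
//     let header = Header { magic: *b"BTR", version: 1 };
//     write_struct(&header, Address::from(0u64), &memory);
//     let read_back: Header = read_struct(Address::from(0u64), &memory);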

/// `RestrictedMemory` creates a limited view of another memory. This allows
/// one to divide the main memory into non-intersecting ranges and use a
/// different layout in each region.
#[derive(Clone)]
pub struct RestrictedMemory<M: Memory> {
    page_range: core::ops::Range<u64>,
    memory: M,
}

impl<M: Memory> RestrictedMemory<M> {
    pub fn new(memory: M, page_range: core::ops::Range<u64>) -> Self {
        assert!(page_range.end <= MAX_PAGES);
        Self { memory, page_range }
    }
}

impl<M: Memory> Memory for RestrictedMemory<M> {
    fn size(&self) -> u64 {
        let base_size = self.memory.size();
        if base_size < self.page_range.start {
            0
        } else if base_size > self.page_range.end {
            self.page_range.end - self.page_range.start
        } else {
            base_size - self.page_range.start
        }
    }

    fn grow(&self, delta: u64) -> i64 {
        let base_size = self.memory.size();
        if base_size < self.page_range.start {
            // The base memory has not yet reached this region: grow it up to
            // the region start plus `delta`. The region's previous size was
            // 0 pages, so report 0 on success and propagate -1 on failure.
            self.memory
                .grow(self.page_range.start - base_size + delta)
                .min(0)
        } else if base_size >= self.page_range.end {
            // The region is already full; only a zero-page grow can succeed.
            if delta == 0 {
                (self.page_range.end - self.page_range.start) as i64
            } else {
                -1
            }
        } else {
            let pages_left = self.page_range.end - base_size;
            if pages_left < delta {
                -1
            } else {
                let r = self.memory.grow(delta);
                if r < 0 {
                    r
                } else {
                    // Translate the base memory's previous size into a
                    // region-relative page count.
                    r - self.page_range.start as i64
                }
            }
        }
    }

    fn read(&self, offset: u64, dst: &mut [u8]) {
        self.memory
            .read(self.page_range.start * WASM_PAGE_SIZE + offset, dst)
    }

    unsafe fn read_unsafe(&self, offset: u64, dst: *mut u8, count: usize) {
        self.memory
            .read_unsafe(self.page_range.start * WASM_PAGE_SIZE + offset, dst, count)
    }

    fn write(&self, offset: u64, src: &[u8]) {
        self.memory
            .write(self.page_range.start * WASM_PAGE_SIZE + offset, src)
    }
}
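
// A minimal sketch of splitting one memory into two non-intersecting regions,
// each of which behaves like an independent, zero-based memory (the cloned
// `VectorMemory` handles share the same underlying buffer):
//
//     use ic_stable_structures::{Memory, RestrictedMemory, VectorMemory};
//
//     let base = VectorMemory::default();
//     let first = RestrictedMemory::new(base.clone(), 0..10);
//     let second = RestrictedMemory::new(base.clone(), 10..20);
//     assert_eq!(first.grow(1), 0); // grows base pages 0..1
//     first.write(0, b"abc"); // lands at base offset 0
//     assert_eq!(second.size(), 0); // the second region is still empty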