1#![doc = include_str!("../README.md")]
2mod base_vec;
3pub mod btreemap;
4pub mod cell;
5pub use cell::{Cell as StableCell, Cell};
6pub mod btreeset;
7pub mod file_mem;
8#[cfg(target_arch = "wasm32")]
9mod ic0_memory; pub mod log;
11pub mod memory_manager;
12pub mod min_heap;
13pub mod reader;
14pub mod storable;
15#[cfg(test)]
16mod tests;
17mod types;
18pub mod vec;
19pub mod vec_mem;
20pub mod writer;
21
22pub use btreemap::{BTreeMap, BTreeMap as StableBTreeMap};
23pub use btreeset::{BTreeSet, BTreeSet as StableBTreeSet};
24pub use file_mem::FileMemory;
25#[cfg(target_arch = "wasm32")]
26pub use ic0_memory::Ic0StableMemory;
27pub use log::{Log as StableLog, Log};
28pub use min_heap::{MinHeap, MinHeap as StableMinHeap};
29use std::error;
30use std::fmt::{Display, Formatter};
31use std::mem::MaybeUninit;
32pub use storable::Storable;
33use types::Address;
34pub use vec::{Vec as StableVec, Vec};
35pub use vec_mem::VectorMemory;
36
/// On Wasm canister builds, the default memory is the IC's stable memory.
#[cfg(target_arch = "wasm32")]
pub type DefaultMemoryImpl = Ic0StableMemory;

/// On native builds (tests, tooling), the default memory is an in-process,
/// heap-backed vector memory.
#[cfg(not(target_arch = "wasm32"))]
pub type DefaultMemoryImpl = VectorMemory;
42
/// The size of a WebAssembly memory page, in bytes (64 KiB).
const WASM_PAGE_SIZE: u64 = 65536;

/// The largest page count representable without the byte size overflowing `u64`.
pub const MAX_PAGES: u64 = u64::MAX / WASM_PAGE_SIZE;
47
/// An abstraction over a linear byte-addressable memory that is sized and
/// grown in Wasm pages of `WASM_PAGE_SIZE` bytes.
pub trait Memory {
    /// Returns the current size of the memory, in Wasm pages.
    fn size(&self) -> u64;

    /// Grows the memory by `pages` pages. Returns the previous size in
    /// pages on success, or -1 if the memory cannot be grown.
    fn grow(&self, pages: u64) -> i64;

    /// Copies `dst.len()` bytes starting at `offset` into `dst`.
    fn read(&self, offset: u64, dst: &mut [u8]);

    /// Copies `count` bytes starting at `offset` into the buffer at `dst`.
    ///
    /// # Safety
    ///
    /// `dst` must be valid for writes of `count` bytes. The default
    /// implementation zero-fills the destination first so that forming a
    /// mutable slice over it is sound even if the buffer was previously
    /// uninitialized.
    #[inline]
    unsafe fn read_unsafe(&self, offset: u64, dst: *mut u8, count: usize) {
        // Initialize the destination so the slice below never observes
        // uninitialized bytes.
        std::ptr::write_bytes(dst, 0, count);
        let slice = std::slice::from_raw_parts_mut(dst, count);
        self.read(offset, slice)
    }

    /// Writes the bytes in `src` to the memory starting at `offset`.
    fn write(&self, offset: u64, src: &[u8]);
}
94
/// Reads exactly `count` bytes starting at `addr` into `dst`, replacing
/// any previous contents of `dst`.
#[inline]
fn read_to_vec<M: Memory>(m: &M, addr: Address, dst: &mut std::vec::Vec<u8>, count: usize) {
    dst.clear();
    dst.reserve_exact(count);
    unsafe {
        // SAFETY: `reserve_exact` above guarantees at least `count` bytes of
        // spare capacity, and `read_unsafe` initializes all of them, so
        // raising the length to `count` afterwards is sound.
        m.read_unsafe(addr.get(), dst.as_mut_ptr(), count);
        dst.set_len(count);
    }
}
111
112fn read_u32<M: Memory>(m: &M, addr: Address) -> u32 {
115 let mut buf: [u8; 4] = [0; 4];
116 m.read(addr.get(), &mut buf);
117 u32::from_le_bytes(buf)
118}
119
120fn read_u64<M: Memory>(m: &M, addr: Address) -> u64 {
123 let mut buf: [u8; 8] = [0; 8];
124 m.read(addr.get(), &mut buf);
125 u64::from_le_bytes(buf)
126}
127
128fn read_address_vec<M: Memory>(m: &M, addr: Address, count: usize) -> std::vec::Vec<Address> {
130 let chunk_size = Address::size().get() as usize;
131 assert_eq!(chunk_size, 8);
132 let byte_len = count * chunk_size;
133 let mut buf = std::vec::Vec::with_capacity(byte_len);
134 read_to_vec(m, addr, &mut buf, byte_len);
135
136 buf.chunks_exact(chunk_size)
137 .map(|chunk| {
138 let bytes: [u8; 8] = chunk.try_into().expect("chunk must be 8 bytes");
140 Address::from(u64::from_le_bytes(bytes))
141 })
142 .collect()
143}
144
145fn write_u32<M: Memory>(m: &M, addr: Address, val: u32) {
147 write(m, addr.get(), &val.to_le_bytes());
148}
149
150fn write_u64<M: Memory>(m: &M, addr: Address, val: u64) {
152 write(m, addr.get(), &val.to_le_bytes());
153}
154
/// Error returned when a `Memory` refuses to grow by the requested number
/// of pages.
#[derive(Debug, PartialEq, Eq)]
pub struct GrowFailed {
    // Size of the memory, in pages, at the time the grow was attempted.
    current_size: u64,
    // Number of additional pages that were requested.
    delta: u64,
}
160
161impl Display for GrowFailed {
162 fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
163 write!(
164 f,
165 "Failed to grow memory: current size={}, delta={}",
166 self.current_size, self.delta
167 )
168 }
169}
170
// Marker impl so `GrowFailed` can travel as a boxed `dyn Error`; the
// message comes from the `Display` impl.
impl error::Error for GrowFailed {}
172
173fn safe_write<M: Memory>(memory: &M, offset: u64, bytes: &[u8]) -> Result<(), GrowFailed> {
175 let last_byte = offset
176 .checked_add(bytes.len() as u64)
177 .expect("Address space overflow");
178
179 let size_pages = memory.size();
180 let size_bytes = size_pages
181 .checked_mul(WASM_PAGE_SIZE)
182 .expect("Address space overflow");
183
184 if size_bytes < last_byte {
185 let diff_bytes = last_byte - size_bytes;
186 let diff_pages = diff_bytes
187 .checked_add(WASM_PAGE_SIZE - 1)
188 .expect("Address space overflow")
189 / WASM_PAGE_SIZE;
190 if memory.grow(diff_pages) == -1 {
191 return Err(GrowFailed {
192 current_size: size_pages,
193 delta: diff_pages,
194 });
195 }
196 }
197 memory.write(offset, bytes);
198 Ok(())
199}
200
201fn write<M: Memory>(memory: &M, offset: u64, bytes: &[u8]) {
203 if let Err(GrowFailed {
204 current_size,
205 delta,
206 }) = safe_write(memory, offset, bytes)
207 {
208 panic!(
209 "Failed to grow memory from {} pages to {} pages (delta = {} pages).",
210 current_size,
211 current_size + delta,
212 delta
213 );
214 }
215}
216
/// Reads a value of type `T` from `addr` by copying `size_of::<T>()` raw
/// bytes into place.
///
/// NOTE(review): `assume_init` on raw bytes is only sound when `T` is
/// valid for every bit pattern (plain-old-data: no references, no enums
/// with restricted discriminants, no `bool`s, ...) — confirm for each `T`
/// used at call sites.
fn read_struct<T, M: Memory>(addr: Address, memory: &M) -> T {
    let mut value = MaybeUninit::<T>::uninit();
    unsafe {
        // SAFETY: `value` provides exactly `size_of::<T>()` writable bytes,
        // and `read_unsafe` fills all of them before `assume_init` runs.
        memory.read_unsafe(
            addr.get(),
            value.as_mut_ptr() as *mut u8,
            core::mem::size_of::<T>(),
        );
        value.assume_init()
    }
}
229
/// Writes the raw in-memory bytes of `t` to `addr`.
///
/// NOTE(review): if `T` has padding, this reads padding bytes through the
/// byte slice, which is undefined behavior — this assumes every `T` used
/// here is padding-free; verify at call sites.
fn write_struct<T, M: Memory>(t: &T, addr: Address, memory: &M) {
    // SAFETY: any `&T` is valid for reads of `size_of::<T>()` bytes at its
    // own address.
    let slice = unsafe {
        core::slice::from_raw_parts(t as *const _ as *const u8, core::mem::size_of::<T>())
    };

    write(memory, addr.get(), slice)
}
238
/// A view over an underlying `Memory` that exposes only the pages in
/// `page_range`, re-based so the view's address space starts at 0.
#[derive(Clone)]
pub struct RestrictedMemory<M: Memory> {
    // Half-open range of pages (in the underlying memory's coordinates)
    // that this view spans.
    page_range: core::ops::Range<u64>,
    memory: M,
}
247
248impl<M: Memory> RestrictedMemory<M> {
249 pub fn new(memory: M, page_range: core::ops::Range<u64>) -> Self {
250 assert!(page_range.end <= MAX_PAGES);
251 Self { memory, page_range }
252 }
253}
254
impl<M: Memory> Memory for RestrictedMemory<M> {
    // Size of the view: how far the underlying memory reaches into the
    // window, clamped to the window's width.
    fn size(&self) -> u64 {
        let base_size = self.memory.size();
        if base_size < self.page_range.start {
            // The underlying memory hasn't reached the window yet.
            0
        } else if base_size > self.page_range.end {
            // Underlying memory extends past the window: report the full width.
            self.page_range.end - self.page_range.start
        } else {
            base_size - self.page_range.start
        }
    }

    fn grow(&self, delta: u64) -> i64 {
        let base_size = self.memory.size();
        if base_size < self.page_range.start {
            // Grow the underlying memory up to the window's start plus
            // `delta`. From this view's perspective the previous size was
            // 0 pages, hence `.min(0)`; a negative result still reports
            // failure unchanged.
            self.memory
                .grow(self.page_range.start - base_size + delta)
                .min(0)
        } else if base_size >= self.page_range.end {
            // Window is already full: `grow(0)` acts as a size query,
            // any real growth must fail.
            if delta == 0 {
                (self.page_range.end - self.page_range.start) as i64
            } else {
                -1
            }
        } else {
            let pages_left = self.page_range.end - base_size;
            if pages_left < delta {
                // The request would overflow the window.
                -1
            } else {
                let r = self.memory.grow(delta);
                if r < 0 {
                    r
                } else {
                    // Translate the underlying previous size into the
                    // window's coordinate system.
                    r - self.page_range.start as i64
                }
            }
        }
    }

    fn read(&self, offset: u64, dst: &mut [u8]) {
        // Re-base the offset onto the window's first page.
        self.memory
            .read(self.page_range.start * WASM_PAGE_SIZE + offset, dst)
    }

    unsafe fn read_unsafe(&self, offset: u64, dst: *mut u8, count: usize) {
        // Re-base the offset; safety contract is forwarded to the
        // underlying memory's `read_unsafe`.
        self.memory
            .read_unsafe(self.page_range.start * WASM_PAGE_SIZE + offset, dst, count)
    }

    fn write(&self, offset: u64, src: &[u8]) {
        // Re-base the offset onto the window's first page.
        self.memory
            .write(self.page_range.start * WASM_PAGE_SIZE + offset, src)
    }
}