1#![doc = include_str!("../README.md")]
2mod base_vec;
3pub mod btreemap;
4pub mod cell;
5pub use cell::{Cell as StableCell, Cell};
6pub mod file_mem;
7#[cfg(target_arch = "wasm32")]
8mod ic0_memory; pub mod log;
10pub use log::{Log as StableLog, Log};
11pub mod btreeset;
12pub mod memory_manager;
13pub mod min_heap;
14pub mod reader;
15pub mod storable;
16#[cfg(test)]
17mod tests;
18mod types;
19pub mod vec;
20pub use min_heap::{MinHeap, MinHeap as StableMinHeap};
21pub use vec::{Vec as StableVec, Vec};
22pub mod vec_mem;
23pub mod writer;
24pub use btreemap::{BTreeMap, BTreeMap as StableBTreeMap};
25pub use btreeset::{BTreeSet, BTreeSet as StableBTreeSet};
26pub use file_mem::FileMemory;
27#[cfg(target_arch = "wasm32")]
28pub use ic0_memory::Ic0StableMemory;
29use std::error;
30use std::fmt::{Display, Formatter};
31use std::mem::MaybeUninit;
32pub use storable::Storable;
33use types::Address;
34pub use vec_mem::VectorMemory;
35
/// The default memory implementation: IC stable memory when compiled
/// for wasm32 targets.
#[cfg(target_arch = "wasm32")]
pub type DefaultMemoryImpl = Ic0StableMemory;

/// The default memory implementation: an in-process vector-backed
/// memory on non-wasm32 targets (tests, native builds).
#[cfg(not(target_arch = "wasm32"))]
pub type DefaultMemoryImpl = VectorMemory;

// The size of a WebAssembly page, in bytes.
const WASM_PAGE_SIZE: u64 = 65536;

/// The maximum number of 64KiB WebAssembly pages addressable with a
/// `u64` byte offset.
pub const MAX_PAGES: u64 = u64::MAX / WASM_PAGE_SIZE;
46
/// An abstraction over a byte-addressable memory organized in 64KiB
/// WebAssembly pages (see [`WASM_PAGE_SIZE`]).
pub trait Memory {
    /// Returns the current size of the memory in WebAssembly pages.
    fn size(&self) -> u64;

    /// Attempts to grow the memory by `pages` pages.
    ///
    /// Returns the previous size (in pages) on success, or -1 if the
    /// memory cannot be grown.
    fn grow(&self, pages: u64) -> i64;

    /// Copies `dst.len()` bytes starting at byte `offset` into `dst`.
    fn read(&self, offset: u64, dst: &mut [u8]);

    /// Copies `count` bytes starting at byte `offset` into the buffer
    /// pointed to by `dst`.
    ///
    /// # Safety
    ///
    /// The caller must guarantee that `dst` is valid for writes of
    /// `count` bytes.
    #[inline]
    unsafe fn read_unsafe(&self, offset: u64, dst: *mut u8, count: usize) {
        // Zero-initialize the destination first so the `&mut [u8]` formed
        // below never aliases uninitialized memory.
        std::ptr::write_bytes(dst, 0, count);
        let slice = std::slice::from_raw_parts_mut(dst, count);
        self.read(offset, slice)
    }

    /// Copies `src.len()` bytes from `src` into the memory starting at
    /// byte `offset`, growing the memory as needed if the implementation
    /// supports it.
    fn write(&self, offset: u64, src: &[u8]);
}
87
/// Reads exactly `count` bytes from `m` starting at `addr` into `dst`,
/// discarding `dst`'s previous contents.
///
/// Reads directly into the vector's spare capacity via `read_unsafe`,
/// avoiding a separate zero-fill of the buffer before the read.
#[inline]
fn read_to_vec<M: Memory>(m: &M, addr: Address, dst: &mut std::vec::Vec<u8>, count: usize) {
    dst.clear();
    dst.reserve_exact(count);
    unsafe {
        // SAFETY: after `reserve_exact(count)` the buffer behind
        // `dst.as_mut_ptr()` is valid for writes of `count` bytes.
        m.read_unsafe(addr.get(), dst.as_mut_ptr(), count);
        // SAFETY: the first `count` bytes were just initialized by
        // `read_unsafe`, and the capacity is at least `count`.
        dst.set_len(count);
    }
}
104
105fn read_u32<M: Memory>(m: &M, addr: Address) -> u32 {
108 let mut buf: [u8; 4] = [0; 4];
109 m.read(addr.get(), &mut buf);
110 u32::from_le_bytes(buf)
111}
112
113fn read_u64<M: Memory>(m: &M, addr: Address) -> u64 {
116 let mut buf: [u8; 8] = [0; 8];
117 m.read(addr.get(), &mut buf);
118 u64::from_le_bytes(buf)
119}
120
121fn write_u32<M: Memory>(m: &M, addr: Address, val: u32) {
123 write(m, addr.get(), &val.to_le_bytes());
124}
125
126fn write_u64<M: Memory>(m: &M, addr: Address, val: u64) {
128 write(m, addr.get(), &val.to_le_bytes());
129}
130
/// Error returned when a write needed the memory to grow but the
/// underlying memory refused to grow by the requested number of pages.
#[derive(Debug, PartialEq, Eq)]
pub struct GrowFailed {
    // Size of the memory (in pages) at the time the grow was attempted.
    current_size: u64,
    // Number of additional pages that were requested.
    delta: u64,
}

impl Display for GrowFailed {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        let Self { current_size, delta } = self;
        write!(
            f,
            "Failed to grow memory: current size={}, delta={}",
            current_size, delta
        )
    }
}

impl error::Error for GrowFailed {}
148
149fn safe_write<M: Memory>(memory: &M, offset: u64, bytes: &[u8]) -> Result<(), GrowFailed> {
151 let last_byte = offset
152 .checked_add(bytes.len() as u64)
153 .expect("Address space overflow");
154
155 let size_pages = memory.size();
156 let size_bytes = size_pages
157 .checked_mul(WASM_PAGE_SIZE)
158 .expect("Address space overflow");
159
160 if size_bytes < last_byte {
161 let diff_bytes = last_byte - size_bytes;
162 let diff_pages = diff_bytes
163 .checked_add(WASM_PAGE_SIZE - 1)
164 .expect("Address space overflow")
165 / WASM_PAGE_SIZE;
166 if memory.grow(diff_pages) == -1 {
167 return Err(GrowFailed {
168 current_size: size_pages,
169 delta: diff_pages,
170 });
171 }
172 }
173 memory.write(offset, bytes);
174 Ok(())
175}
176
177fn write<M: Memory>(memory: &M, offset: u64, bytes: &[u8]) {
179 if let Err(GrowFailed {
180 current_size,
181 delta,
182 }) = safe_write(memory, offset, bytes)
183 {
184 panic!(
185 "Failed to grow memory from {} pages to {} pages (delta = {} pages).",
186 current_size,
187 current_size + delta,
188 delta
189 );
190 }
191}
192
/// Reads a `T` from `memory` at `addr` by filling the value's bytes
/// directly from memory.
///
/// NOTE(review): this is only sound when `T` is valid for any bit
/// pattern of `size_of::<T>()` bytes (no references, no enums with
/// restricted discriminants, no padding) — confirm all call sites use
/// such plain-old-data types.
fn read_struct<T, M: Memory>(addr: Address, memory: &M) -> T {
    let mut value = MaybeUninit::<T>::uninit();
    unsafe {
        // SAFETY: `value.as_mut_ptr()` is valid for writes of
        // `size_of::<T>()` bytes, as required by `read_unsafe`.
        memory.read_unsafe(
            addr.get(),
            value.as_mut_ptr() as *mut u8,
            core::mem::size_of::<T>(),
        );
        // All of `value`'s bytes were just filled from memory; see the
        // soundness note above regarding which `T` this is valid for.
        value.assume_init()
    }
}
205
/// Writes the raw in-memory bytes of `t` to `memory` at `addr`,
/// growing the memory if necessary (via [`write`]).
///
/// NOTE(review): if `T` contains padding, those bytes are
/// uninitialized and viewing them through a `&[u8]` is unsound —
/// verify callers only use padding-free types.
fn write_struct<T, M: Memory>(t: &T, addr: Address, memory: &M) {
    // SAFETY: `t` points to a live `T`, so it is valid for reads of
    // `size_of::<T>()` bytes (modulo the padding caveat above).
    let slice = unsafe {
        core::slice::from_raw_parts(t as *const _ as *const u8, core::mem::size_of::<T>())
    };

    write(memory, addr.get(), slice)
}
214
/// A [`Memory`] adapter that exposes only the pages
/// `[page_range.start, page_range.end)` of an underlying memory.
///
/// Offsets passed to the trait methods are relative to the start of the
/// window: byte 0 of the restricted memory is byte
/// `page_range.start * WASM_PAGE_SIZE` of the underlying memory.
#[derive(Clone)]
pub struct RestrictedMemory<M: Memory> {
    page_range: core::ops::Range<u64>,
    memory: M,
}
223
224impl<M: Memory> RestrictedMemory<M> {
225 pub fn new(memory: M, page_range: core::ops::Range<u64>) -> Self {
226 assert!(page_range.end <= MAX_PAGES);
227 Self { memory, page_range }
228 }
229}
230
impl<M: Memory> Memory for RestrictedMemory<M> {
    /// Size of the restricted view in pages: the part of the underlying
    /// memory's size that falls inside the window, clamped to the window.
    fn size(&self) -> u64 {
        let base_size = self.memory.size();
        if base_size < self.page_range.start {
            // The underlying memory hasn't reached our window yet.
            0
        } else if base_size > self.page_range.end {
            // The underlying memory extends past the window; report the
            // full window size.
            self.page_range.end - self.page_range.start
        } else {
            // The underlying memory ends inside the window.
            base_size - self.page_range.start
        }
    }

    fn grow(&self, delta: u64) -> i64 {
        let base_size = self.memory.size();
        if base_size < self.page_range.start {
            // The underlying memory ends before the window starts: grow it
            // up to the window start plus `delta`. On success the previous
            // restricted size was 0, hence `.min(0)`; a failure (-1) is
            // passed through unchanged.
            self.memory
                .grow(self.page_range.start - base_size + delta)
                .min(0)
        } else if base_size >= self.page_range.end {
            // The window is already fully allocated.
            if delta == 0 {
                // Growing by 0 reports the current (full) restricted size.
                (self.page_range.end - self.page_range.start) as i64
            } else {
                // No room left inside the window.
                -1
            }
        } else {
            // The underlying memory ends somewhere inside the window.
            let pages_left = self.page_range.end - base_size;
            if pages_left < delta {
                -1
            } else {
                let r = self.memory.grow(delta);
                if r < 0 {
                    // Propagate the underlying failure.
                    r
                } else {
                    // Translate the underlying previous size into a
                    // window-relative previous size.
                    r - self.page_range.start as i64
                }
            }
        }
    }

    /// Reads from the underlying memory, translating `offset` from
    /// window-relative to absolute bytes.
    fn read(&self, offset: u64, dst: &mut [u8]) {
        self.memory
            .read(self.page_range.start * WASM_PAGE_SIZE + offset, dst)
    }

    /// # Safety
    ///
    /// Same contract as [`Memory::read_unsafe`]: `dst` must be valid for
    /// writes of `count` bytes.
    unsafe fn read_unsafe(&self, offset: u64, dst: *mut u8, count: usize) {
        self.memory
            .read_unsafe(self.page_range.start * WASM_PAGE_SIZE + offset, dst, count)
    }

    /// Writes to the underlying memory, translating `offset` from
    /// window-relative to absolute bytes.
    fn write(&self, offset: u64, src: &[u8]) {
        self.memory
            .write(self.page_range.start * WASM_PAGE_SIZE + offset, src)
    }
}