use crate::mmap::Mmap;
use crate::vmcontext::VMMemoryDefinition;
use more_asserts::assert_ge;
use std::borrow::BorrowMut;
use std::cell::UnsafeCell;
use std::convert::TryInto;
use std::fmt;
use std::ptr::NonNull;
use std::sync::Mutex;
use thiserror::Error;
use unc_vm_types::{Bytes, MemoryType, Pages};

/// Error type describing things that can go wrong when operating on Wasm memories.
#[derive(Error, Debug, Clone, PartialEq, Hash)]
pub enum MemoryError {
    /// Low-level error coming from the memory-mapping layer.
    #[error("Error when allocating memory: {0}")]
    Region(String),
    /// The operation would exceed the memory's limits or overflow its size.
    #[error("The memory could not grow: current size {} pages, requested increase: {} pages", current.0, attempted_delta.0)]
    CouldNotGrow {
        /// The current size, in pages.
        current: Pages,
        /// The requested increase, in pages.
        attempted_delta: Pages,
    },
    /// The memory description itself is invalid.
    #[error("The memory is invalid because {}", reason)]
    InvalidMemory {
        /// The reason why the provided memory is invalid.
        reason: String,
    },
    /// The caller asked for a minimum larger than what can be allocated.
    #[error("The minimum requested ({} pages) memory is greater than the maximum allowed memory ({} pages)", min_requested.0, max_allowed.0)]
    MinimumMemoryTooLarge {
        /// The number of pages requested as the minimum amount of memory.
        min_requested: Pages,
        /// The maximum amount of memory that can be allocated.
        max_allowed: Pages,
    },
    /// The caller asked for a maximum larger than what can be allocated.
    #[error("The maximum requested memory ({} pages) is greater than the maximum allowed memory ({} pages)", max_requested.0, max_allowed.0)]
    MaximumMemoryTooLarge {
        /// The number of pages requested as the maximum amount of memory.
        max_requested: Pages,
        /// The maximum amount of memory that can be allocated.
        max_allowed: Pages,
    },
    /// A user-defined error value, used for errors not listed above.
    #[error("A user-defined error occurred: {0}")]
    Generic(String),
}

/// Implementation styles for a WebAssembly linear memory.
#[derive(Debug, Clone, PartialEq, Eq, Hash, rkyv::Serialize, rkyv::Deserialize, rkyv::Archive)]
pub enum MemoryStyle {
    /// The memory is allocated at its minimum size and may be moved when it grows.
    Dynamic {
        /// Size, in bytes, of the guard region placed after the memory.
        offset_guard_size: u64,
    },
    /// Address space up to `bound` is reserved up front, so the memory never moves.
    Static {
        /// The number of pages reserved (mapped plus unmapped).
        bound: Pages,
        /// Size, in bytes, of the guard region placed after the memory.
        offset_guard_size: u64,
    },
}

impl MemoryStyle {
    /// Returns the offset-guard size, in bytes, for this memory style.
    pub fn offset_guard_size(&self) -> u64 {
        match self {
            Self::Dynamic { offset_guard_size } => *offset_guard_size,
            Self::Static { offset_guard_size, .. } => *offset_guard_size,
        }
    }
}
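
/// Trait for implementing the representation of a WebAssembly linear memory.
///
/// A minimal sketch of inspecting a memory through a trait object (illustrative
/// only; it assumes some `mem: &dyn Memory` obtained elsewhere):
///
/// ```ignore
/// fn describe(mem: &dyn Memory) -> String {
///     format!(
///         "{} pages, {}-byte offset guard",
///         mem.size().0,
///         mem.style().offset_guard_size(),
///     )
/// }
/// ```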
pub trait Memory: fmt::Debug + Send + Sync {
    /// Returns the memory type for this memory.
    fn ty(&self) -> MemoryType;

    /// Returns the memory style for this memory.
    fn style(&self) -> &MemoryStyle;

    /// Returns the number of allocated wasm pages.
    fn size(&self) -> Pages;

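    /// Grow this memory by `delta` pages, returning the previous size in pages
    /// on success.
    ///
    /// A minimal usage sketch (illustrative only; error handling elided and the
    /// arithmetic written against the public `.0` field of `Pages`):
    ///
    /// ```ignore
    /// let before = mem.size();
    /// assert_eq!(mem.grow(Pages(1))?, before);
    /// assert_eq!(mem.size().0, before.0 + 1);
    /// ```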
    fn grow(&self, delta: Pages) -> Result<Pages, MemoryError>;

    /// Return a raw pointer to the `VMMemoryDefinition` that describes this
    /// memory to compiled wasm code.
    fn vmmemory(&self) -> NonNull<VMMemoryDefinition>;
}

/// A linear memory instance backed by an mmap'd region.
#[derive(Debug)]
pub struct LinearMemory {
    /// The underlying allocation, guarded by a mutex so that growth is thread-safe.
    mmap: Mutex<WasmMmap>,

    /// The optional maximum size, in wasm pages, of this linear memory.
    maximum: Option<Pages>,

    /// The type of the linear memory.
    memory: MemoryType,

    /// The style (static or dynamic) this memory was created with.
    style: MemoryStyle,

    /// Size, in bytes, of the guard region placed after the mapped memory;
    /// accesses into it trap.
    offset_guard_size: usize,

    /// The owned or VM-provided `VMMemoryDefinition` holding the base pointer
    /// and current length exposed to compiled code.
    vm_memory_definition: VMMemoryDefinitionOwnership,
}

/// Records who owns the `VMMemoryDefinition` backing a `LinearMemory`.
#[derive(Debug)]
enum VMMemoryDefinitionOwnership {
    /// The `VMMemoryDefinition` is owned by the VM (it lives inside a
    /// `VMContext`); we only hold a pointer to it.
    VMOwned(NonNull<VMMemoryDefinition>),
    /// The `VMMemoryDefinition` is owned by the host, kept in an `UnsafeCell`
    /// because compiled code may update it through a raw pointer.
    HostOwned(Box<UnsafeCell<VMMemoryDefinition>>),
}

// Sent across threads only together with the instance that owns the
// `VMMemoryDefinition`; the raw pointers themselves carry no thread affinity.
unsafe impl Send for LinearMemory {}

// Shared access is safe because growth goes through the `mmap` mutex, and the
// remaining raw-pointer accesses are `unsafe` and leave the aliasing
// obligations to the caller.
unsafe impl Sync for LinearMemory {}

/// The mmap-backed allocation together with its current logical size.
#[derive(Debug)]
struct WasmMmap {
    /// Our OS allocation of mmap'd memory.
    alloc: Mmap,
    /// The current size, in wasm pages, of this linear memory.
    size: Pages,
}

impl LinearMemory {
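    /// Create a new linear memory instance with the specified minimum and
    /// maximum number of wasm pages.
    ///
    /// A minimal construction sketch (illustrative only; it assumes `MemoryType`
    /// exposes public `minimum`/`maximum`/`shared` fields as in upstream Wasmer):
    ///
    /// ```ignore
    /// let ty = MemoryType { minimum: Pages(1), maximum: Some(Pages(16)), shared: false };
    /// let style = MemoryStyle::Dynamic { offset_guard_size: 0x1_0000 };
    /// let memory = LinearMemory::new(&ty, &style)?;
    /// assert_eq!(memory.size(), Pages(1));
    /// ```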
    pub fn new(memory: &MemoryType, style: &MemoryStyle) -> Result<Self, MemoryError> {
        unsafe { Self::new_internal(memory, style, None) }
    }

    /// Create a new linear memory instance whose `VMMemoryDefinition` lives in
    /// VM-owned storage (typically inside a `VMContext`).
    ///
    /// # Safety
    ///
    /// `vm_memory_location` must point to a valid, properly aligned
    /// `VMMemoryDefinition` that outlives the returned memory.
    pub unsafe fn from_definition(
        memory: &MemoryType,
        style: &MemoryStyle,
        vm_memory_location: NonNull<VMMemoryDefinition>,
    ) -> Result<Self, MemoryError> {
        Self::new_internal(memory, style, Some(vm_memory_location))
    }

    /// Build a linear memory, writing the base pointer and current length
    /// either into the caller-provided `VMMemoryDefinition` or into a freshly
    /// allocated host-owned one.
    unsafe fn new_internal(
        memory: &MemoryType,
        style: &MemoryStyle,
        vm_memory_location: Option<NonNull<VMMemoryDefinition>>,
    ) -> Result<Self, MemoryError> {
        if memory.minimum > Pages::max_value() {
            return Err(MemoryError::MinimumMemoryTooLarge {
                min_requested: memory.minimum,
                max_allowed: Pages::max_value(),
            });
        }
        if let Some(max) = memory.maximum {
            if max > Pages::max_value() {
                return Err(MemoryError::MaximumMemoryTooLarge {
                    max_requested: max,
                    max_allowed: Pages::max_value(),
                });
            }
            if max < memory.minimum {
                return Err(MemoryError::InvalidMemory {
                    reason: format!(
                        "the maximum ({} pages) is less than the minimum ({} pages)",
                        max.0, memory.minimum.0
                    ),
                });
            }
        }

        let offset_guard_bytes = style.offset_guard_size() as usize;

        // Static-style memories reserve address space up to `bound` up front;
        // dynamic-style memories only reserve the minimum.
        let minimum_pages = match style {
            MemoryStyle::Dynamic { .. } => memory.minimum,
            MemoryStyle::Static { bound, .. } => {
                assert_ge!(*bound, memory.minimum);
                *bound
            }
        };
        let minimum_bytes = minimum_pages.bytes().0;
        let request_bytes = minimum_bytes.checked_add(offset_guard_bytes).unwrap();
        let mapped_pages = memory.minimum;
        let mapped_bytes = mapped_pages.bytes();

        // Reserve the whole request (reserved pages plus guard region), but
        // only make the minimum accessible.
        let mut mmap = WasmMmap {
            alloc: Mmap::accessible_reserved(mapped_bytes.0, request_bytes)
                .map_err(MemoryError::Region)?,
            size: memory.minimum,
        };

        let base_ptr = mmap.alloc.as_mut_ptr();
        let mem_length = memory.minimum.bytes().0;
        Ok(Self {
            mmap: Mutex::new(mmap),
            maximum: memory.maximum,
            offset_guard_size: offset_guard_bytes,
            vm_memory_definition: if let Some(mem_loc) = vm_memory_location {
                {
                    let mut ptr = mem_loc;
                    let md = ptr.as_mut();
                    md.base = base_ptr;
                    md.current_length = mem_length;
                }
                VMMemoryDefinitionOwnership::VMOwned(mem_loc)
            } else {
                VMMemoryDefinitionOwnership::HostOwned(Box::new(UnsafeCell::new(
                    VMMemoryDefinition { base: base_ptr, current_length: mem_length },
                )))
            },
            memory: *memory,
            style: style.clone(),
        })
    }

    /// Get the raw `VMMemoryDefinition` pointer, regardless of who owns it.
    ///
    /// # Safety
    ///
    /// The returned pointer is only valid while this memory (and, for VM-owned
    /// definitions, the owning instance) is alive, and callers must not create
    /// aliasing mutable references through it.
    unsafe fn get_vm_memory_definition(&self) -> NonNull<VMMemoryDefinition> {
        match &self.vm_memory_definition {
            VMMemoryDefinitionOwnership::VMOwned(ptr) => *ptr,
            VMMemoryDefinitionOwnership::HostOwned(boxed_ptr) => {
                NonNull::new_unchecked(boxed_ptr.get())
            }
        }
    }
}

impl Memory for LinearMemory {
    /// Returns the type for this memory, with the minimum updated to the
    /// currently allocated number of pages.
    fn ty(&self) -> MemoryType {
        let minimum = self.size();
        let mut out = self.memory;
        out.minimum = minimum;

        out
    }

    /// Returns the memory style for this memory.
    fn style(&self) -> &MemoryStyle {
        &self.style
    }

    /// Returns the number of allocated wasm pages.
    fn size(&self) -> Pages {
        // The definition is kept up to date by `grow`, which holds the mmap
        // lock while rewriting the base pointer and length.
        unsafe {
            let md_ptr = self.get_vm_memory_definition();
            let md = md_ptr.as_ref();
            Bytes::from(md.current_length).try_into().unwrap()
        }
    }

    /// Grow memory by the specified amount of wasm pages, returning the
    /// previous size in pages on success.
    fn grow(&self, delta: Pages) -> Result<Pages, MemoryError> {
        let mut mmap_guard = self.mmap.lock().unwrap();
        let mmap = mmap_guard.borrow_mut();
        // Growing by zero pages is a no-op; just report the current size.
        if delta.0 == 0 {
            return Ok(mmap.size);
        }

        let new_pages = mmap
            .size
            .checked_add(delta)
            .ok_or(MemoryError::CouldNotGrow { current: mmap.size, attempted_delta: delta })?;
        let prev_pages = mmap.size;

        if let Some(maximum) = self.maximum {
            if new_pages > maximum {
                return Err(MemoryError::CouldNotGrow {
                    current: mmap.size,
                    attempted_delta: delta,
                });
            }
        }

        // Wasm linear memories are never allowed to grow beyond what is
        // indexable with a 32-bit address.
        if new_pages >= Pages::max_value() {
            return Err(MemoryError::CouldNotGrow { current: mmap.size, attempted_delta: delta });
        }

        let delta_bytes = delta.bytes().0;
        let prev_bytes = prev_pages.bytes().0;
        let new_bytes = new_pages.bytes().0;

        if new_bytes > mmap.alloc.len() - self.offset_guard_size {
            // The new size exceeds the existing reservation (minus the guard
            // region), so allocate a new mapping and copy the old contents.
            let guard_bytes = self.offset_guard_size;
            let request_bytes =
                new_bytes.checked_add(guard_bytes).ok_or_else(|| MemoryError::CouldNotGrow {
                    current: new_pages,
                    attempted_delta: Bytes(guard_bytes).try_into().unwrap(),
                })?;

            let mut new_mmap =
                Mmap::accessible_reserved(new_bytes, request_bytes).map_err(MemoryError::Region)?;

            let copy_len = mmap.alloc.len() - self.offset_guard_size;
            new_mmap.as_mut_slice()[..copy_len].copy_from_slice(&mmap.alloc.as_slice()[..copy_len]);

            mmap.alloc = new_mmap;
        } else if delta_bytes > 0 {
            // The reservation is already large enough; just make the new pages
            // accessible.
            mmap.alloc.make_accessible(prev_bytes, delta_bytes).map_err(MemoryError::Region)?;
        }

        mmap.size = new_pages;

        // Publish the new base pointer and length to compiled code while the
        // mmap lock is still held.
        unsafe {
            let mut md_ptr = self.get_vm_memory_definition();
            let md = md_ptr.as_mut();
            md.current_length = new_pages.bytes().0;
            md.base = mmap.alloc.as_mut_ptr() as _;
        }

        Ok(prev_pages)
    }

    /// Return a `VMMemoryDefinition` pointer for exposing this memory to
    /// compiled wasm code.
    fn vmmemory(&self) -> NonNull<VMMemoryDefinition> {
        let _mmap_guard = self.mmap.lock().unwrap();
        unsafe { self.get_vm_memory_definition() }
    }
}
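
// The tests below are an illustrative sketch rather than part of the original
// module: they assume `MemoryType` exposes public `minimum`/`maximum`/`shared`
// fields (as in upstream Wasmer) and that a 64 KiB offset-guard region is
// acceptable on the host platform.
#[cfg(test)]
mod tests {
    use super::*;

    fn small_memory_type() -> MemoryType {
        // One page minimum, sixteen pages maximum, non-shared.
        MemoryType { minimum: Pages(1), maximum: Some(Pages(16)), shared: false }
    }

    #[test]
    fn grow_reports_previous_size() {
        let style = MemoryStyle::Dynamic { offset_guard_size: 0x1_0000 };
        let memory = LinearMemory::new(&small_memory_type(), &style).unwrap();
        assert_eq!(memory.size(), Pages(1));

        // `grow` returns the size the memory had before growing.
        assert_eq!(memory.grow(Pages(2)).unwrap(), Pages(1));
        assert_eq!(memory.size(), Pages(3));

        // Growing past the declared maximum must fail.
        assert!(memory.grow(Pages(100)).is_err());
    }

    #[test]
    fn zero_delta_grow_is_a_no_op() {
        let style = MemoryStyle::Static { bound: Pages(16), offset_guard_size: 0x1_0000 };
        let memory = LinearMemory::new(&small_memory_type(), &style).unwrap();
        assert_eq!(memory.grow(Pages(0)).unwrap(), Pages(1));
        assert_eq!(memory.size(), Pages(1));
    }
}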