near_vm_vm/memory/linear_memory.rs

use crate::mmap::Mmap;
use crate::vmcontext::VMMemoryDefinition;
use crate::{MemoryError, MemoryStyle};
use more_asserts::assert_ge;
use near_vm_types::{Bytes, MemoryType, Pages};
use parking_lot::Mutex;
use std::borrow::BorrowMut;
use std::cell::UnsafeCell;
use std::convert::TryInto;
use std::ptr::NonNull;

#[derive(Debug)]
struct WasmMmap {
    // Our OS allocation of mmap'd memory.
    alloc: Mmap,
    // The current logical size in wasm pages of this linear memory.
    size: Pages,
}

/// A linear memory instance.
#[derive(Debug)]
pub struct LinearMemory {
    // The underlying allocation.
    mmap: Mutex<WasmMmap>,

    /// The optional maximum size in wasm pages of this linear memory.
    maximum: Option<Pages>,

    /// The WebAssembly linear memory description.
    memory: MemoryType,

    /// Our chosen implementation style.
    style: MemoryStyle,

    /// Size in bytes of extra guard pages after the end to catch out-of-bounds
    /// loads and stores with constant offsets.
    offset_guard_size: usize,

    /// The owned memory definition used by the generated code.
    vm_memory_definition: VMMemoryDefinitionOwnership,
}

/// A type to help manage who is responsible for the backing memory of the
/// `VMMemoryDefinition`.
#[derive(Debug)]
enum VMMemoryDefinitionOwnership {
    /// The `VMMemoryDefinition` is owned by the `Instance` and we should use
    /// its memory. This is how a local memory that's exported should be stored.
    VMOwned(NonNull<VMMemoryDefinition>),
    /// The `VMMemoryDefinition` is owned by the host and we should manage its
    /// memory. This is how an imported memory that doesn't come from another
    /// Wasm module should be stored.
    HostOwned(Box<UnsafeCell<VMMemoryDefinition>>),
}

/// This is safe because `LinearMemory` carries no thread-specific state; the
/// raw pointer in the `VMOwned` case is only dereferenced while the owning VM
/// data is alive.
unsafe impl Send for LinearMemory {}

/// This is safe because the mutable parts of `LinearMemory` (the mmap and the
/// memory definition it backs) are protected by the `mmap` mutex.
unsafe impl Sync for LinearMemory {}

impl LinearMemory {
    /// Create a new linear memory instance with specified minimum and maximum
    /// number of wasm pages.
    ///
    /// This creates a memory with host-owned metadata: it can be used to
    /// create a memory that will be imported into Wasm modules.
    pub fn new(memory: &MemoryType, style: &MemoryStyle) -> Result<Self, MemoryError> {
        unsafe { Self::new_internal(memory, style, None) }
    }

    /// Create a new linear memory instance with specified minimum and maximum
    /// number of wasm pages.
    ///
    /// This creates a memory with metadata owned by a VM, pointed to by
    /// `vm_memory_location`: it can be used to create a local memory.
    ///
    /// # Safety
    /// - `vm_memory_location` must point to a valid `VMMemoryDefinition`.
    pub unsafe fn from_definition(
        memory: &MemoryType,
        style: &MemoryStyle,
        vm_memory_location: NonNull<VMMemoryDefinition>,
    ) -> Result<Self, MemoryError> {
        unsafe { Self::new_internal(memory, style, Some(vm_memory_location)) }
    }

    /// Build a `LinearMemory` with either self-owned or VM-owned metadata.
    unsafe fn new_internal(
        memory: &MemoryType,
        style: &MemoryStyle,
        vm_memory_location: Option<NonNull<VMMemoryDefinition>>,
    ) -> Result<Self, MemoryError> {
        if memory.minimum > Pages::max_value() {
            return Err(MemoryError::MinimumMemoryTooLarge {
                min_requested: memory.minimum,
                max_allowed: Pages::max_value(),
            });
        }
        if let Some(max) = memory.maximum {
            if max > Pages::max_value() {
                return Err(MemoryError::MaximumMemoryTooLarge {
                    max_requested: max,
                    max_allowed: Pages::max_value(),
                });
            }
            if max < memory.minimum {
                return Err(MemoryError::InvalidMemory {
                    reason: format!(
                        "the maximum ({} pages) is less than the minimum ({} pages)",
                        max.0, memory.minimum.0
                    ),
                });
            }
        }

        let offset_guard_bytes = style.offset_guard_size() as usize;

        // A static-style memory reserves address space for its full bound up
        // front; a dynamic-style memory reserves only the minimum and may be
        // remapped when it grows.
        let minimum_pages = match style {
            MemoryStyle::Dynamic { .. } => memory.minimum,
            MemoryStyle::Static { bound, .. } => {
                assert_ge!(*bound, memory.minimum);
                *bound
            }
        };
        let minimum_bytes = minimum_pages.bytes().0;
        let request_bytes = minimum_bytes.checked_add(offset_guard_bytes).unwrap();
        let mapped_pages = memory.minimum;
        let mapped_bytes = mapped_pages.bytes();

        let mut mmap = WasmMmap {
            alloc: Mmap::accessible_reserved(mapped_bytes.0, request_bytes)
                .map_err(MemoryError::Region)?,
            size: memory.minimum,
        };
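
        // Illustrative layout of the reservation just created (a sketch based
        // on the sizing logic above). Only the first `mapped_bytes` are
        // accessible; the rest of the reservation and the trailing guard
        // region stay inaccessible so out-of-bounds accesses trap:
        //
        //   base -> [ accessible: mapped_bytes ][ reserved ][ guard: offset_guard_bytes ]
        //           |<-------- minimum_bytes -------------->|
        //           |<------------------------ request_bytes ---------------------------->|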

        let base_ptr = mmap.alloc.as_mut_ptr();
        let mem_length = memory.minimum.bytes().0;
        Ok(Self {
            mmap: Mutex::new(mmap),
            maximum: memory.maximum,
            offset_guard_size: offset_guard_bytes,
            vm_memory_definition: if let Some(mem_loc) = vm_memory_location {
                {
                    let mut ptr = mem_loc;
                    let md = unsafe { ptr.as_mut() };
                    md.base = base_ptr;
                    md.current_length = mem_length;
                }
                VMMemoryDefinitionOwnership::VMOwned(mem_loc)
            } else {
                VMMemoryDefinitionOwnership::HostOwned(Box::new(UnsafeCell::new(
                    VMMemoryDefinition { base: base_ptr, current_length: mem_length },
                )))
            },
            memory: *memory,
            style: style.clone(),
        })
    }

    /// Get the `VMMemoryDefinition`.
    ///
    /// # Safety
    /// - You must ensure mutually exclusive access when mutating the contents
    ///   of the returned `VMMemoryDefinition`; in practice this means holding
    ///   the `mmap` lock.
    unsafe fn get_vm_memory_definition(&self) -> NonNull<VMMemoryDefinition> {
        match &self.vm_memory_definition {
            VMMemoryDefinitionOwnership::VMOwned(ptr) => *ptr,
            VMMemoryDefinitionOwnership::HostOwned(boxed_ptr) => unsafe {
                NonNull::new_unchecked(boxed_ptr.get())
            },
        }
    }
}

impl LinearMemory {
    /// Returns the type for this memory, with `minimum` updated to the
    /// current size.
    pub fn ty(&self) -> MemoryType {
        let minimum = self.size();
        let mut out = self.memory;
        out.minimum = minimum;

        out
    }

    /// Returns the memory style for this memory.
    pub fn style(&self) -> &MemoryStyle {
        &self.style
    }

    /// Returns the number of allocated wasm pages.
    pub fn size(&self) -> Pages {
        unsafe {
            // Read the current length from the shared memory definition.
            let md_ptr = self.get_vm_memory_definition();
            let md = md_ptr.as_ref();
            Bytes::from(md.current_length).try_into().unwrap()
        }
    }

    /// Grow memory by the specified amount of wasm pages.
    ///
    /// Returns an error if memory can't be grown by the specified amount of
    /// wasm pages.
    pub fn grow(&self, delta: Pages) -> Result<Pages, MemoryError> {
        let mut mmap_guard = self.mmap.lock();
        let mmap = mmap_guard.borrow_mut();
        // Optimization of memory.grow 0 calls.
        if delta.0 == 0 {
            return Ok(mmap.size);
        }

        let new_pages = mmap
            .size
            .checked_add(delta)
            .ok_or(MemoryError::CouldNotGrow { current: mmap.size, attempted_delta: delta })?;
        let prev_pages = mmap.size;

        if let Some(maximum) = self.maximum {
            if new_pages > maximum {
                return Err(MemoryError::CouldNotGrow {
                    current: mmap.size,
                    attempted_delta: delta,
                });
            }
        }

        if new_pages >= Pages::max_value() {
            // Linear memory size would exceed the index range.
            return Err(MemoryError::CouldNotGrow { current: mmap.size, attempted_delta: delta });
        }

        let delta_bytes = delta.bytes().0;
        let prev_bytes = prev_pages.bytes().0;
        let new_bytes = new_pages.bytes().0;

        if new_bytes > mmap.alloc.len() - self.offset_guard_size {
            // If the new size is within the declared maximum, but needs more memory
            // than we have on hand, it's a dynamic heap and it can move.
            let guard_bytes = self.offset_guard_size;
            let request_bytes =
                new_bytes.checked_add(guard_bytes).ok_or_else(|| MemoryError::CouldNotGrow {
                    current: new_pages,
                    attempted_delta: Bytes(guard_bytes).try_into().unwrap(),
                })?;

            let mut new_mmap =
                Mmap::accessible_reserved(new_bytes, request_bytes).map_err(MemoryError::Region)?;

            // Copy the existing contents into the new allocation.
            let copy_len = mmap.alloc.len() - self.offset_guard_size;
            new_mmap.as_mut_slice()[..copy_len].copy_from_slice(&mmap.alloc.as_slice()[..copy_len]);

            mmap.alloc = new_mmap;
        } else if delta_bytes > 0 {
            // The reservation is big enough: just make the new pages accessible.
            mmap.alloc.make_accessible(prev_bytes, delta_bytes).map_err(MemoryError::Region)?;
        }

        mmap.size = new_pages;

        // Update the memory definition seen by compiled code.
        unsafe {
            let mut md_ptr = self.get_vm_memory_definition();
            let md = md_ptr.as_mut();
            md.current_length = new_pages.bytes().0;
            md.base = mmap.alloc.as_mut_ptr() as _;
        }

        Ok(prev_pages)
    }
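
    // A worked example for `grow` (illustrative): wasm pages are 64 KiB, so
    // growing a 1-page memory by `Pages(2)` moves `mmap.size` from 1 to 3
    // pages (65_536 to 196_608 accessible bytes) and returns the previous
    // size, `Ok(Pages(1))`.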

    /// Return a `VMMemoryDefinition` for exposing the memory to compiled wasm
    /// code.
    pub fn vmmemory(&self) -> NonNull<VMMemoryDefinition> {
        let _mmap_guard = self.mmap.lock();
        unsafe { self.get_vm_memory_definition() }
    }
}
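
// A minimal usage sketch (assuming `MemoryType::new(min, max, shared)` and a
// `MemoryStyle::Dynamic { offset_guard_size }` variant shape, as in the
// upstream Wasmer types this crate derives from; adjust to the actual
// `near_vm_types` API if it differs):
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn grow_dynamic_memory_within_maximum() {
        // One initial page, at most four, non-shared, with a 64 KiB guard.
        let ty = MemoryType::new(Pages(1), Some(Pages(4)), false);
        let style = MemoryStyle::Dynamic { offset_guard_size: 0x1_0000 };
        let mem = LinearMemory::new(&ty, &style).unwrap();
        assert_eq!(mem.size(), Pages(1));
        // `grow` returns the previous size in pages.
        assert_eq!(mem.grow(Pages(2)).unwrap(), Pages(1));
        assert_eq!(mem.size(), Pages(3));
        // Growing past the declared maximum fails.
        assert!(mem.grow(Pages(2)).is_err());
    }
}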