// near_vm_vm/memory/linear_memory.rs

1use crate::mmap::Mmap;
2use crate::vmcontext::VMMemoryDefinition;
3use crate::{MemoryError, MemoryStyle};
4use more_asserts::assert_ge;
5use near_vm_types::{Bytes, MemoryType, Pages};
6use parking_lot::Mutex;
7use std::borrow::BorrowMut;
8use std::cell::UnsafeCell;
9use std::convert::TryInto;
10use std::ptr::NonNull;
11
/// The mmap-backed storage for a linear memory together with its current
/// logical size. Kept behind `LinearMemory`'s mutex so `grow` can swap the
/// allocation atomically with respect to readers of the size.
#[derive(Debug)]
struct WasmMmap {
    // Our OS allocation of mmap'd memory.
    alloc: Mmap,
    // The current logical size in wasm pages of this linear memory.
    size: Pages,
}
19
/// A linear memory instance.
#[derive(Debug)]
pub struct LinearMemory {
    // The underlying allocation. Guarded by a mutex because `grow` may
    // replace the mapping with a larger one.
    mmap: Mutex<WasmMmap>,

    // The optional maximum size in wasm pages of this linear memory.
    maximum: Option<Pages>,

    /// The WebAssembly linear memory description.
    memory: MemoryType,

    /// Our chosen implementation style.
    style: MemoryStyle,

    // Size in bytes of extra guard pages after the end to optimize loads and stores with
    // constant offsets.
    offset_guard_size: usize,

    /// The owned memory definition used by the generated code
    vm_memory_definition: VMMemoryDefinitionOwnership,
}
42
/// A type to help manage who is responsible for the backing memory of the
/// `VMMemoryDefinition`.
#[derive(Debug)]
enum VMMemoryDefinitionOwnership {
    /// The `VMMemoryDefinition` is owned by the `Instance` and we should use
    /// its memory. This is how a local memory that's exported should be stored.
    VMOwned(NonNull<VMMemoryDefinition>),
    /// The `VMMemoryDefinition` is owned by the host and we should manage its
    /// memory. This is how an imported memory that doesn't come from another
    /// Wasm module should be stored.
    HostOwned(Box<UnsafeCell<VMMemoryDefinition>>),
}
55
/// We must implement this because of `VMMemoryDefinitionOwnership::VMOwned`
/// (a raw `NonNull` pointer is not `Send` by default). This is claimed to be
/// correct because synchronization of memory accesses is controlled
/// by the VM.
// REVIEW: I don't believe ^; this probably shouldn't be `Send`...
// mutations from other threads into this data could be a problem, but we probably
// don't want to use atomics for this in the generated code.
// TODO:
unsafe impl Send for LinearMemory {}

/// This is correct because all internal mutability is protected by a mutex.
unsafe impl Sync for LinearMemory {}
67
impl LinearMemory {
    /// Create a new linear memory instance with specified minimum and maximum number of wasm pages.
    ///
    /// This creates a `LinearMemory` with owned metadata: this can be used to create a memory
    /// that will be imported into Wasm modules.
    pub fn new(memory: &MemoryType, style: &MemoryStyle) -> Result<Self, MemoryError> {
        unsafe { Self::new_internal(memory, style, None) }
    }

    /// Create a new linear memory instance with specified minimum and maximum number of wasm pages.
    ///
    /// This creates a `LinearMemory` with metadata owned by a VM, pointed to by
    /// `vm_memory_location`: this can be used to create a local memory.
    ///
    /// # Safety
    /// - `vm_memory_location` must point to a valid location in VM memory.
    pub unsafe fn from_definition(
        memory: &MemoryType,
        style: &MemoryStyle,
        vm_memory_location: NonNull<VMMemoryDefinition>,
    ) -> Result<Self, MemoryError> {
        unsafe { Self::new_internal(memory, style, Some(vm_memory_location)) }
    }

    /// Build a `LinearMemory` with either self-owned or VM owned metadata.
    ///
    /// # Safety
    /// - If `vm_memory_location` is `Some`, it must point to a valid,
    ///   writable `VMMemoryDefinition`: it is initialized in place below.
    unsafe fn new_internal(
        memory: &MemoryType,
        style: &MemoryStyle,
        vm_memory_location: Option<NonNull<VMMemoryDefinition>>,
    ) -> Result<Self, MemoryError> {
        // Validate the requested limits before reserving any address space.
        if memory.minimum > Pages::max_value() {
            return Err(MemoryError::MinimumMemoryTooLarge {
                min_requested: memory.minimum,
                max_allowed: Pages::max_value(),
            });
        }
        // `maximum` cannot be set to more than `65536` pages.
        if let Some(max) = memory.maximum {
            if max > Pages::max_value() {
                return Err(MemoryError::MaximumMemoryTooLarge {
                    max_requested: max,
                    max_allowed: Pages::max_value(),
                });
            }
            if max < memory.minimum {
                return Err(MemoryError::InvalidMemory {
                    reason: format!(
                        "the maximum ({} pages) is less than the minimum ({} pages)",
                        max.0, memory.minimum.0
                    ),
                });
            }
        }

        let offset_guard_bytes = style.offset_guard_size() as usize;

        // Dynamic memories reserve only the minimum (the mapping may move on
        // grow); static memories reserve the full `bound` up front, which must
        // be at least the minimum.
        let minimum_pages = match style {
            MemoryStyle::Dynamic { .. } => memory.minimum,
            MemoryStyle::Static { bound, .. } => {
                assert_ge!(*bound, memory.minimum);
                *bound
            }
        };
        let minimum_bytes = minimum_pages.bytes().0;
        // Total reservation = data region + trailing guard region.
        let request_bytes = minimum_bytes.checked_add(offset_guard_bytes).unwrap();
        // Only `memory.minimum` pages are made accessible; the remainder of
        // the reservation stays inaccessible until `grow` is called.
        let mapped_pages = memory.minimum;
        let mapped_bytes = mapped_pages.bytes();

        let mut mmap = WasmMmap {
            alloc: Mmap::accessible_reserved(mapped_bytes.0, request_bytes)
                .map_err(MemoryError::Region)?,
            size: memory.minimum,
        };

        let base_ptr = mmap.alloc.as_mut_ptr();
        let mem_length = memory.minimum.bytes().0;
        Ok(Self {
            mmap: Mutex::new(mmap),
            maximum: memory.maximum,
            offset_guard_size: offset_guard_bytes,
            vm_memory_definition: if let Some(mem_loc) = vm_memory_location {
                {
                    // Initialize the VM-owned definition in place.
                    let mut ptr = mem_loc;
                    // SAFETY: the caller guarantees `vm_memory_location`
                    // points to valid, writable VM memory.
                    let md = unsafe { ptr.as_mut() };
                    md.base = base_ptr;
                    md.current_length = mem_length;
                }
                VMMemoryDefinitionOwnership::VMOwned(mem_loc)
            } else {
                VMMemoryDefinitionOwnership::HostOwned(Box::new(UnsafeCell::new(
                    VMMemoryDefinition { base: base_ptr, current_length: mem_length },
                )))
            },
            memory: *memory,
            style: style.clone(),
        })
    }

    /// Get the `VMMemoryDefinition`.
    ///
    /// # Safety
    /// - You must ensure that you have mutually exclusive access before calling
    ///   this function. You can get this by locking the `mmap` mutex.
    unsafe fn get_vm_memory_definition(&self) -> NonNull<VMMemoryDefinition> {
        match &self.vm_memory_definition {
            VMMemoryDefinitionOwnership::VMOwned(ptr) => *ptr,
            VMMemoryDefinitionOwnership::HostOwned(boxed_ptr) => unsafe {
                // SAFETY: `Box`/`UnsafeCell::get` never yield a null pointer.
                NonNull::new_unchecked(boxed_ptr.get())
            },
        }
    }
}
180
181impl LinearMemory {
182    /// Returns the type for this memory.
183    pub fn ty(&self) -> MemoryType {
184        let minimum = self.size();
185        let mut out = self.memory;
186        out.minimum = minimum;
187
188        out
189    }
190
191    /// Returns the memory style for this memory.
192    pub fn style(&self) -> &MemoryStyle {
193        &self.style
194    }
195
196    /// Returns the number of allocated wasm pages.
197    pub fn size(&self) -> Pages {
198        // TODO: investigate this function for race conditions
199        unsafe {
200            let md_ptr = self.get_vm_memory_definition();
201            let md = md_ptr.as_ref();
202            Bytes::from(md.current_length).try_into().unwrap()
203        }
204    }
205
206    /// Grow memory by the specified amount of wasm pages.
207    ///
208    /// Returns `None` if memory can't be grown by the specified amount
209    /// of wasm pages.
210    pub fn grow(&self, delta: Pages) -> Result<Pages, MemoryError> {
211        let mut mmap_guard = self.mmap.lock();
212        let mmap = mmap_guard.borrow_mut();
213        // Optimization of memory.grow 0 calls.
214        if delta.0 == 0 {
215            return Ok(mmap.size);
216        }
217
218        let new_pages = mmap
219            .size
220            .checked_add(delta)
221            .ok_or(MemoryError::CouldNotGrow { current: mmap.size, attempted_delta: delta })?;
222        let prev_pages = mmap.size;
223
224        if let Some(maximum) = self.maximum {
225            if new_pages > maximum {
226                return Err(MemoryError::CouldNotGrow {
227                    current: mmap.size,
228                    attempted_delta: delta,
229                });
230            }
231        }
232
233        // Wasm linear memories are never allowed to grow beyond what is
234        // indexable. If the memory has no maximum, enforce the greatest
235        // limit here.
236        if new_pages >= Pages::max_value() {
237            // Linear memory size would exceed the index range.
238            return Err(MemoryError::CouldNotGrow { current: mmap.size, attempted_delta: delta });
239        }
240
241        let delta_bytes = delta.bytes().0;
242        let prev_bytes = prev_pages.bytes().0;
243        let new_bytes = new_pages.bytes().0;
244
245        if new_bytes > mmap.alloc.len() - self.offset_guard_size {
246            // If the new size is within the declared maximum, but needs more memory than we
247            // have on hand, it's a dynamic heap and it can move.
248            let guard_bytes = self.offset_guard_size;
249            let request_bytes =
250                new_bytes.checked_add(guard_bytes).ok_or_else(|| MemoryError::CouldNotGrow {
251                    current: new_pages,
252                    attempted_delta: Bytes(guard_bytes).try_into().unwrap(),
253                })?;
254
255            let mut new_mmap =
256                Mmap::accessible_reserved(new_bytes, request_bytes).map_err(MemoryError::Region)?;
257
258            let copy_len = mmap.alloc.len() - self.offset_guard_size;
259            new_mmap.as_mut_slice()[..copy_len].copy_from_slice(&mmap.alloc.as_slice()[..copy_len]);
260
261            mmap.alloc = new_mmap;
262        } else if delta_bytes > 0 {
263            // Make the newly allocated pages accessible.
264            mmap.alloc.make_accessible(prev_bytes, delta_bytes).map_err(MemoryError::Region)?;
265        }
266
267        mmap.size = new_pages;
268
269        // update memory definition
270        unsafe {
271            let mut md_ptr = self.get_vm_memory_definition();
272            let md = md_ptr.as_mut();
273            md.current_length = new_pages.bytes().0;
274            md.base = mmap.alloc.as_mut_ptr() as _;
275        }
276
277        Ok(prev_pages)
278    }
279
280    /// Return a `VMMemoryDefinition` for exposing the memory to compiled wasm code.
281    pub fn vmmemory(&self) -> NonNull<VMMemoryDefinition> {
282        let _mmap_guard = self.mmap.lock();
283        unsafe { self.get_vm_memory_definition() }
284    }
285}