// lucet_runtime_internals/alloc/mod.rs

use crate::error::Error;
use crate::module::Module;
use crate::region::RegionInternal;
use libc::c_void;
use lucet_module::GlobalValue;
use nix::unistd::{sysconf, SysconfVar};
use std::sync::{Arc, Once, Weak};

pub const HOST_PAGE_SIZE_EXPECTED: usize = 4096;
static mut HOST_PAGE_SIZE: usize = 0;
static HOST_PAGE_SIZE_INIT: Once = Once::new();

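/// Returns the host page size, checking it against the expected 4096 bytes on first use.
///
/// The result of `sysconf(PAGE_SIZE)` is cached in a static by `Once`; a host whose page size
/// differs from `HOST_PAGE_SIZE_EXPECTED` causes a panic, since the allocator's layout
/// arithmetic assumes 4 KiB pages.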
pub fn host_page_size() -> usize {
    unsafe {
        HOST_PAGE_SIZE_INIT.call_once(|| match sysconf(SysconfVar::PAGE_SIZE) {
            Ok(Some(sz)) => {
                if sz as usize == HOST_PAGE_SIZE_EXPECTED {
                    HOST_PAGE_SIZE = HOST_PAGE_SIZE_EXPECTED;
                } else {
                    panic!(
                        "host page size was {}; expected {}",
                        sz, HOST_PAGE_SIZE_EXPECTED
                    );
                }
            }
            _ => panic!("could not get host page size from sysconf"),
        });
        HOST_PAGE_SIZE
    }
}

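/// The offset from the start of an instance's slot to the start of its heap: a single host
/// page, which holds the instance's own data.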
pub fn instance_heap_offset() -> usize {
    1 * host_page_size()
}

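/// The set of pointers into a contiguous chunk of virtual address space that backs a single
/// instance.
///
/// The regions are laid out in order: the instance page at `start`, the heap and its reserved
/// address space, a guard page, the stack, the globals, another guard page, and the signal
/// stack (compare `Limits::total_memory_size`).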
#[repr(C)]
pub struct Slot {
    /// The start of the contiguous virtual memory chunk managed by this slot.
    pub start: *mut c_void,

    /// The start of the heap; the rest of the heap's reserved address space follows the
    /// accessible portion and faults on access.
    pub heap: *mut c_void,

    /// The lowest address of the stack; the stack grows downward from `stack_top()`.
    pub stack: *mut c_void,

    /// The backing memory for the WebAssembly globals.
    pub globals: *mut c_void,

    /// A separate stack for signal handlers to run on.
    pub sigstack: *mut c_void,

    /// The limits this slot was created with; these do not change over its lifetime.
    pub limits: Limits,

    pub region: Weak<dyn RegionInternal>,
}

unsafe impl Send for Slot {}
unsafe impl Sync for Slot {}

impl Slot {
    /// The address one byte past the end of the stack's memory; since the stack grows
    /// downward, this is its initial top.
    pub fn stack_top(&self) -> *mut c_void {
        (self.stack as usize + self.limits.stack_size) as *mut c_void
    }
}

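/// An allocation of instance memory: a `Slot` obtained from a `Region`, plus bookkeeping for
/// how much of the heap is currently accessible.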
pub struct Alloc {
    /// The number of heap bytes currently accessible to the instance.
    pub heap_accessible_size: usize,
    /// The number of reserved heap address space bytes beyond the accessible portion.
    pub heap_inaccessible_size: usize,
    pub slot: Option<Slot>,
    pub region: Arc<dyn RegionInternal>,
}

impl Drop for Alloc {
    fn drop(&mut self) {
        // Clone the region handle so the region can reclaim this allocation's slot while we
        // still hold `&mut self`.
        self.region.clone().drop_alloc(self);
    }
}

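/// The classification of an address relative to an instance's memory slot, used when handling
/// faults to determine where the faulting access landed.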
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum AddrLocation {
    Heap,
    InaccessibleHeap,
    StackGuard,
    Stack,
    Globals,
    SigStackGuard,
    SigStack,
    Unknown,
}

impl AddrLocation {
    /// Whether a fault at this location is fatal: faults on the signal stack guard or at an
    /// unknown address cannot be handled safely, while faults in the other locations can be
    /// reported as instance faults.
    pub fn is_fault_fatal(self) -> bool {
        use AddrLocation::*;
        match self {
            SigStackGuard | Unknown => true,
            _ => false,
        }
    }
}

impl Alloc {
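    /// Determine where in the slot's memory layout the given address falls.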
    pub fn addr_location(&self, addr: *const c_void) -> AddrLocation {
        let addr = addr as usize;

        let heap_start = self.slot().heap as usize;
        let heap_inaccessible_start = heap_start + self.heap_accessible_size;
        let heap_inaccessible_end = heap_start + self.slot().limits.heap_address_space_size;

        if (addr >= heap_start) && (addr < heap_inaccessible_start) {
            return AddrLocation::Heap;
        }
        if (addr >= heap_inaccessible_start) && (addr < heap_inaccessible_end) {
            return AddrLocation::InaccessibleHeap;
        }

        let stack_start = self.slot().stack as usize;
        let stack_end = stack_start + self.slot().limits.stack_size;
        let stack_guard_start = stack_start - host_page_size();

        if (addr >= stack_guard_start) && (addr < stack_start) {
            return AddrLocation::StackGuard;
        }
        if (addr >= stack_start) && (addr < stack_end) {
            return AddrLocation::Stack;
        }

        let globals_start = self.slot().globals as usize;
        let globals_end = globals_start + self.slot().limits.globals_size;

        if (addr >= globals_start) && (addr < globals_end) {
            return AddrLocation::Globals;
        }

        let sigstack_start = self.slot().sigstack as usize;
        let sigstack_end = sigstack_start + self.slot().limits.signal_stack_size;
        let sigstack_guard_start = sigstack_start - host_page_size();

        if (addr >= sigstack_guard_start) && (addr < sigstack_start) {
            return AddrLocation::SigStackGuard;
        }
        if (addr >= sigstack_start) && (addr < sigstack_end) {
            return AddrLocation::SigStack;
        }

        AddrLocation::Unknown
    }

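    /// Expand the heap by `expand_bytes`, rounded up to a whole number of host pages.
    ///
    /// On success, returns the offset at which the newly accessible memory begins, i.e. the
    /// length of the heap before the expansion. Fails if the module defines no linear memory,
    /// or if the expansion would overflow the reserved address space, leave too little guard
    /// memory for the module's `HeapSpec`, or exceed the module or runtime heap size limits.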
    pub fn expand_heap(&mut self, expand_bytes: u32, module: &dyn Module) -> Result<u32, Error> {
        let slot = self.slot();

        if expand_bytes == 0 {
            // no expansion takes place, which is not an error
            return Ok(self.heap_accessible_size as u32);
        }

        let host_page_size = host_page_size() as u32;

        if self.heap_accessible_size as u32 % host_page_size != 0 {
            lucet_bail!("heap is not page-aligned; this is a bug");
        }

        if expand_bytes > std::u32::MAX - host_page_size - 1 {
            bail_limits_exceeded!("expanded heap would overflow address space");
        }

        // round the expansion up to a page boundary
        let expand_pagealigned =
            ((expand_bytes + host_page_size - 1) / host_page_size) * host_page_size;

        // `heap_inaccessible_size` tracks the remaining reserved address space, so an
        // expansion that fits there cannot overflow the slot
        if expand_pagealigned as usize > self.heap_inaccessible_size {
            bail_limits_exceeded!("expanded heap would overflow addressable memory");
        }

        // the check above ensures this subtraction does not underflow
        let guard_remaining = self.heap_inaccessible_size - expand_pagealigned as usize;

        if let Some(heap_spec) = module.heap_spec() {
            // the module requires a minimum amount of guard memory beyond the end of the
            // accessible heap; make sure the expansion leaves at least that much
            if guard_remaining < heap_spec.guard_size as usize {
                bail_limits_exceeded!("expansion would leave guard memory too small");
            }

            // the module may also specify a maximum heap size
            if let Some(max_size) = heap_spec.max_size {
                if self.heap_accessible_size + expand_pagealigned as usize > max_size as usize {
                    bail_limits_exceeded!(
                        "expansion would exceed module-specified heap limit: {:?}",
                        max_size
                    );
                }
            }
        } else {
            return Err(Error::NoLinearMemory("cannot expand heap".to_owned()));
        }
        // the runtime limits bound how much of the heap may be backed by real memory
        if self.heap_accessible_size + expand_pagealigned as usize > slot.limits.heap_memory_size {
            bail_limits_exceeded!(
                "expansion would exceed runtime-specified heap limit: {:?}",
                slot.limits
            );
        }

        let newly_accessible = self.heap_accessible_size;

        self.region
            .clone()
            .expand_heap(slot, newly_accessible as u32, expand_pagealigned)?;

        self.heap_accessible_size += expand_pagealigned as usize;
        self.heap_inaccessible_size -= expand_pagealigned as usize;

        // the new memory begins where the heap previously ended
        Ok(newly_accessible as u32)
    }

    pub fn reset_heap(&mut self, module: &dyn Module) -> Result<(), Error> {
        self.region.clone().reset_heap(self, module)
    }

    /// The current length of the accessible heap, in bytes.
    pub fn heap_len(&self) -> usize {
        self.heap_accessible_size
    }

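    /// A reference to this allocation's slot.
    ///
    /// Panics if the slot is missing, which should only be possible while the `Alloc` is
    /// being dropped.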
    pub fn slot(&self) -> &Slot {
        self.slot
            .as_ref()
            .expect("alloc missing its slot before drop")
    }

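    /// A view of the accessible heap as a byte slice.
    ///
    /// # Safety
    ///
    /// This builds a slice directly from the slot's raw heap pointer; the caller must
    /// guarantee that the heap is not mutated (e.g., by a running instance) while the slice
    /// is alive.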
    pub unsafe fn heap(&self) -> &[u8] {
        std::slice::from_raw_parts(self.slot().heap as *mut u8, self.heap_accessible_size)
    }

    /// A mutable view of the accessible heap as a byte slice; see `heap` for the safety
    /// contract.
    pub unsafe fn heap_mut(&mut self) -> &mut [u8] {
        std::slice::from_raw_parts_mut(self.slot().heap as *mut u8, self.heap_accessible_size)
    }

    /// A view of the accessible heap as a slice of `u32`s.
    pub unsafe fn heap_u32(&self) -> &[u32] {
        assert!(self.slot().heap as usize % 4 == 0, "heap is 4-byte aligned");
        assert!(
            self.heap_accessible_size % 4 == 0,
            "heap size is multiple of 4-bytes"
        );
        std::slice::from_raw_parts(self.slot().heap as *mut u32, self.heap_accessible_size / 4)
    }

    /// A mutable view of the accessible heap as a slice of `u32`s.
    pub unsafe fn heap_u32_mut(&mut self) -> &mut [u32] {
        assert!(self.slot().heap as usize % 4 == 0, "heap is 4-byte aligned");
        assert!(
            self.heap_accessible_size % 4 == 0,
            "heap size is multiple of 4-bytes"
        );
        std::slice::from_raw_parts_mut(self.slot().heap as *mut u32, self.heap_accessible_size / 4)
    }

    /// A view of the accessible heap as a slice of `u64`s.
    pub unsafe fn heap_u64(&self) -> &[u64] {
        assert!(self.slot().heap as usize % 8 == 0, "heap is 8-byte aligned");
        assert!(
            self.heap_accessible_size % 8 == 0,
            "heap size is multiple of 8-bytes"
        );
        std::slice::from_raw_parts(self.slot().heap as *mut u64, self.heap_accessible_size / 8)
    }

    /// A mutable view of the accessible heap as a slice of `u64`s.
    pub unsafe fn heap_u64_mut(&mut self) -> &mut [u64] {
        assert!(self.slot().heap as usize % 8 == 0, "heap is 8-byte aligned");
        assert!(
            self.heap_accessible_size % 8 == 0,
            "heap size is multiple of 8-bytes"
        );
        std::slice::from_raw_parts_mut(self.slot().heap as *mut u64, self.heap_accessible_size / 8)
    }

    /// A mutable view of the stack as a byte slice.
    pub unsafe fn stack_mut(&mut self) -> &mut [u8] {
        std::slice::from_raw_parts_mut(self.slot().stack as *mut u8, self.slot().limits.stack_size)
    }

    /// A mutable view of the stack as a slice of `u64`s.
    pub unsafe fn stack_u64_mut(&mut self) -> &mut [u64] {
        assert!(
            self.slot().stack as usize % 8 == 0,
            "stack is 8-byte aligned"
        );
        assert!(
            self.slot().limits.stack_size % 8 == 0,
            "stack size is multiple of 8-bytes"
        );
        std::slice::from_raw_parts_mut(
            self.slot().stack as *mut u64,
            self.slot().limits.stack_size / 8,
        )
    }

    /// A view of the globals as a slice of `GlobalValue`s.
    pub unsafe fn globals(&self) -> &[GlobalValue] {
        std::slice::from_raw_parts(
            self.slot().globals as *const GlobalValue,
            self.slot().limits.globals_size / std::mem::size_of::<GlobalValue>(),
        )
    }

    /// A mutable view of the globals as a slice of `GlobalValue`s.
    pub unsafe fn globals_mut(&mut self) -> &mut [GlobalValue] {
        std::slice::from_raw_parts_mut(
            self.slot().globals as *mut GlobalValue,
            self.slot().limits.globals_size / std::mem::size_of::<GlobalValue>(),
        )
    }

    /// A mutable view of the signal handler stack as a byte slice.
    pub unsafe fn sigstack_mut(&mut self) -> &mut [u8] {
        std::slice::from_raw_parts_mut(
            self.slot().sigstack as *mut u8,
            self.slot().limits.signal_stack_size,
        )
    }

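    /// Whether the `len` bytes starting at `ptr` lie entirely within the accessible heap.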
    pub fn mem_in_heap<T>(&self, ptr: *const T, len: usize) -> bool {
        let start = ptr as usize;
        let end = start + len;

        let heap_start = self.slot().heap as usize;
        let heap_end = heap_start + self.heap_accessible_size;

        start <= end
            && start >= heap_start
            && start < heap_end
            && end >= heap_start
            && end <= heap_end
    }
}

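/// Runtime limits for the various memory regions of an instance's slot, in bytes.
///
/// These bounds apply to every instance in a region, independently of any module-specified
/// `HeapSpec`.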
#[derive(Clone, Debug)]
#[repr(C)]
pub struct Limits {
    /// Maximum size of the heap that may be backed by real memory, in bytes.
    pub heap_memory_size: usize,
    /// Total virtual address space reserved for the heap, in bytes.
    pub heap_address_space_size: usize,
    /// Size of the guest stack, in bytes.
    pub stack_size: usize,
    /// Size of the globals region, in bytes.
    pub globals_size: usize,
    /// Size of the signal handler stack, in bytes; must be at least `MINSIGSTKSZ`, and at
    /// least 12 KiB in debug builds (see `validate`).
    pub signal_stack_size: usize,
}

// The minimum usable size for the signal handler stack: 32 KiB on macOS, and
// `libc::MINSIGSTKSZ` elsewhere.
#[cfg(target_os = "macos")]
pub const MINSIGSTKSZ: usize = 32 * 1024;

#[cfg(not(target_os = "macos"))]
pub const MINSIGSTKSZ: usize = libc::MINSIGSTKSZ;

// Debug builds need a larger signal stack (`validate` requires at least 12 KiB), so use that
// where `libc::SIGSTKSZ` would be too small.
#[cfg(all(debug_assertions, not(target_os = "macos")))]
pub const DEFAULT_SIGNAL_STACK_SIZE: usize = 12 * 1024;

#[cfg(all(debug_assertions, target_os = "macos"))]
pub const DEFAULT_SIGNAL_STACK_SIZE: usize = libc::SIGSTKSZ;

#[cfg(not(debug_assertions))]
pub const DEFAULT_SIGNAL_STACK_SIZE: usize = libc::SIGSTKSZ;

impl Limits {
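    /// Reasonable default limits: a 1 MiB accessible heap within 8 GiB of reserved address
    /// space, a 128 KiB stack, one page of globals, and the default signal stack.
    ///
    /// Because this is a `const fn`, it can be used in constants and statics; a minimal
    /// sketch:
    ///
    /// ```no_run
    /// use lucet_runtime_internals::alloc::Limits;
    ///
    /// const LIMITS: Limits = Limits::default();
    /// assert!(LIMITS.validate().is_ok());
    /// ```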
    pub const fn default() -> Limits {
        Limits {
            heap_memory_size: 16 * 64 * 1024,
            heap_address_space_size: 0x200000000,
            stack_size: 128 * 1024,
            globals_size: 4096,
            signal_stack_size: DEFAULT_SIGNAL_STACK_SIZE,
        }
    }
}

impl Limits {
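    /// The total address space a slot requires under these limits, computed with checked
    /// addition: the instance page, the heap address space, a guard page, the stack, the
    /// globals, another guard page, and the signal stack.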
    pub fn total_memory_size(&self) -> usize {
        [
            instance_heap_offset(),
            self.heap_address_space_size,
            host_page_size(),
            self.stack_size,
            self.globals_size,
            host_page_size(),
            self.signal_stack_size,
        ]
        .iter()
        .try_fold(0usize, |acc, &x| acc.checked_add(x))
        .expect("total_memory_size doesn't overflow")
    }

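    /// Check that these limits are usable: sizes must be multiples of the host page size, the
    /// heap must fit within its reserved address space, the stack must be non-empty, and the
    /// signal stack must satisfy the platform minimum.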
    pub fn validate(&self) -> Result<(), Error> {
        if self.heap_memory_size % host_page_size() != 0 {
            return Err(Error::InvalidArgument(
                "memory size must be a multiple of host page size",
            ));
        }
        if self.heap_address_space_size % host_page_size() != 0 {
            return Err(Error::InvalidArgument(
                "address space size must be a multiple of host page size",
            ));
        }
        if self.heap_memory_size > self.heap_address_space_size {
            return Err(Error::InvalidArgument(
                "address space size must be at least as large as memory size",
            ));
        }
        if self.stack_size % host_page_size() != 0 {
            return Err(Error::InvalidArgument(
                "stack size must be a multiple of host page size",
            ));
        }
        if self.globals_size % host_page_size() != 0 {
            return Err(Error::InvalidArgument(
                "globals size must be a multiple of host page size",
            ));
        }
        if self.stack_size == 0 {
            return Err(Error::InvalidArgument("stack size must be greater than 0"));
        }
        if self.signal_stack_size < MINSIGSTKSZ {
            return Err(Error::InvalidArgument(
                "signal stack size must be at least MINSIGSTKSZ (defined in <signal.h>)",
            ));
        }
        if cfg!(debug_assertions) && self.signal_stack_size < 12 * 1024 {
            return Err(Error::InvalidArgument(
                "signal stack size must be at least 12KiB for debug builds",
            ));
        }
        if self.signal_stack_size % host_page_size() != 0 {
            return Err(Error::InvalidArgument(
                "signal stack size must be a multiple of host page size",
            ));
        }
        Ok(())
    }
}

pub mod tests;