substrate_wasmtime_runtime/traphandlers.rs
//! WebAssembly trap handling, which is built on top of the lower-level
//! signal-handling mechanisms.

use crate::VMContext;
use backtrace::Backtrace;
use std::any::Any;
use std::cell::Cell;
use std::error::Error;
use std::io;
use std::ptr;
use std::sync::atomic::{AtomicUsize, Ordering::SeqCst};
use std::sync::Once;
use wasmtime_environ::ir;

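// These two functions are implemented in C (a small shim assumed to be
// compiled as part of this crate's build). The contract relied on by
// `catch_traps` below: `RegisterSetjmp` performs a `setjmp`, stores the
// buffer through `jmp_buf`, invokes `callback(payload)`, and returns 1 when
// the callback returns normally; if `Unwind` is later called with that
// buffer it `longjmp`s back, making `RegisterSetjmp` return 0 instead.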
extern "C" {
    fn RegisterSetjmp(
        jmp_buf: *mut *const u8,
        callback: extern "C" fn(*mut u8),
        payload: *mut u8,
    ) -> i32;
    fn Unwind(jmp_buf: *const u8) -> !;
}

cfg_if::cfg_if! {
    if #[cfg(unix)] {
        use std::mem::{self, MaybeUninit};

        /// Function which may handle custom signals while processing traps.
        pub type SignalHandler<'a> = dyn Fn(libc::c_int, *const libc::siginfo_t, *const libc::c_void) -> bool
            + Send + Sync + 'a;

        static mut PREV_SIGSEGV: MaybeUninit<libc::sigaction> = MaybeUninit::uninit();
        static mut PREV_SIGBUS: MaybeUninit<libc::sigaction> = MaybeUninit::uninit();
        static mut PREV_SIGILL: MaybeUninit<libc::sigaction> = MaybeUninit::uninit();
        static mut PREV_SIGFPE: MaybeUninit<libc::sigaction> = MaybeUninit::uninit();

        unsafe fn platform_init() {
            let register = |slot: &mut MaybeUninit<libc::sigaction>, signal: i32| {
                let mut handler: libc::sigaction = mem::zeroed();
                // The flags here are chosen relatively carefully:
                //
                // SA_SIGINFO gives us access to information like the program
                // counter from where the fault happened.
                //
                // SA_ONSTACK allows us to handle signals on an alternate stack,
                // so that the handler can run in response to running out of
                // stack space on the main stack. Rust installs an alternate
                // stack with sigaltstack, so we rely on that.
                //
                // SA_NODEFER allows us to reenter the signal handler if we
                // crash while handling the signal, and fall through to the
                // Breakpad handler by testing handlingSegFault.
                handler.sa_flags = libc::SA_SIGINFO | libc::SA_NODEFER | libc::SA_ONSTACK;
                handler.sa_sigaction = trap_handler as usize;
                libc::sigemptyset(&mut handler.sa_mask);
                if libc::sigaction(signal, &handler, slot.as_mut_ptr()) != 0 {
                    panic!(
                        "unable to install signal handler: {}",
                        io::Error::last_os_error(),
                    );
                }
            };

            // Allow handling OOB with signals on all architectures.
            register(&mut PREV_SIGSEGV, libc::SIGSEGV);

            // Handle `unreachable` instructions, which execute `ud2` right now.
            register(&mut PREV_SIGILL, libc::SIGILL);

            // x86 uses SIGFPE to report division by zero.
            if cfg!(target_arch = "x86") || cfg!(target_arch = "x86_64") {
                register(&mut PREV_SIGFPE, libc::SIGFPE);
            }

            // On ARM, handle unaligned accesses.
            // On Darwin, guard page accesses are raised as SIGBUS.
            if cfg!(target_arch = "arm") || cfg!(target_os = "macos") {
                register(&mut PREV_SIGBUS, libc::SIGBUS);
            }
        }

        unsafe extern "C" fn trap_handler(
            signum: libc::c_int,
            siginfo: *mut libc::siginfo_t,
            context: *mut libc::c_void,
        ) {
            let previous = match signum {
                libc::SIGSEGV => &PREV_SIGSEGV,
                libc::SIGBUS => &PREV_SIGBUS,
                libc::SIGFPE => &PREV_SIGFPE,
                libc::SIGILL => &PREV_SIGILL,
                _ => panic!("unknown signal: {}", signum),
            };
            let handled = tls::with(|info| {
                // If no wasm code is executing, we don't handle this as a wasm
                // trap.
                let info = match info {
                    Some(info) => info,
                    None => return false,
                };

                // If we hit an exception while handling a previous trap, that's
                // quite bad, so bail out and let the system handle this
                // recursive segfault.
                //
                // Otherwise flag ourselves as handling a trap, do the trap
                // handling, and reset our trap handling flag. Then we figure
                // out what to do based on the result of the trap handling.
                let jmp_buf = info.handle_trap(
                    get_pc(context),
                    |handler| handler(signum, siginfo, context),
                );

                // Figure out what to do based on the result of this handling of
                // the trap. Note that our sentinel value of 1 means that the
                // exception was handled by a custom exception handler, so we
                // keep executing.
                if jmp_buf.is_null() {
                    return false;
                } else if jmp_buf as usize == 1 {
                    return true;
                } else {
                    Unwind(jmp_buf)
                }
            });

            if handled {
                return;
            }

            // This signal is not for any compiled wasm code we expect, so we
            // need to forward the signal to the next handler. If there is no
            // next handler (SIG_IGN or SIG_DFL), then it's time to crash. To do
            // this, we set the signal back to its original disposition and
            // return. This will cause the faulting op to be re-executed, which
            // will crash in the normal way. If there is a next handler, call
            // it. It will either crash synchronously, fix up the instruction
            // so that execution can continue and return, or trigger a crash by
            // returning the signal to its original disposition and returning.
            let previous = &*previous.as_ptr();
            if previous.sa_flags & libc::SA_SIGINFO != 0 {
                mem::transmute::<
                    usize,
                    extern "C" fn(libc::c_int, *mut libc::siginfo_t, *mut libc::c_void),
                >(previous.sa_sigaction)(signum, siginfo, context)
            } else if previous.sa_sigaction == libc::SIG_DFL ||
                previous.sa_sigaction == libc::SIG_IGN
            {
                libc::sigaction(signum, previous, ptr::null_mut());
            } else {
                mem::transmute::<usize, extern "C" fn(libc::c_int)>(
                    previous.sa_sigaction
                )(signum)
            }
        }

        unsafe fn get_pc(cx: *mut libc::c_void) -> *const u8 {
            cfg_if::cfg_if! {
                if #[cfg(all(target_os = "linux", target_arch = "x86_64"))] {
                    let cx = &*(cx as *const libc::ucontext_t);
                    cx.uc_mcontext.gregs[libc::REG_RIP as usize] as *const u8
                } else if #[cfg(all(target_os = "linux", target_arch = "aarch64"))] {
                    // libc doesn't seem to support Linux/aarch64 at the moment?
                    extern "C" {
                        fn GetPcFromUContext(cx: *mut libc::c_void) -> *const u8;
                    }
                    GetPcFromUContext(cx)
                } else if #[cfg(target_os = "macos")] {
                    // FIXME(rust-lang/libc#1702) - once that lands and is
                    // released we should inline the definition here
                    extern "C" {
                        fn GetPcFromUContext(cx: *mut libc::c_void) -> *const u8;
                    }
                    GetPcFromUContext(cx)
                } else {
                    compile_error!("unsupported platform");
                }
            }
        }
    } else if #[cfg(target_os = "windows")] {
        use winapi::um::errhandlingapi::*;
        use winapi::um::winnt::*;
        use winapi::um::minwinbase::*;
        use winapi::vc::excpt::*;

        /// Function which may handle custom signals while processing traps.
        pub type SignalHandler<'a> = dyn Fn(winapi::um::winnt::PEXCEPTION_POINTERS) -> bool
            + Send + Sync + 'a;

        unsafe fn platform_init() {
            // Our trap handler needs to go first, so that we can recover from
            // wasm faults and continue execution; pass `1` as a true value
            // here.
            if AddVectoredExceptionHandler(1, Some(exception_handler)).is_null() {
                panic!("failed to add exception handler: {}", io::Error::last_os_error());
            }
        }

        unsafe extern "system" fn exception_handler(
            exception_info: PEXCEPTION_POINTERS
        ) -> LONG {
            // Check the kind of exception, since we only handle a subset within
            // wasm code. If anything else happens we want to defer to whatever
            // the rest of the system wants to do for this exception.
            let record = &*(*exception_info).ExceptionRecord;
            if record.ExceptionCode != EXCEPTION_ACCESS_VIOLATION &&
                record.ExceptionCode != EXCEPTION_ILLEGAL_INSTRUCTION &&
                record.ExceptionCode != EXCEPTION_INT_DIVIDE_BY_ZERO &&
                record.ExceptionCode != EXCEPTION_INT_OVERFLOW
            {
                return EXCEPTION_CONTINUE_SEARCH;
            }

            // FIXME: this is what the previous C++ did to make sure that TLS
            // works by the time we execute this trap handling code. This isn't
            // exactly super easy to call from Rust though and it's not clear we
            // necessarily need to do so. Leaving this here in case we need this
            // in the future, but for now we can probably wait until we see a
            // strange fault before figuring out how to reimplement this in
            // Rust.
            //
            // if (!NtCurrentTeb()->Reserved1[sThreadLocalArrayPointerIndex]) {
            //     return EXCEPTION_CONTINUE_SEARCH;
            // }

            // This is basically the same as the unix version above, only with a
            // few parameters tweaked here and there.
            tls::with(|info| {
                let info = match info {
                    Some(info) => info,
                    None => return EXCEPTION_CONTINUE_SEARCH,
                };
                let jmp_buf = info.handle_trap(
                    (*(*exception_info).ContextRecord).Rip as *const u8,
                    |handler| handler(exception_info),
                );
                if jmp_buf.is_null() {
                    EXCEPTION_CONTINUE_SEARCH
                } else if jmp_buf as usize == 1 {
                    EXCEPTION_CONTINUE_EXECUTION
                } else {
                    Unwind(jmp_buf)
                }
            })
        }
    }
}

/// This function performs the low-overhead signal handler initialization
/// that we want to do eagerly to ensure a more-deterministic global process
/// state.
///
/// This is especially relevant for signal handlers, since handler ordering
/// depends on installation order: the wasm signal handler must run *before*
/// the other crash handlers, and since POSIX signal handlers work LIFO this
/// function needs to be called at the end of the startup process, after
/// other handlers have been installed. This function can be called multiple
/// times; calls after the first have no effect.
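///
/// # Example
///
/// A minimal sketch of embedder startup (the crate name here is an
/// assumption based on this file's path):
///
/// ```ignore
/// // Install any embedder-specific crash handlers first, then install the
/// // wasm trap handler last so that it runs first (LIFO ordering).
/// substrate_wasmtime_runtime::init_traps();
/// substrate_wasmtime_runtime::init_traps(); // later calls are no-ops
/// ```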
pub fn init_traps() {
    static INIT: Once = Once::new();
    INIT.call_once(real_init);
}

fn real_init() {
    unsafe {
        platform_init();
    }
}

/// Raises a user-defined trap immediately.
///
/// This function performs as-if a wasm trap was just executed, only the trap
/// has a dynamic payload associated with it which is user-provided. This trap
/// payload is then returned from `catch_traps` below.
///
/// # Safety
///
/// Only safe to call when wasm code is on the stack, aka `catch_traps` must
/// have been previously called. Additionally no Rust destructors can be on the
/// stack. They will be skipped and not executed.
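///
/// # Example
///
/// A minimal sketch of use from a host function called by wasm (the error
/// value here is illustrative):
///
/// ```ignore
/// // Safety: only called while wasm frames entered via `catch_traps` are on
/// // the stack, with no Rust destructors between here and the wasm entry.
/// unsafe {
///     raise_user_trap(Box::new(std::io::Error::new(
///         std::io::ErrorKind::Other,
///         "host rejected the call",
///     )));
/// }
/// ```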
pub unsafe fn raise_user_trap(data: Box<dyn Error + Send + Sync>) -> ! {
    tls::with(|info| info.unwrap().unwind_with(UnwindReason::UserTrap(data)))
}

/// Raises a trap from inside library code immediately.
///
/// This function performs as-if a wasm trap was just executed. This trap
/// payload is then returned from `catch_traps` below.
///
/// # Safety
///
/// Only safe to call when wasm code is on the stack, aka `catch_traps` must
/// have been previously called. Additionally no Rust destructors can be on the
/// stack. They will be skipped and not executed.
pub unsafe fn raise_lib_trap(trap: Trap) -> ! {
    tls::with(|info| info.unwrap().unwind_with(UnwindReason::LibTrap(trap)))
}

/// Carries a Rust panic across wasm code and resumes the panic on the other
/// side.
///
/// # Safety
///
/// Only safe to call when wasm code is on the stack, aka `catch_traps` must
/// have been previously called. Additionally no Rust destructors can be on the
/// stack. They will be skipped and not executed.
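///
/// # Example
///
/// A sketch of the intended pattern at a host/wasm boundary (the names are
/// illustrative, not part of this crate):
///
/// ```ignore
/// // A host trampoline catches panics before they cross into wasm frames,
/// // then resumes them once the wasm frames have been unwound.
/// let result = std::panic::catch_unwind(|| call_host_function());
/// if let Err(payload) = result {
///     // Safety: wasm frames entered via `catch_traps` are on the stack.
///     unsafe { resume_panic(payload) }
/// }
/// ```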
pub unsafe fn resume_panic(payload: Box<dyn Any + Send>) -> ! {
    tls::with(|info| info.unwrap().unwind_with(UnwindReason::Panic(payload)))
}

/// Describes the reason a wasm execution trapped, storing a native backtrace
/// where one was captured.
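///
/// A sketch of how an embedder might inspect a trap returned from
/// `catch_traps`:
///
/// ```ignore
/// match trap {
///     Trap::User(err) => eprintln!("host error: {}", err),
///     Trap::Jit { pc, maybe_interrupted, .. } => {
///         eprintln!("wasm fault at {:#x} (interrupted: {})", pc, maybe_interrupted)
///     }
///     Trap::Wasm { trap_code, .. } => eprintln!("wasm trap: {:?}", trap_code),
///     Trap::OOM { .. } => eprintln!("the runtime ran out of memory"),
/// }
/// ```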
#[derive(Debug)]
pub enum Trap {
    /// A user-raised trap through `raise_user_trap`.
    User(Box<dyn Error + Send + Sync>),

    /// A trap raised from JIT code.
    Jit {
        /// The program counter in JIT code where this trap happened.
        pc: usize,
        /// Native stack backtrace at the time the trap occurred.
        backtrace: Backtrace,
        /// An indicator for whether this may have been a trap generated from an
        /// interrupt, used for switching what would otherwise be a stack
        /// overflow trap to be an interrupt trap.
        maybe_interrupted: bool,
    },

    /// A trap raised from a wasm libcall.
    Wasm {
        /// Code of the trap.
        trap_code: ir::TrapCode,
        /// Native stack backtrace at the time the trap occurred.
        backtrace: Backtrace,
    },

    /// A trap indicating that the runtime was unable to allocate sufficient memory.
    OOM {
        /// Native stack backtrace at the time the OOM occurred.
        backtrace: Backtrace,
    },
}

impl Trap {
    /// Construct a new Wasm trap with the given trap code.
    ///
    /// Internally saves a backtrace when constructed.
    pub fn wasm(trap_code: ir::TrapCode) -> Self {
        let backtrace = Backtrace::new_unresolved();
        Trap::Wasm {
            trap_code,
            backtrace,
        }
    }

    /// Construct a new OOM trap.
    ///
    /// Internally saves a backtrace when constructed.
    pub fn oom() -> Self {
        let backtrace = Backtrace::new_unresolved();
        Trap::OOM { backtrace }
    }
}

/// Catches any wasm traps that happen within the execution of `closure`,
/// returning them as a `Result`.
///
/// Highly unsafe since `closure` won't have any dtors run.
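///
/// # Example
///
/// A minimal sketch of a call into wasm under trap protection (the vmctx,
/// stack size, and code-range check are placeholders):
///
/// ```ignore
/// let result = unsafe {
///     catch_traps(
///         vmctx,                         // *mut VMContext for the instance
///         1 << 20,                       // max native stack wasm may use
///         |pc| code_range.contains(&pc), // "is this pc in jitted code?"
///         None,                          // no custom signal handler
///         || call_wasm_entry_point(),    // the actual call into wasm
///     )
/// };
/// if let Err(trap) = result {
///     eprintln!("wasm trapped: {:?}", trap);
/// }
/// ```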
pub unsafe fn catch_traps<F>(
    vmctx: *mut VMContext,
    max_wasm_stack: usize,
    is_wasm_code: impl Fn(usize) -> bool,
    signal_handler: Option<&SignalHandler>,
    mut closure: F,
) -> Result<(), Trap>
where
    F: FnMut(),
{
    // Ensure that we have our sigaltstack installed.
    #[cfg(unix)]
    setup_unix_sigaltstack()?;

    return CallThreadState::new(vmctx, &is_wasm_code, signal_handler).with(max_wasm_stack, |cx| {
        RegisterSetjmp(
            cx.jmp_buf.as_ptr(),
            call_closure::<F>,
            &mut closure as *mut F as *mut u8,
        )
    });

    extern "C" fn call_closure<F>(payload: *mut u8)
    where
        F: FnMut(),
    {
        unsafe { (*(payload as *mut F))() }
    }
}

/// Temporary state stored on the stack which is registered in the `tls` module
/// below for calls into wasm.
pub struct CallThreadState<'a> {
    unwind: Cell<UnwindReason>,
    jmp_buf: Cell<*const u8>,
    vmctx: *mut VMContext,
    handling_trap: Cell<bool>,
    is_wasm_code: &'a (dyn Fn(usize) -> bool + 'a),
    signal_handler: Option<&'a SignalHandler<'a>>,
}

enum UnwindReason {
    None,
    Panic(Box<dyn Any + Send>),
    UserTrap(Box<dyn Error + Send + Sync>),
    LibTrap(Trap),
    JitTrap { backtrace: Backtrace, pc: usize },
}

impl<'a> CallThreadState<'a> {
    fn new(
        vmctx: *mut VMContext,
        is_wasm_code: &'a (dyn Fn(usize) -> bool + 'a),
        signal_handler: Option<&'a SignalHandler<'a>>,
    ) -> CallThreadState<'a> {
        CallThreadState {
            unwind: Cell::new(UnwindReason::None),
            vmctx,
            jmp_buf: Cell::new(ptr::null()),
            handling_trap: Cell::new(false),
            is_wasm_code,
            signal_handler,
        }
    }

    fn with(
        self,
        max_wasm_stack: usize,
        closure: impl FnOnce(&CallThreadState) -> i32,
    ) -> Result<(), Trap> {
        let _reset = self.update_stack_limit(max_wasm_stack)?;
        let ret = tls::set(&self, || closure(&self));
        match self.unwind.replace(UnwindReason::None) {
            UnwindReason::None => {
                debug_assert_eq!(ret, 1);
                Ok(())
            }
            UnwindReason::UserTrap(data) => {
                debug_assert_eq!(ret, 0);
                Err(Trap::User(data))
            }
            UnwindReason::LibTrap(trap) => Err(trap),
            UnwindReason::JitTrap { backtrace, pc } => {
                debug_assert_eq!(ret, 0);
                let maybe_interrupted = unsafe {
                    (*self.vmctx).instance().interrupts.stack_limit.load(SeqCst)
                        == wasmtime_environ::INTERRUPTED
                };
                Err(Trap::Jit {
                    pc,
                    backtrace,
                    maybe_interrupted,
                })
            }
            UnwindReason::Panic(panic) => {
                debug_assert_eq!(ret, 0);
                std::panic::resume_unwind(panic)
            }
        }
    }

    /// Checks and/or initializes the wasm native call stack limit.
    ///
    /// This function will inspect the current state of the stack and calling
    /// context to determine which of three buckets we're in:
    ///
    /// 1. We are the first wasm call on the stack. This means that we need to
    ///    set up a stack limit such that a trap is forced if the native wasm
    ///    stack pointer goes beyond it. For now we simply reserve an
    ///    arbitrary chunk of bytes (1 MB from roughly the current native stack
    ///    pointer). This logic will likely get tweaked over time.
    ///
    /// 2. We aren't the first wasm call on the stack. In this scenario the wasm
    ///    stack limit is already configured. In this wasm -> host -> wasm case
    ///    we assume that the native stack consumed by the host is accounted for
    ///    in the initial stack limit calculation, so in this scenario we do
    ///    nothing.
    ///
    /// 3. We were previously interrupted. In this case we consume the interrupt
    ///    here and return a trap, clearing the interrupt and allowing the next
    ///    wasm call to proceed.
    ///
    /// The return value here is a trap for case 3, a noop destructor in case 2,
    /// and a meaningful destructor in case 1.
    ///
    /// For more information about interrupts and stack limits see
    /// `crates/environ/src/cranelift.rs`.
    ///
    /// Note that this function must be called with `self` on the stack, not the
    /// heap/etc.
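    ///
    /// A sketch of the sentinel protocol on `stack_limit` that the body below
    /// implements (`INTERRUPTED` comes from `wasmtime_environ`; the limit
    /// addresses here are made up):
    ///
    /// ```ignore
    /// use std::sync::atomic::{AtomicUsize, Ordering::SeqCst};
    ///
    /// let limit = AtomicUsize::new(usize::max_value()); // "no wasm running" sentinel
    /// // Case 1: the first wasm call claims the slot with its computed limit.
    /// assert!(limit
    ///     .compare_exchange(usize::max_value(), 0x7000_0000, SeqCst, SeqCst)
    ///     .is_ok());
    /// // Case 2: a nested wasm call finds the slot taken and leaves it alone.
    /// assert!(limit
    ///     .compare_exchange(usize::max_value(), 0x6000_0000, SeqCst, SeqCst)
    ///     .is_err());
    /// ```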
    fn update_stack_limit(&self, max_wasm_stack: usize) -> Result<impl Drop + '_, Trap> {
        // Make an "educated guess" of the address below which the wasm stack
        // pointer should start trapping.
        let wasm_stack_limit = self as *const _ as usize - max_wasm_stack;

        let interrupts = unsafe { &**(&*self.vmctx).instance().interrupts() };
        let reset_stack_limit = match interrupts.stack_limit.compare_exchange(
            usize::max_value(),
            wasm_stack_limit,
            SeqCst,
            SeqCst,
        ) {
            Ok(_) => {
                // We're the first wasm on the stack so we've now reserved the
                // `max_wasm_stack` bytes of native stack space for wasm.
                // Nothing left to do here now except reset back when we're
                // done.
                true
            }
            Err(n) if n == wasmtime_environ::INTERRUPTED => {
                // This means that an interrupt happened before we actually
                // called this function, which means that we're now
                // considered interrupted. Be sure to consume this interrupt
                // as part of this process too.
                interrupts.stack_limit.store(usize::max_value(), SeqCst);
                return Err(Trap::Wasm {
                    trap_code: ir::TrapCode::Interrupt,
                    backtrace: Backtrace::new_unresolved(),
                });
            }
            Err(_) => {
                // The stack limit was previously set by a previous wasm
                // call on the stack. We leave the original stack limit for
                // wasm in place in that case, and don't reset the stack
                // limit when we're done.
                false
            }
        };

        struct Reset<'a>(bool, &'a AtomicUsize);

        impl Drop for Reset<'_> {
            fn drop(&mut self) {
                if self.0 {
                    self.1.store(usize::max_value(), SeqCst);
                }
            }
        }

        Ok(Reset(reset_stack_limit, &interrupts.stack_limit))
    }

    fn unwind_with(&self, reason: UnwindReason) -> ! {
        self.unwind.replace(reason);
        unsafe {
            Unwind(self.jmp_buf.get());
        }
    }

    /// Trap handler using our thread-local state.
    ///
    /// * `pc` - the program counter the trap happened at
    /// * `call_handler` - a closure used to invoke the platform-specific
    ///   signal handler for each instance, if available.
    ///
    /// Attempts to handle the trap if it's a wasm trap. Returns a few
    /// different things:
    ///
    /// * null - the trap didn't look like a wasm trap and should continue as a
    ///   trap
    /// * 1 as a pointer - the trap was handled by a custom trap handler on an
    ///   instance, and the trap handler should quickly return.
    /// * a different pointer - a jmp_buf buffer to longjmp to, meaning that
    ///   the wasm trap was successfully handled.
    fn handle_trap(
        &self,
        pc: *const u8,
        call_handler: impl Fn(&SignalHandler) -> bool,
    ) -> *const u8 {
        // If we hit a fault while handling a previous trap, that's quite bad,
        // so bail out and let the system handle this recursive segfault.
        //
        // Otherwise flag ourselves as handling a trap, do the trap handling,
        // and reset our trap handling flag.
        if self.handling_trap.replace(true) {
            return ptr::null();
        }
        let _reset = ResetCell(&self.handling_trap, false);

        // If we haven't even started to handle traps yet, bail out.
        if self.jmp_buf.get().is_null() {
            return ptr::null();
        }

        // If this fault wasn't in wasm code, then it's not our problem.
        if !(self.is_wasm_code)(pc as usize) {
            return ptr::null();
        }

        // First up see if any instance registered has a custom trap handler,
        // in which case run them all. If anything handles the trap then we
        // return that the trap was handled.
        if let Some(handler) = self.signal_handler {
            if call_handler(handler) {
                return 1 as *const _;
            }
        }

        // TODO: stack overflow can happen at any random time (i.e. in malloc()
        // in memory.grow) and it's really hard to determine if the cause was
        // stack overflow and whether it happened in a WebAssembly module.
        //
        // So, let's assume that any untrusted code called from WebAssembly
        // doesn't trap. Then, if we have called some WebAssembly code, it
        // means the trap is stack overflow.
        if self.jmp_buf.get().is_null() {
            return ptr::null();
        }
        let backtrace = Backtrace::new_unresolved();
        self.unwind.replace(UnwindReason::JitTrap {
            backtrace,
            pc: pc as usize,
        });
        self.jmp_buf.get()
    }
}

struct ResetCell<'a, T: Copy>(&'a Cell<T>, T);

impl<T: Copy> Drop for ResetCell<'_, T> {
    fn drop(&mut self) {
        self.0.set(self.1);
    }
}

// A private inner module for managing the TLS state that we require across
// calls in wasm. The WebAssembly code is called from C++ and then a trap may
// happen which requires us to read some contextual state to figure out what to
// do with the trap. This `tls` module is used to persist that information from
// the caller to the trap site.
mod tls {
    use super::CallThreadState;
    use std::cell::Cell;
    use std::mem;
    use std::ptr;

    thread_local!(static PTR: Cell<*const CallThreadState<'static>> = Cell::new(ptr::null()));

    /// Configures thread local state such that for the duration of the
    /// execution of `closure` any call to `with` will yield `ptr`, unless this
    /// is recursively called again.
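    ///
    /// A sketch of the set/with relationship (`state` is assumed to be a
    /// `CallThreadState` borrowed for the call):
    ///
    /// ```ignore
    /// // Inside the closure passed to `set`, `with` observes the state...
    /// set(&state, || {
    ///     with(|s| assert!(s.is_some()));
    /// });
    /// // ...and afterwards the previous value (here, null) is restored.
    /// with(|s| assert!(s.is_none()));
    /// ```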
    pub fn set<R>(ptr: &CallThreadState<'_>, closure: impl FnOnce() -> R) -> R {
        struct Reset<'a, T: Copy>(&'a Cell<T>, T);

        impl<T: Copy> Drop for Reset<'_, T> {
            fn drop(&mut self) {
                self.0.set(self.1);
            }
        }

        PTR.with(|p| {
            // Note that this extension of the lifetime to `'static` should be
            // safe because we only ever access it below with an anonymous
            // lifetime, meaning `'static` never leaks out of this module.
            let ptr = unsafe {
                mem::transmute::<*const CallThreadState<'_>, *const CallThreadState<'static>>(ptr)
            };
            let _r = Reset(p, p.replace(ptr));
            closure()
        })
    }

    /// Returns the last state configured with `set` above, if any. Yields
    /// `None` to the closure if `set` is not currently active on this thread.
    pub fn with<R>(closure: impl FnOnce(Option<&CallThreadState<'_>>) -> R) -> R {
        PTR.with(|ptr| {
            let p = ptr.get();
            unsafe { closure(if p.is_null() { None } else { Some(&*p) }) }
        })
    }
}

/// A module for registering a custom alternate signal stack (sigaltstack).
///
/// Rust's libstd installs an alternate stack with size `SIGSTKSZ`, which is
/// not always large enough for our signal handling code. Override it by
/// creating and registering our own alternate stack that is large enough and
/// has a guard page.
#[cfg(unix)]
fn setup_unix_sigaltstack() -> Result<(), Trap> {
    use std::cell::RefCell;
    use std::convert::TryInto;
    use std::ptr::null_mut;

    thread_local! {
        /// Thread-local state is lazy-initialized on the first time it's used,
        /// and dropped when the thread exits.
        static TLS: RefCell<Tls> = RefCell::new(Tls::None);
    }

    /// The size of the sigaltstack (not including the guard, which will be
    /// added). Make this large enough to run our signal handlers.
    const MIN_STACK_SIZE: usize = 16 * 4096;

    enum Tls {
        None,
        Allocated {
            mmap_ptr: *mut libc::c_void,
            mmap_size: usize,
        },
        BigEnough,
    }

    return TLS.with(|slot| unsafe {
        let mut slot = slot.borrow_mut();
        match *slot {
            Tls::None => {}
            // already checked
            _ => return Ok(()),
        }

        // Check to see if the existing sigaltstack, if it exists, is big
        // enough. If so we don't need to allocate our own.
        let mut old_stack = mem::zeroed();
        let r = libc::sigaltstack(ptr::null(), &mut old_stack);
        assert_eq!(r, 0, "learning about sigaltstack failed");
        if old_stack.ss_flags & libc::SS_DISABLE == 0 && old_stack.ss_size >= MIN_STACK_SIZE {
            *slot = Tls::BigEnough;
            return Ok(());
        }

        // ... but failing that we need to allocate our own, so do all that
        // here.
        let page_size: usize = libc::sysconf(libc::_SC_PAGESIZE).try_into().unwrap();
        let guard_size = page_size;
        let alloc_size = guard_size + MIN_STACK_SIZE;

        let ptr = libc::mmap(
            null_mut(),
            alloc_size,
            libc::PROT_NONE,
            libc::MAP_PRIVATE | libc::MAP_ANON,
            -1,
            0,
        );
        if ptr == libc::MAP_FAILED {
            return Err(Trap::oom());
        }

        // Prepare the stack with readable/writable memory and then register it
        // with `sigaltstack`.
        let stack_ptr = (ptr as usize + guard_size) as *mut libc::c_void;
        let r = libc::mprotect(
            stack_ptr,
            MIN_STACK_SIZE,
            libc::PROT_READ | libc::PROT_WRITE,
        );
        assert_eq!(r, 0, "mprotect to configure memory for sigaltstack failed");
        let new_stack = libc::stack_t {
            ss_sp: stack_ptr,
            ss_flags: 0,
            ss_size: MIN_STACK_SIZE,
        };
        let r = libc::sigaltstack(&new_stack, ptr::null_mut());
        assert_eq!(r, 0, "registering new sigaltstack failed");

        *slot = Tls::Allocated {
            mmap_ptr: ptr,
            mmap_size: alloc_size,
        };
        Ok(())
    });

    impl Drop for Tls {
        fn drop(&mut self) {
            let (ptr, size) = match self {
                Tls::Allocated {
                    mmap_ptr,
                    mmap_size,
                } => (*mmap_ptr, *mmap_size),
                _ => return,
            };
            unsafe {
                // Deallocate the stack memory.
                let r = libc::munmap(ptr, size);
                debug_assert_eq!(r, 0, "munmap failed during thread shutdown");
            }
        }
    }
}