/// Stack bounds for conservative stack marking.
///
/// `origin` is the high end of the stack and `bound` the low end; the stack
/// grows downward on all supported targets, so a conservative marker scans
/// the words between `bound` and `origin`.
#[derive(Clone, Copy, PartialEq, Eq)]
pub struct StackBounds {
    pub origin: *mut u8,
    pub bound: *mut u8,
}
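
// Illustrative sketch (a hypothetical helper, not part of the original API):
// a conservative marker walks every word between `bound` (low) and `origin`
// (high) and treats it as a potential heap pointer, so a containment check
// looks like this.
impl StackBounds {
    /// Whether `addr` lies inside this stack region.
    pub fn contains(self, addr: *const u8) -> bool {
        addr >= self.bound as *const u8 && addr < self.origin as *const u8
    }
}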

#[cfg(any(target_os = "macos", target_os = "ios"))]
impl StackBounds {
    pub unsafe fn new_thread_stack_bounds(thread: libc::pthread_t) -> Self {
        // pthread_get_stackaddr_np returns the *highest* address of the stack
        // (its origin); the stack grows down from there, so the bound lies
        // `size` bytes below.
        let origin = libc::pthread_get_stackaddr_np(thread);
        let size = libc::pthread_get_stacksize_np(thread);
        let bound = origin.cast::<u8>().sub(size);
        Self {
            origin: origin.cast(),
            bound,
        }
    }

    pub fn current_thread_stack_bounds() -> Self {
        unsafe { Self::new_thread_stack_bounds(thread_self() as _) }
    }
}

#[cfg(all(unix, not(any(target_os = "macos", target_os = "ios"))))]
impl StackBounds {
    #[cfg(target_os = "openbsd")]
    unsafe fn new_thread_stack_bounds(thread: libc::pthread_t) -> Self {
        // pthread_stackseg_np reports the stack top in `ss_sp`; the bound is
        // `ss_size` bytes below it.
        let mut stack: libc::stack_t = core::mem::MaybeUninit::zeroed().assume_init();
        libc::pthread_stackseg_np(thread, &mut stack);
        let origin = stack.ss_sp;
        let bound = origin.cast::<u8>().sub(stack.ss_size);
        Self {
            origin: origin.cast(),
            bound,
        }
    }

    #[cfg(not(target_os = "openbsd"))]
    unsafe fn new_thread_stack_bounds(thread: libc::pthread_t) -> Self {
        let mut bound = core::ptr::null_mut::<libc::c_void>();
        let mut stack_size = 0;
        let mut sattr: libc::pthread_attr_t = core::mem::MaybeUninit::zeroed().assume_init();
        libc::pthread_attr_init(&mut sattr);
        #[cfg(any(target_os = "freebsd", target_os = "netbsd"))]
        {
            libc::pthread_attr_get_np(thread, &mut sattr);
        }
        #[cfg(not(any(target_os = "freebsd", target_os = "netbsd")))]
        {
            libc::pthread_getattr_np(thread, &mut sattr);
        }
        // pthread_attr_getstack reports the *lowest* addressable byte of the
        // stack, so it is the bound; the origin sits `stack_size` bytes above.
        let _rc = libc::pthread_attr_getstack(&sattr, &mut bound, &mut stack_size);
        libc::pthread_attr_destroy(&mut sattr);
        let origin = bound.cast::<u8>().add(stack_size);
        Self {
            origin,
            bound: bound.cast(),
        }
    }

    pub fn current_thread_stack_bounds() -> Self {
        unsafe { Self::new_thread_stack_bounds(thread_self() as _) }
    }
}
#[allow(dead_code)]
pub(crate) fn thread_self() -> u64 {
    #[cfg(windows)]
    unsafe {
        // GetCurrentThreadId lives in kernel32 and uses the stdcall ABI on
        // 32-bit Windows, hence `extern "system"` rather than `extern "C"`.
        extern "system" {
            fn GetCurrentThreadId() -> u32;
        }
        GetCurrentThreadId() as u64
    }
    #[cfg(unix)]
    unsafe {
        libc::pthread_self() as u64
    }
}
#[cfg(windows)]
impl StackBounds {
    pub unsafe fn current_thread_stack_bounds_internal() -> Self {
        use winapi::um::memoryapi::*;
        use winapi::um::winnt::*;
        // Query the region containing the address of a local variable: it is
        // part of this thread's stack, and its AllocationBase is the base of
        // the whole stack reservation.
        let mut stack_origin: MEMORY_BASIC_INFORMATION =
            core::mem::MaybeUninit::zeroed().assume_init();
        VirtualQuery(
            &mut stack_origin as *mut MEMORY_BASIC_INFORMATION as *mut _,
            &mut stack_origin,
            core::mem::size_of::<MEMORY_BASIC_INFORMATION>(),
        );

        let origin = stack_origin
            .BaseAddress
            .cast::<u8>()
            .add(stack_origin.RegionSize as _);
        // The stack on Windows consists of three parts (uncommitted memory, a guard page and present
        // committed memory). The 3 regions have different BaseAddresses but all have the same AllocationBase
        // since they are all from the same VirtualAlloc. The 3 regions are laid out in memory (from high to
        // low) as follows:
        //
        //    High |-------------------|  -----
        //         | committedMemory   |    ^
        //         |-------------------|    |
        //         | guardPage         | reserved memory for the stack
        //         |-------------------|    |
        //         | uncommittedMemory |    v
        //    Low  |-------------------|  ----- <--- stackOrigin.AllocationBase
        //
        // See http://msdn.microsoft.com/en-us/library/ms686774%28VS.85%29.aspx for more information.
        let mut uncommitted_memory: MEMORY_BASIC_INFORMATION =
            core::mem::MaybeUninit::zeroed().assume_init();
        VirtualQuery(
            stack_origin.AllocationBase as *mut _,
            &mut uncommitted_memory,
            core::mem::size_of::<MEMORY_BASIC_INFORMATION>(),
        );
        let mut guard_page: MEMORY_BASIC_INFORMATION =
            core::mem::MaybeUninit::zeroed().assume_init();
        VirtualQuery(
            uncommitted_memory
                .BaseAddress
                .cast::<u8>()
                .add(uncommitted_memory.RegionSize as _)
                .cast(),
            &mut guard_page,
            core::mem::size_of::<MEMORY_BASIC_INFORMATION>(),
        );
        let end_of_stack = stack_origin.AllocationBase as *mut u8;
        let bound = end_of_stack.add(guard_page.RegionSize as _);
        Self {
            origin: origin as *mut u8,
            bound,
        }
    }

    pub fn current_thread_stack_bounds() -> Self {
        unsafe { Self::current_thread_stack_bounds_internal() }
    }
}
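
// A minimal sanity check (illustrative sketch): the address of a stack local
// in the running test should fall inside the bounds reported for the current
// thread, since `origin` is the high end and `bound` the low end.
#[cfg(test)]
mod tests {
    use super::StackBounds;

    #[test]
    fn local_is_within_current_stack_bounds() {
        let bounds = StackBounds::current_thread_stack_bounds();
        let local = 0u8;
        let addr = &local as *const u8;
        assert!(addr >= bounds.bound as *const u8);
        assert!(addr < bounds.origin as *const u8);
    }
}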