// preemptive_threads/security/stack_protection.rs

use crate::errors::ThreadError;
use crate::security::{SecurityConfig, SecurityViolation, SECURITY_STATE, handle_security_violation};
use crate::mem::Stack;
use portable_atomic::{AtomicUsize, Ordering};
#[cfg(not(feature = "mmu"))]
use alloc::alloc;

/// Magic value written into each stack canary; an overwritten canary no
/// longer matches this pattern.
const STACK_CANARY_MAGIC: u64 = 0xDEADBEEFCAFEBABE;

/// Global counters tracking stack-protection activity.
pub struct StackProtection {
    canaries_placed: AtomicUsize,
    violations_detected: AtomicUsize,
    guard_pages_allocated: AtomicUsize,
}

impl StackProtection {
    pub const fn new() -> Self {
        Self {
            canaries_placed: AtomicUsize::new(0),
            violations_detected: AtomicUsize::new(0),
            guard_pages_allocated: AtomicUsize::new(0),
        }
    }
}

static STACK_PROTECTION: StackProtection = StackProtection::new();

/// A canary record placed at both ends of a thread's stack. `#[repr(C)]`
/// keeps the layout stable for the raw pointer writes and reads below.
#[repr(C)]
#[derive(Clone, Copy)]
pub struct StackCanary {
    magic: u64,
    thread_id: u64,
    timestamp: u64,
}

impl StackCanary {
    pub fn new(thread_id: u64) -> Self {
        let timestamp = crate::time::get_monotonic_time().as_nanos() as u64;
        Self {
            magic: STACK_CANARY_MAGIC,
            thread_id,
            timestamp,
        }
    }

    /// Returns true if the canary is intact and belongs to the expected thread.
    pub fn check(&self, expected_thread_id: u64) -> bool {
        self.magic == STACK_CANARY_MAGIC && self.thread_id == expected_thread_id
    }

    /// Checks the canary, escalating to the security violation handler on
    /// corruption.
    pub fn validate(&self, expected_thread_id: u64) {
        if !self.check(expected_thread_id) {
            handle_security_violation(SecurityViolation::StackCanaryViolation);
        }
    }
}
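
// A minimal sketch of the canary life cycle, assuming a caller that owns a
// raw canary slot (`slot: *mut StackCanary` is hypothetical, not part of
// this module):
//
//     let canary = StackCanary::new(thread_id);
//     unsafe { slot.write(canary) };            // arm the canary
//     // ... thread runs ...
//     unsafe { (*slot).validate(thread_id) };   // escalates on corruption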

#[cfg(feature = "mmu")]
pub struct GuardPage {
    start_addr: usize,
    size: usize,
    is_protected: bool,
}

#[cfg(feature = "mmu")]
impl GuardPage {
    pub fn new(stack_base: usize, _stack_size: usize) -> Result<Self, ThreadError> {
        let page_size = get_page_size();

        // The guard page sits immediately below the stack. This assumes
        // `stack_base` is page-aligned and that the page below it belongs
        // to this allocation.
        let guard_addr = stack_base - page_size;

        unsafe {
            if mprotect(guard_addr as *mut u8, page_size, PROT_NONE) != 0 {
                return Err(ThreadError::MemoryError("Failed to create guard page".into()));
            }
        }

        STACK_PROTECTION.guard_pages_allocated.fetch_add(1, Ordering::Relaxed);

        Ok(Self {
            start_addr: guard_addr,
            size: page_size,
            is_protected: true,
        })
    }

    pub fn unprotect(&mut self) -> Result<(), ThreadError> {
        if self.is_protected {
            unsafe {
                if mprotect(self.start_addr as *mut u8, self.size, PROT_READ | PROT_WRITE) != 0 {
                    return Err(ThreadError::MemoryError("Failed to remove guard page".into()));
                }
            }
            self.is_protected = false;
        }
        Ok(())
    }
}

#[cfg(feature = "mmu")]
impl Drop for GuardPage {
    fn drop(&mut self) {
        // Restore normal permissions before the memory returns to the allocator.
        let _ = self.unprotect();
    }
}
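
// How the guard page trips: once the page is PROT_NONE, any access into it
// (for example, a deep call chain overrunning the stack) faults before the
// overflow can corrupt adjacent memory. A sketch of the intended fault-side
// handling; `on_page_fault` is a hypothetical hook, not part of this module:
//
//     fn on_page_fault(fault_addr: usize) {
//         // If fault_addr lies inside a registered guard page, report a
//         // stack overflow instead of a generic memory fault.
//     }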

pub struct ProtectedStack {
    stack: Stack,
    canary_bottom: Option<StackCanary>,
    canary_top: Option<StackCanary>,
    #[cfg(feature = "mmu")]
    guard_page: Option<GuardPage>,
    thread_id: u64,
}

impl ProtectedStack {
    pub fn new(stack: Stack, thread_id: u64, config: SecurityConfig) -> Result<Self, ThreadError> {
        let mut protected = Self {
            stack,
            canary_bottom: None,
            canary_top: None,
            #[cfg(feature = "mmu")]
            guard_page: None,
            thread_id,
        };

        if config.enable_stack_canaries {
            protected.place_canaries()?;
        }

        #[cfg(feature = "mmu")]
        if config.enable_guard_pages {
            let guard_page = GuardPage::new(
                protected.stack.base() as usize,
                protected.stack.size(),
            )?;
            protected.guard_page = Some(guard_page);
        }

        Ok(protected)
    }

    fn place_canaries(&mut self) -> Result<(), ThreadError> {
        let canary = StackCanary::new(self.thread_id);

        // Bottom canary occupies the first canary-sized slot at the base.
        unsafe {
            let bottom_ptr = self.stack.bottom() as *mut StackCanary;
            bottom_ptr.write(canary);
        }
        self.canary_bottom = Some(canary);

        // Top canary sits two canary sizes below the top, clear of the
        // initial stack frame.
        let canary_offset = core::mem::size_of::<StackCanary>() * 2;
        unsafe {
            let top_ptr = (self.stack.top() as usize - canary_offset) as *mut StackCanary;
            top_ptr.write(canary);
        }
        self.canary_top = Some(canary);

        STACK_PROTECTION.canaries_placed.fetch_add(2, Ordering::Relaxed);
        Ok(())
    }
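
    // Resulting layout (addresses grow upward; sizes not to scale):
    //
    //     stack.bottom()                              -> bottom StackCanary
    //     bottom + size_of::<StackCanary>()           -> start of usable region
    //     stack.top() - 2 * size_of::<StackCanary>()  -> top StackCanary
    //     stack.top()                                 -> end of allocation
    //
    // usable_range() below reports exactly the region between the canaries.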

    pub fn check_canaries(&self) -> bool {
        let mut valid = true;

        if self.canary_bottom.is_some() {
            unsafe {
                let bottom_ptr = self.stack.bottom() as *const StackCanary;
                // Volatile read so the comparison sees the bytes currently
                // in memory rather than a cached value.
                let actual = core::ptr::read_volatile(bottom_ptr);
                if !actual.check(self.thread_id) {
                    valid = false;
                }
            }
        }

        if self.canary_top.is_some() {
            let canary_offset = core::mem::size_of::<StackCanary>() * 2;
            unsafe {
                let top_ptr = (self.stack.top() as usize - canary_offset) as *const StackCanary;
                let actual = core::ptr::read_volatile(top_ptr);
                if !actual.check(self.thread_id) {
                    valid = false;
                }
            }
        }

        if !valid {
            STACK_PROTECTION.violations_detected.fetch_add(1, Ordering::Relaxed);
        }

        valid
    }

    pub fn validate_canaries(&self) {
        if !self.check_canaries() {
            handle_security_violation(SecurityViolation::StackCanaryViolation);
        }
    }

    pub fn stack(&self) -> &Stack {
        &self.stack
    }

    pub fn stack_mut(&mut self) -> &mut Stack {
        &mut self.stack
    }

    /// The address range threads may actually use, excluding canary slots.
    pub fn usable_range(&self) -> (usize, usize) {
        let start = if self.canary_bottom.is_some() {
            self.stack.bottom() as usize + core::mem::size_of::<StackCanary>()
        } else {
            self.stack.bottom() as usize
        };

        let end = if self.canary_top.is_some() {
            self.stack.top() as usize - core::mem::size_of::<StackCanary>() * 2
        } else {
            self.stack.top() as usize
        };

        (start, end)
    }
}
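
// End-to-end sketch of protecting a stack. `Stack::new` and the full set of
// `SecurityConfig` fields are assumptions inferred from the calls above, not
// verified APIs:
//
//     let stack = Stack::new(16 * 1024)?;            // hypothetical constructor
//     let config = SecurityConfig {
//         enable_stack_canaries: true,
//         enable_guard_pages: false,
//         ..Default::default()
//     };
//     let protected = ProtectedStack::new(stack, thread_id, config)?;
//     let (lo, hi) = protected.usable_range();       // hand this range to the thread
//     debug_assert!(protected.check_canaries());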

/// Snapshot of the global stack-protection counters.
#[derive(Debug, Clone)]
pub struct StackProtectionStats {
    pub canaries_placed: usize,
    pub violations_detected: usize,
    pub guard_pages_allocated: usize,
    pub protection_enabled: bool,
}

pub fn init_stack_protection(config: SecurityConfig) -> Result<(), ThreadError> {
    #[cfg(feature = "mmu")]
    if config.enable_guard_pages && !is_mmu_available() {
        return Err(ThreadError::UnsupportedOperation(
            "Guard pages require MMU support".into(),
        ));
    }

    // Without the `mmu` feature there is nothing to validate here.
    #[cfg(not(feature = "mmu"))]
    let _ = config;

    Ok(())
}
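
// Typical call site (sketch): validate the configuration once at boot,
// before any thread stacks are created.
//
//     init_stack_protection(config)?;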

pub fn get_stack_protection_stats() -> StackProtectionStats {
    StackProtectionStats {
        canaries_placed: STACK_PROTECTION.canaries_placed.load(Ordering::Relaxed),
        violations_detected: STACK_PROTECTION.violations_detected.load(Ordering::Relaxed),
        guard_pages_allocated: STACK_PROTECTION.guard_pages_allocated.load(Ordering::Relaxed),
        protection_enabled: SECURITY_STATE.canaries_enabled.load(Ordering::Relaxed)
            || SECURITY_STATE.guards_enabled.load(Ordering::Relaxed),
    }
}
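
// Example (sketch): a watchdog task can poll the counters and escalate when
// violations accumulate.
//
//     let stats = get_stack_protection_stats();
//     if stats.violations_detected > 0 {
//         // log, quarantine the offending thread, or reset, per policy
//     }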

#[cfg(all(feature = "mmu", target_os = "linux"))]
mod linux_impl {
    pub const PROT_NONE: i32 = 0;
    pub const PROT_READ: i32 = 1;
    pub const PROT_WRITE: i32 = 2;

    const _SC_PAGESIZE: i32 = 30;

    // Raw libc bindings, kept in a private module so the wrappers below do
    // not collide with (and recursively call) the extern symbols of the
    // same name.
    mod ffi {
        extern "C" {
            pub fn mprotect(addr: *mut u8, len: usize, prot: i32) -> i32;
            pub fn sysconf(name: i32) -> i64;
        }
    }

    pub unsafe fn mprotect(addr: *mut u8, len: usize, prot: i32) -> i32 {
        ffi::mprotect(addr, len, prot)
    }

    pub fn get_page_size() -> usize {
        unsafe { ffi::sysconf(_SC_PAGESIZE) as usize }
    }

    pub fn is_mmu_available() -> bool {
        true
    }
}

// Fallback for `mmu` builds without a Linux mprotect: every protection
// request fails, so guard-page creation surfaces an error instead of
// silently succeeding.
#[cfg(all(feature = "mmu", not(target_os = "linux")))]
mod generic_mmu_impl {
    pub const PROT_NONE: i32 = 0;
    pub const PROT_READ: i32 = 1;
    pub const PROT_WRITE: i32 = 2;

    pub unsafe fn mprotect(_addr: *mut u8, _len: usize, _prot: i32) -> i32 {
        -1
    }

    pub fn get_page_size() -> usize {
        4096
    }

    pub fn is_mmu_available() -> bool {
        false
    }
}

#[cfg(all(feature = "mmu", target_os = "linux"))]
use linux_impl::*;

#[cfg(all(feature = "mmu", not(target_os = "linux")))]
use generic_mmu_impl::*;

// Heap-backed helpers for targets without an MMU. Real page protection is
// not possible here; these currently-unused helpers merely reserve zeroed,
// page-aligned memory that a scheduler could treat as a red zone.
#[cfg(not(feature = "mmu"))]
#[allow(dead_code)]
mod generic_impl {
    use super::*;

    pub(super) fn allocate_guard_pages(size: usize) -> Result<*mut u8, ThreadError> {
        let layout = core::alloc::Layout::from_size_align(size, 4096)
            .map_err(|_| ThreadError::MemoryError("Invalid guard page layout".into()))?;
        let ptr = unsafe { alloc::alloc_zeroed(layout) };
        if ptr.is_null() {
            Err(ThreadError::MemoryError("Guard page allocation failed".into()))
        } else {
            Ok(ptr)
        }
    }

    pub(super) fn deallocate_guard_pages(ptr: *mut u8, size: usize) {
        if !ptr.is_null() {
            // Must match the layout used in allocate_guard_pages.
            let layout = core::alloc::Layout::from_size_align(size, 4096).unwrap();
            unsafe { alloc::dealloc(ptr, layout) };
        }
    }
}

/// RAII guard that validates a stack's canaries when it goes out of scope,
/// for example at the end of a scheduling quantum.
pub struct StackCanaryGuard<'a> {
    protected_stack: &'a ProtectedStack,
}

impl<'a> StackCanaryGuard<'a> {
    pub fn new(protected_stack: &'a ProtectedStack) -> Self {
        Self { protected_stack }
    }
}

impl<'a> Drop for StackCanaryGuard<'a> {
    fn drop(&mut self) {
        self.protected_stack.validate_canaries();
    }
}

/// Places a scope guard that validates `$stack`'s canaries when the current
/// scope exits. Uses `$crate` so the path resolves from other crates, as
/// `#[macro_export]` requires.
#[macro_export]
macro_rules! stack_canary_guard {
    ($stack:expr) => {
        let _canary_guard = $crate::security::stack_protection::StackCanaryGuard::new($stack);
    };
}