// preemptive_threads/security/aslr.rs

use crate::errors::ThreadError;
use crate::security::{SecurityConfig, crypto_rng::secure_random_u64};
use portable_atomic::{AtomicU64, AtomicUsize, Ordering};
use alloc::{vec, vec::Vec};

/// Applies address space layout randomization (ASLR) to new thread stacks and heaps.
pub struct AslrManager {
    /// Number of address randomizations performed.
    randomizations_applied: AtomicUsize,
    /// Bytes of entropy drawn from the secure RNG.
    entropy_consumed: AtomicU64,
    /// Detected layout of the current address space.
    address_space_layout: AddressSpaceLayout,
}

impl AslrManager {
    pub fn new() -> Self {
        Self {
            randomizations_applied: AtomicUsize::new(0),
            entropy_consumed: AtomicU64::new(0),
            address_space_layout: AddressSpaceLayout::detect(),
        }
    }

    /// Randomizes a stack base address within the detected stack region.
    pub fn randomize_stack_address(
        &self,
        base_address: usize,
        stack_size: usize,
    ) -> Result<usize, ThreadError> {
        let entropy_bits = self.address_space_layout.available_entropy_bits();
        if entropy_bits == 0 {
            // No entropy available on this target; fall back to the unrandomized base.
            return Ok(base_address);
        }

        let max_offset = self.address_space_layout.max_stack_offset(stack_size);
        let random_offset = self.generate_random_offset(max_offset)?;

        // Keep the stack page-aligned by rounding the offset down to a page boundary.
        let page_size = self.address_space_layout.page_size;
        let aligned_offset = (random_offset / page_size) * page_size;

        let randomized_address = base_address.wrapping_add(aligned_offset);

        if !self.address_space_layout.is_valid_stack_address(randomized_address, stack_size) {
            return Err(ThreadError::MemoryError());
        }

        self.randomizations_applied.fetch_add(1, Ordering::Relaxed);
        Ok(randomized_address)
    }

    /// Randomizes a heap base address within the detected heap region.
    pub fn randomize_heap_address(
        &self,
        base_address: usize,
        allocation_size: usize,
    ) -> Result<usize, ThreadError> {
        let max_offset = self.address_space_layout.max_heap_offset(allocation_size);
        let random_offset = self.generate_random_offset(max_offset)?;

        // Heap allocations only need pointer alignment.
        let align_size = core::mem::align_of::<usize>();
        let aligned_offset = (random_offset / align_size) * align_size;

        let randomized_address = base_address.wrapping_add(aligned_offset);

        if !self.address_space_layout.is_valid_heap_address(randomized_address, allocation_size) {
            return Err(ThreadError::MemoryError());
        }

        self.randomizations_applied.fetch_add(1, Ordering::Relaxed);
        Ok(randomized_address)
    }

    /// Generates a randomized guard gap of 1 to 16 pages.
    pub fn generate_guard_gap(&self) -> Result<usize, ThreadError> {
        let min_gap = self.address_space_layout.page_size;
        let max_gap_pages = 16;
        let random_pages = self.generate_random_offset(max_gap_pages)? + 1;
        Ok(random_pages * min_gap)
    }

    /// Produces a randomized stack and heap layout for a new thread.
    pub fn generate_thread_layout(&self) -> Result<ThreadMemoryLayout, ThreadError> {
        let stack_base = self.address_space_layout.stack_base_address();
        let heap_base = self.address_space_layout.heap_base_address();

        // Default per-thread sizes: 1 MiB stack, 64 MiB heap.
        let stack_size = 1024 * 1024;
        let randomized_stack = self.randomize_stack_address(stack_base, stack_size)?;

        let heap_size = 64 * 1024 * 1024;
        let randomized_heap = self.randomize_heap_address(heap_base, heap_size)?;

        let stack_guard_gap = self.generate_guard_gap()?;
        let heap_guard_gap = self.generate_guard_gap()?;

        Ok(ThreadMemoryLayout {
            stack_base: randomized_stack,
            stack_size,
            stack_guard_gap,
            heap_base: randomized_heap,
            heap_size,
            heap_guard_gap,
            randomization_entropy: self.entropy_consumed.load(Ordering::Relaxed),
        })
    }

    /// Draws a secure random u64 and reduces it to the range `0..max_offset`.
    fn generate_random_offset(&self, max_offset: usize) -> Result<usize, ThreadError> {
        if max_offset == 0 {
            return Ok(0);
        }

        let random_value = secure_random_u64()?;
        self.entropy_consumed.fetch_add(8, Ordering::Relaxed);

        Ok((random_value as usize) % max_offset)
    }
}

#[derive(Debug, Clone)]
pub struct AddressSpaceLayout {
    pub page_size: usize,
    pub address_space_size: usize,
    pub stack_region: MemoryRegion,
    pub heap_region: MemoryRegion,
    pub arch_constraints: ArchConstraints,
}

impl AddressSpaceLayout {
    pub fn detect() -> Self {
        Self {
            page_size: detect_page_size(),
            address_space_size: detect_address_space_size(),
            stack_region: detect_stack_region(),
            heap_region: detect_heap_region(),
            arch_constraints: ArchConstraints::detect(),
        }
    }

    /// Bits of randomization entropy usable on the current architecture.
    pub fn available_entropy_bits(&self) -> u32 {
        #[cfg(target_pointer_width = "64")]
        {
            match self.arch_constraints.arch {
                Architecture::X86_64 => 28,
                Architecture::Aarch64 => 32,
                Architecture::Riscv64 => 30,
                _ => 20,
            }
        }

        #[cfg(target_pointer_width = "32")]
        {
            // 32-bit targets leave much less room for randomization.
            16
        }
    }

    /// Maximum stack randomization offset: half of the free space in the stack region.
    pub fn max_stack_offset(&self, stack_size: usize) -> usize {
        let available_space = self.stack_region.size.saturating_sub(stack_size);
        available_space / 2
    }

    /// Maximum heap randomization offset: a quarter of the free space in the heap region.
    pub fn max_heap_offset(&self, heap_size: usize) -> usize {
        let available_space = self.heap_region.size.saturating_sub(heap_size);
        available_space / 4
    }

    pub fn is_valid_stack_address(&self, address: usize, size: usize) -> bool {
        address >= self.stack_region.start &&
            address + size <= self.stack_region.start + self.stack_region.size
    }

    pub fn is_valid_heap_address(&self, address: usize, size: usize) -> bool {
        address >= self.heap_region.start &&
            address + size <= self.heap_region.start + self.heap_region.size
    }

    pub fn stack_base_address(&self) -> usize {
        self.stack_region.start + (self.stack_region.size / 4)
    }

    pub fn heap_base_address(&self) -> usize {
        self.heap_region.start + (self.heap_region.size / 8)
    }
}

#[derive(Debug, Clone)]
pub struct MemoryRegion {
    pub start: usize,
    pub size: usize,
}

#[derive(Debug, Clone)]
pub struct ArchConstraints {
    pub arch: Architecture,
    pub min_alignment: usize,
    pub max_randomization: usize,
    pub forbidden_ranges: Vec<MemoryRegion>,
}

impl ArchConstraints {
    pub fn detect() -> Self {
        #[cfg(feature = "x86_64")]
        {
            return Self {
                arch: Architecture::X86_64,
                min_alignment: 4096,
                max_randomization: 1 << 28,
                forbidden_ranges: vec![
                    // Upper (kernel) half of the canonical address space.
                    MemoryRegion { start: 0xFFFF800000000000, size: usize::MAX },
                ],
            };
        }

        #[cfg(feature = "arm64")]
        {
            Self {
                arch: Architecture::Aarch64,
                min_alignment: 4096,
                max_randomization: 1 << 32,
                forbidden_ranges: vec![
                    // Upper (kernel) region of the address space.
                    MemoryRegion { start: 0xFFFF000000000000, size: usize::MAX },
                ],
            }
        }

        #[cfg(not(any(feature = "x86_64", feature = "arm64")))]
        {
            Self {
                arch: Architecture::Generic,
                min_alignment: core::mem::size_of::<usize>(),
                max_randomization: 1 << 20,
                forbidden_ranges: Vec::new(),
            }
        }
    }
}

#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum Architecture {
    X86_64,
    Aarch64,
    Riscv64,
    Generic,
}

#[derive(Debug, Clone)]
pub struct ThreadMemoryLayout {
    pub stack_base: usize,
    pub stack_size: usize,
    pub stack_guard_gap: usize,
    pub heap_base: usize,
    pub heap_size: usize,
    pub heap_guard_gap: usize,
    pub randomization_entropy: u64,
}

impl ThreadMemoryLayout {
    pub fn create_randomized_stack(&self) -> Result<RandomizedStack, ThreadError> {
        Ok(RandomizedStack {
            layout: self.clone(),
            actual_address: self.stack_base,
            // A full 64-bit value is drawn from the RNG per randomization.
            entropy_used: 64,
        })
    }
}

pub struct RandomizedStack {
    pub layout: ThreadMemoryLayout,
    pub actual_address: usize,
    pub entropy_used: u32,
}

impl RandomizedStack {
    /// Usable stack range, excluding the guard gap at the low end.
    pub fn bounds(&self) -> (usize, usize) {
        let start = self.actual_address + self.layout.stack_guard_gap;
        let end = start + self.layout.stack_size - self.layout.stack_guard_gap;
        (start, end)
    }

    pub fn contains(&self, address: usize) -> bool {
        let (start, end) = self.bounds();
        address >= start && address < end
    }
}

// Global ASLR manager, initialized by `init_aslr`.
static mut ASLR_MANAGER: Option<AslrManager> = None;

fn detect_page_size() -> usize {
    #[cfg(target_os = "linux")]
    {
        4096
    }

    #[cfg(not(target_os = "linux"))]
    {
        // Assume 4 KiB pages on targets without a detection path.
        4096
    }
}

fn detect_address_space_size() -> usize {
    #[cfg(target_pointer_width = "64")]
    {
        // Assume a 48-bit virtual address space on 64-bit targets.
        1usize << 48
    }

    #[cfg(target_pointer_width = "32")]
    {
        // The full 4 GiB space does not fit in a 32-bit usize; saturate.
        usize::MAX
    }
}

fn detect_stack_region() -> MemoryRegion {
    #[cfg(target_pointer_width = "64")]
    {
        MemoryRegion {
            start: 0x7F0000000000,
            size: 0x10000000000,
        }
    }

    #[cfg(target_pointer_width = "32")]
    {
        MemoryRegion {
            start: 0xC0000000,
            size: 0x40000000,
        }
    }
}

fn detect_heap_region() -> MemoryRegion {
    #[cfg(target_pointer_width = "64")]
    {
        MemoryRegion {
            start: 0x100000000,
            size: 0x7EF000000000,
        }
    }

    #[cfg(target_pointer_width = "32")]
    {
        MemoryRegion {
            start: 0x08000000,
            size: 0xB8000000,
        }
    }
}

#[derive(Debug, Clone)]
pub struct AslrStats {
    pub randomizations_applied: usize,
    pub entropy_consumed: u64,
    pub entropy_bits_available: u32,
    pub aslr_enabled: bool,
}

/// Installs the global ASLR manager.
pub fn init_aslr(_config: SecurityConfig) -> Result<(), ThreadError> {
    unsafe {
        ASLR_MANAGER = Some(AslrManager::new());
    }

    Ok(())
}

/// Builds a randomized thread layout; fails if ASLR has not been initialized.
pub fn create_randomized_layout() -> Result<ThreadMemoryLayout, ThreadError> {
    unsafe {
        match &ASLR_MANAGER {
            Some(manager) => manager.generate_thread_layout(),
            None => Err(ThreadError::InvalidState()),
        }
    }
}

/// Randomizes a stack address, falling back to `base` when ASLR is not initialized.
pub fn randomize_stack_address(base: usize, size: usize) -> Result<usize, ThreadError> {
    unsafe {
        match &ASLR_MANAGER {
            Some(manager) => manager.randomize_stack_address(base, size),
            None => Ok(base),
        }
    }
}

pub fn get_aslr_stats() -> AslrStats {
    unsafe {
        match &ASLR_MANAGER {
            Some(manager) => AslrStats {
                randomizations_applied: manager.randomizations_applied.load(Ordering::Relaxed),
                entropy_consumed: manager.entropy_consumed.load(Ordering::Relaxed),
                entropy_bits_available: manager.address_space_layout.available_entropy_bits(),
                aslr_enabled: true,
            },
            None => AslrStats {
                randomizations_applied: 0,
                entropy_consumed: 0,
                entropy_bits_available: 0,
                aslr_enabled: false,
            },
        }
    }
}
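
// A minimal usage sketch added for illustration (not part of the original module).
// It assumes the crate's unit tests can run on a hosted target where
// `crypto_rng::secure_random_u64` is usable; on a pure `no_std` build these tests
// may need different gating. Test names and constants below are illustrative only.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn randomized_layout_stays_inside_detected_regions() {
        let manager = AslrManager::new();
        // Skip silently if the secure RNG is unavailable in this environment.
        let layout = match manager.generate_thread_layout() {
            Ok(layout) => layout,
            Err(_) => return,
        };

        let space = &manager.address_space_layout;
        assert!(space.is_valid_stack_address(layout.stack_base, layout.stack_size));
        assert!(space.is_valid_heap_address(layout.heap_base, layout.heap_size));
    }

    #[test]
    fn randomized_stack_bounds_exclude_guard_gap() {
        // Hand-built layout with small, portable constants.
        let layout = ThreadMemoryLayout {
            stack_base: 0x1000_0000,
            stack_size: 1024 * 1024,
            stack_guard_gap: 4096,
            heap_base: 0x2000_0000,
            heap_size: 64 * 1024 * 1024,
            heap_guard_gap: 4096,
            randomization_entropy: 0,
        };

        let stack = layout.create_randomized_stack().unwrap();
        let (start, end) = stack.bounds();
        assert!(start > stack.actual_address);
        assert!(stack.contains(start) && !stack.contains(end));
    }
}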