preemptive_threads/security/aslr.rs

//! Address Space Layout Randomization (ASLR) implementation.

use crate::errors::ThreadError;
use crate::security::{SecurityConfig, crypto_rng::secure_random_u64};
use portable_atomic::{AtomicU64, AtomicUsize, Ordering};
use alloc::{vec, vec::Vec};

/// ASLR implementation for thread stacks and memory layout.
pub struct AslrManager {
    /// Randomization statistics
    randomizations_applied: AtomicUsize,
    /// Entropy consumed for randomization, in bytes
    entropy_consumed: AtomicU64,
    /// Virtual address space layout
    address_space_layout: AddressSpaceLayout,
}

impl AslrManager {
    pub fn new() -> Self {
        Self {
            randomizations_applied: AtomicUsize::new(0),
            entropy_consumed: AtomicU64::new(0),
            address_space_layout: AddressSpaceLayout::detect(),
        }
    }

    /// Randomize stack allocation address.
    pub fn randomize_stack_address(
        &self,
        base_address: usize,
        stack_size: usize,
    ) -> Result<usize, ThreadError> {
        let entropy_bits = self.address_space_layout.available_entropy_bits();
        if entropy_bits == 0 {
            return Ok(base_address); // No randomization available
        }

        // Generate random offset within available address space
        let max_offset = self.address_space_layout.max_stack_offset(stack_size);
        let random_offset = self.generate_random_offset(max_offset)?;

        // Apply offset with proper alignment
        let page_size = self.address_space_layout.page_size;
        let aligned_offset = (random_offset / page_size) * page_size;

        let randomized_address = base_address.wrapping_add(aligned_offset);

        // Verify address is in valid range
        if !self.address_space_layout.is_valid_stack_address(randomized_address, stack_size) {
            return Err(ThreadError::MemoryError());
        }

        self.randomizations_applied.fetch_add(1, Ordering::Relaxed);
        Ok(randomized_address)
    }

    /// Randomize heap allocation address.
    pub fn randomize_heap_address(
        &self,
        base_address: usize,
        allocation_size: usize,
    ) -> Result<usize, ThreadError> {
        let max_offset = self.address_space_layout.max_heap_offset(allocation_size);
        let random_offset = self.generate_random_offset(max_offset)?;

        // Apply alignment requirements
        let align_size = core::mem::align_of::<usize>();
        let aligned_offset = (random_offset / align_size) * align_size;

        let randomized_address = base_address.wrapping_add(aligned_offset);

        if !self.address_space_layout.is_valid_heap_address(randomized_address, allocation_size) {
            return Err(ThreadError::MemoryError());
        }

        self.randomizations_applied.fetch_add(1, Ordering::Relaxed);
        Ok(randomized_address)
    }

    /// Generate a randomized guard gap (1 to 16 pages) between memory regions.
    pub fn generate_guard_gap(&self) -> Result<usize, ThreadError> {
        let page_size = self.address_space_layout.page_size;
        let max_pages = 16;

        let random_pages = self.generate_random_offset(max_pages)? + 1; // 1..=16 pages
        Ok(random_pages * page_size)
    }

    /// Generate random memory layout for thread.
    pub fn generate_thread_layout(&self) -> Result<ThreadMemoryLayout, ThreadError> {
        let stack_base = self.address_space_layout.stack_base_address();
        let heap_base = self.address_space_layout.heap_base_address();

        // Randomize stack position
        let stack_size = 1024 * 1024; // 1MB default stack
        let randomized_stack = self.randomize_stack_address(stack_base, stack_size)?;

        // Randomize heap position
        let heap_size = 64 * 1024 * 1024; // 64MB heap region
        let randomized_heap = self.randomize_heap_address(heap_base, heap_size)?;

        // Add guard gaps
        let stack_guard_gap = self.generate_guard_gap()?;
        let heap_guard_gap = self.generate_guard_gap()?;

        Ok(ThreadMemoryLayout {
            stack_base: randomized_stack,
            stack_size,
            stack_guard_gap,
            heap_base: randomized_heap,
            heap_size,
            heap_guard_gap,
            randomization_entropy: self.entropy_consumed.load(Ordering::Relaxed),
        })
    }

    /// Generate a cryptographically secure random offset in `0..max_offset`.
    fn generate_random_offset(&self, max_offset: usize) -> Result<usize, ThreadError> {
        if max_offset == 0 {
            return Ok(0);
        }

        let random_value = secure_random_u64()?;
        self.entropy_consumed.fetch_add(8, Ordering::Relaxed);

        // Modulo reduction has a slight bias when max_offset does not divide
        // 2^64 evenly; acceptable for layout randomization.
        Ok((random_value as usize) % max_offset)
    }
}
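
// A minimal usage sketch (illustrative only): randomized stack addresses
// should stay page-aligned relative to the base and remain inside the
// detected stack region. Assumes `secure_random_u64` can produce values in
// the test environment.
#[cfg(test)]
mod aslr_manager_tests {
    use super::*;

    #[test]
    fn randomized_stack_address_is_page_aligned_and_in_range() {
        let manager = AslrManager::new();
        let layout = &manager.address_space_layout;
        let stack_size = 64 * 1024;
        let base = layout.stack_base_address();

        let addr = manager
            .randomize_stack_address(base, stack_size)
            .expect("randomization should succeed");

        // The offset from the base is rounded down to a page boundary.
        assert_eq!((addr - base) % layout.page_size, 0);
        // The whole stack still fits inside the detected stack region.
        assert!(layout.is_valid_stack_address(addr, stack_size));
    }
}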

/// Address space layout detection and management.
#[derive(Debug, Clone)]
pub struct AddressSpaceLayout {
    /// Page size for alignment
    pub page_size: usize,
    /// Available address space size
    pub address_space_size: usize,
    /// Stack region boundaries
    pub stack_region: MemoryRegion,
    /// Heap region boundaries
    pub heap_region: MemoryRegion,
    /// Architecture-specific constraints
    pub arch_constraints: ArchConstraints,
}

impl AddressSpaceLayout {
    /// Detect current system address space layout.
    pub fn detect() -> Self {
        Self {
            page_size: detect_page_size(),
            address_space_size: detect_address_space_size(),
            stack_region: detect_stack_region(),
            heap_region: detect_heap_region(),
            arch_constraints: ArchConstraints::detect(),
        }
    }

    /// Get available entropy bits (page-granularity) for randomization.
    pub fn available_entropy_bits(&self) -> u32 {
        #[cfg(target_pointer_width = "64")]
        {
            // 64-bit systems have more address space for randomization. With
            // 4 KiB pages, n bits of page-granularity entropy span 2^(n+12) bytes.
            match self.arch_constraints.arch {
                Architecture::X86_64 => 28,  // ~1 TiB randomization span
                Architecture::Aarch64 => 32, // ~16 TiB randomization span
                Architecture::Riscv64 => 30, // ~4 TiB randomization span
                _ => 20,                     // Conservative default
            }
        }

        #[cfg(target_pointer_width = "32")]
        {
            // 32-bit systems have limited address space: ~256 MiB span
            16
        }
    }

    /// Get maximum stack offset for randomization.
    pub fn max_stack_offset(&self, stack_size: usize) -> usize {
        let available_space = self.stack_region.size.saturating_sub(stack_size);
        available_space / 2 // Use half of the available space for randomization
    }

    /// Get maximum heap offset for randomization.
    pub fn max_heap_offset(&self, heap_size: usize) -> usize {
        let available_space = self.heap_region.size.saturating_sub(heap_size);
        available_space / 4 // Use a quarter of the available space for randomization
    }

    /// Check if a stack allocation of `size` bytes at `address` fits in the stack region.
    pub fn is_valid_stack_address(&self, address: usize, size: usize) -> bool {
        let Some(end) = address.checked_add(size) else {
            return false; // Allocation would wrap around the address space
        };
        address >= self.stack_region.start
            && end <= self.stack_region.start.saturating_add(self.stack_region.size)
    }

    /// Check if a heap allocation of `size` bytes at `address` fits in the heap region.
    pub fn is_valid_heap_address(&self, address: usize, size: usize) -> bool {
        let Some(end) = address.checked_add(size) else {
            return false; // Allocation would wrap around the address space
        };
        address >= self.heap_region.start
            && end <= self.heap_region.start.saturating_add(self.heap_region.size)
    }

    /// Get stack base address for allocation.
    pub fn stack_base_address(&self) -> usize {
        self.stack_region.start + (self.stack_region.size / 4)
    }

    /// Get heap base address for allocation.
    pub fn heap_base_address(&self) -> usize {
        self.heap_region.start + (self.heap_region.size / 8)
    }
}
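
// A small illustrative check of the region-validity helpers, built around a
// hand-constructed layout (the region values below are hypothetical, not the
// detected ones).
#[cfg(test)]
mod layout_tests {
    use super::*;

    #[test]
    fn stack_validity_rejects_out_of_range_and_overflowing_allocations() {
        let layout = AddressSpaceLayout {
            page_size: 4096,
            address_space_size: usize::MAX,
            stack_region: MemoryRegion { start: 0x1000_0000, size: 0x0100_0000 },
            heap_region: MemoryRegion { start: 0x2000_0000, size: 0x0400_0000 },
            arch_constraints: ArchConstraints::detect(),
        };

        // Fits entirely inside the stack region.
        assert!(layout.is_valid_stack_address(0x1000_0000, 0x1000));
        // Starts below the stack region.
        assert!(!layout.is_valid_stack_address(0x0FFF_F000, 0x1000));
        // `address + size` would wrap around; must be rejected, not wrapped.
        assert!(!layout.is_valid_stack_address(usize::MAX - 0x10, 0x1000));
    }
}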

/// Memory region descriptor.
#[derive(Debug, Clone)]
pub struct MemoryRegion {
    pub start: usize,
    pub size: usize,
}

/// Architecture-specific constraints for ASLR.
#[derive(Debug, Clone)]
pub struct ArchConstraints {
    pub arch: Architecture,
    pub min_alignment: usize,
    pub max_randomization: usize,
    pub forbidden_ranges: Vec<MemoryRegion>,
}

impl ArchConstraints {
    pub fn detect() -> Self {
        #[cfg(feature = "x86_64")]
        {
            return Self {
                arch: Architecture::X86_64,
                min_alignment: 4096, // Page aligned
                max_randomization: 1 << 28, // 256MB
                forbidden_ranges: vec![
                    // Kernel space
                    MemoryRegion { start: 0xFFFF800000000000, size: usize::MAX },
                ],
            };
        }

        #[cfg(feature = "arm64")]
        {
            Self {
                arch: Architecture::Aarch64,
                min_alignment: 4096,
                max_randomization: 1 << 32, // 4GB
                forbidden_ranges: vec![
                    // Kernel space (simplified)
                    MemoryRegion { start: 0xFFFF000000000000, size: usize::MAX },
                ],
            }
        }

        #[cfg(not(any(feature = "x86_64", feature = "arm64")))]
        {
            Self {
                arch: Architecture::Generic,
                min_alignment: core::mem::size_of::<usize>(),
                max_randomization: 1 << 20, // 1MB conservative
                forbidden_ranges: Vec::new(),
            }
        }
    }
}

/// Supported architectures for ASLR.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum Architecture {
    X86_64,
    Aarch64,
    Riscv64,
    Generic,
}

/// Thread-specific memory layout with randomization.
#[derive(Debug, Clone)]
pub struct ThreadMemoryLayout {
    pub stack_base: usize,
    pub stack_size: usize,
    pub stack_guard_gap: usize,
    pub heap_base: usize,
    pub heap_size: usize,
    pub heap_guard_gap: usize,
    pub randomization_entropy: u64,
}

impl ThreadMemoryLayout {
    /// Create stack with randomized address.
    pub fn create_randomized_stack(&self) -> Result<RandomizedStack, ThreadError> {
        // In a real implementation, this would allocate memory at the randomized address
        Ok(RandomizedStack {
            layout: self.clone(),
            actual_address: self.stack_base,
            entropy_used: 64, // bits of entropy used
        })
    }
}

/// Stack with randomized allocation address.
pub struct RandomizedStack {
    pub layout: ThreadMemoryLayout,
    pub actual_address: usize,
    pub entropy_used: u32,
}

impl RandomizedStack {
    /// Get usable stack bounds, with the guard gap carved out of the low end
    /// of the stack region.
    pub fn bounds(&self) -> (usize, usize) {
        let start = self.actual_address + self.layout.stack_guard_gap;
        let end = self.actual_address + self.layout.stack_size;
        (start, end)
    }

    /// Check if address is within this stack.
    pub fn contains(&self, address: usize) -> bool {
        let (start, end) = self.bounds();
        address >= start && address < end
    }
}
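
// Illustrative sketch of how a randomized layout is consumed; no memory is
// mapped here, the addresses are bookkeeping only. The layout values below
// are hypothetical.
#[cfg(test)]
mod randomized_stack_tests {
    use super::*;

    #[test]
    fn stack_bounds_exclude_the_guard_gap() {
        let layout = ThreadMemoryLayout {
            stack_base: 0x7000_0000,
            stack_size: 0x10_0000,   // 1 MiB
            stack_guard_gap: 0x4000, // 4 pages of 4 KiB
            heap_base: 0x1000_0000,
            heap_size: 0x400_0000,   // 64 MiB
            heap_guard_gap: 0x4000,
            randomization_entropy: 0,
        };

        let stack = layout.create_randomized_stack().expect("layout is valid");
        let (start, end) = stack.bounds();

        assert_eq!(start, layout.stack_base + layout.stack_guard_gap);
        assert_eq!(end, layout.stack_base + layout.stack_size);
        assert!(stack.contains(start));
        assert!(!stack.contains(end)); // the end bound is exclusive
    }
}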

/// Global ASLR manager instance.
///
/// Written once during `init_aslr` and treated as read-only afterwards.
static mut ASLR_MANAGER: Option<AslrManager> = None;

/// Access the global ASLR manager without forming a reference to the mutable
/// static directly.
fn aslr_manager() -> Option<&'static AslrManager> {
    unsafe { (*core::ptr::addr_of!(ASLR_MANAGER)).as_ref() }
}

// System detection functions.

fn detect_page_size() -> usize {
    #[cfg(target_os = "linux")]
    {
        // In a real implementation, this would use sysconf(_SC_PAGESIZE)
        4096
    }

    #[cfg(not(target_os = "linux"))]
    {
        4096 // Common page size
    }
}


fn detect_address_space_size() -> usize {
    #[cfg(target_pointer_width = "64")]
    {
        // 48-bit address space is common on x86_64
        1usize << 48
    }

    #[cfg(target_pointer_width = "32")]
    {
        // The full 2^32-byte space is not representable in a 32-bit usize,
        // so report the largest representable value.
        usize::MAX
    }
}

fn detect_stack_region() -> MemoryRegion {
    #[cfg(target_pointer_width = "64")]
    {
        MemoryRegion {
            start: 0x7F0000000000, // Typical user stack region on Linux x86_64
            size: 0x10000000000,   // 1TB region
        }
    }

    #[cfg(target_pointer_width = "32")]
    {
        MemoryRegion {
            start: 0xC0000000,  // 3GB mark
            size: 0x40000000,   // 1GB region
        }
    }
}

fn detect_heap_region() -> MemoryRegion {
    #[cfg(target_pointer_width = "64")]
    {
        MemoryRegion {
            start: 0x100000000,    // 4GB mark
            size: 0x7DF000000000,  // Large heap region, ending below the stack region
        }
    }

    #[cfg(target_pointer_width = "32")]
    {
        MemoryRegion {
            start: 0x08000000,  // 128MB mark
            size: 0xB8000000,   // ~3GB region
        }
    }
}

/// ASLR statistics.
#[derive(Debug, Clone)]
pub struct AslrStats {
    pub randomizations_applied: usize,
    pub entropy_consumed: u64,
    pub entropy_bits_available: u32,
    pub aslr_enabled: bool,
}

/// Initialize ASLR subsystem.
pub fn init_aslr(_config: SecurityConfig) -> Result<(), ThreadError> {
    unsafe {
        ASLR_MANAGER = Some(AslrManager::new());
    }

    // ASLR is now active; available entropy is reported via `get_aslr_stats()`.
    Ok(())
}

/// Create randomized thread memory layout.
pub fn create_randomized_layout() -> Result<ThreadMemoryLayout, ThreadError> {
    match aslr_manager() {
        Some(manager) => manager.generate_thread_layout(),
        None => Err(ThreadError::InvalidState()),
    }
}

/// Randomize stack allocation address.
pub fn randomize_stack_address(base: usize, size: usize) -> Result<usize, ThreadError> {
    match aslr_manager() {
        Some(manager) => manager.randomize_stack_address(base, size),
        None => Ok(base), // No randomization if not initialized
    }
}

/// Get ASLR statistics.
pub fn get_aslr_stats() -> AslrStats {
    match aslr_manager() {
        Some(manager) => AslrStats {
            randomizations_applied: manager.randomizations_applied.load(Ordering::Relaxed),
            entropy_consumed: manager.entropy_consumed.load(Ordering::Relaxed),
            entropy_bits_available: manager.address_space_layout.available_entropy_bits(),
            aslr_enabled: true,
        },
        None => AslrStats {
            randomizations_applied: 0,
            entropy_consumed: 0,
            entropy_bits_available: 0,
            aslr_enabled: false,
        },
    }
}
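
// End-to-end usage sketch for the module-level API (illustrative; assumes
// `SecurityConfig` provides a `Default` implementation, which may differ in
// the real crate): initialize once, then request per-thread layouts.
//
// fn spawn_with_aslr() -> Result<(), ThreadError> {
//     init_aslr(SecurityConfig::default())?;
//     let layout = create_randomized_layout()?;
//     let stack = layout.create_randomized_stack()?;
//     let (stack_start, stack_end) = stack.bounds();
//     // Hand `stack_start..stack_end` to the thread-creation path.
//     let _ = (stack_start, stack_end);
//     Ok(())
// }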