preemptive_threads/mem/
stack_pool.rs

//! Stack pool allocator for thread stacks.
//!
//! This module provides a pool-based allocator for thread stacks with
//! different size classes and optional guard page support.

use portable_atomic::{AtomicUsize, Ordering};
use spin::Mutex;
use core::ptr::NonNull;
// mem and MaybeUninit imports not needed yet
// use core::mem::{self, MaybeUninit};

// Use Vec from alloc or std depending on features
#[cfg(feature = "std-shim")]
extern crate std;

#[cfg(feature = "std-shim")]
use std::vec::Vec;

#[cfg(not(feature = "std-shim"))]
extern crate alloc;

#[cfg(not(feature = "std-shim"))]
use alloc::vec::Vec;

/// Stack size classes for the pool allocator.
///
/// Different threads may need different stack sizes, so we provide
/// several size classes to minimize memory waste.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum StackSizeClass {
    /// Small stack: 4 KiB
    Small = 4096,
    /// Medium stack: 16 KiB
    Medium = 16384,
    /// Large stack: 64 KiB
    Large = 65536,
    /// Extra large stack: 256 KiB
    ExtraLarge = 262144,
}

impl StackSizeClass {
    /// Get the size in bytes for this stack class.
    pub fn size(self) -> usize {
        self as usize
    }

    /// Choose the appropriate size class for a requested stack size.
    ///
    /// # Arguments
    ///
    /// * `requested_size` - The minimum stack size required
    ///
    /// # Returns
    ///
    /// The smallest size class that can accommodate the requested size.
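    ///
    /// # Examples
    ///
    /// A minimal usage sketch; the `use` path assumes this module is exported
    /// as `preemptive_threads::mem::stack_pool`, so the block is marked
    /// `ignore` rather than compiled as a doc test.
    ///
    /// ```ignore
    /// use preemptive_threads::mem::stack_pool::StackSizeClass;
    ///
    /// assert_eq!(StackSizeClass::for_size(8 * 1024), Some(StackSizeClass::Medium));
    /// assert_eq!(StackSizeClass::for_size(64 * 1024), Some(StackSizeClass::Large));
    /// assert_eq!(StackSizeClass::for_size(1024 * 1024), None); // larger than any class
    /// ```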
    pub fn for_size(requested_size: usize) -> Option<Self> {
        match requested_size {
            0..=4096 => Some(Self::Small),
            4097..=16384 => Some(Self::Medium),
            16385..=65536 => Some(Self::Large),
            65537..=262144 => Some(Self::ExtraLarge),
            _ => None, // Size too large
        }
    }
}

/// A thread stack with optional guard pages.
///
/// This structure represents a single allocated stack that can be
/// used by a thread. It handles both the memory allocation and
/// optional guard page protection.
// NOTE: `Stack` intentionally does not derive `Clone`; it owns its backing
// memory and frees it in `Drop`, so cloning would lead to a double free.
pub struct Stack {
    /// Pointer to the start of the stack memory (lowest address)
    memory: NonNull<u8>,
    /// Total size of allocated memory (including guard pages)
    total_size: usize,
    /// Usable stack size (excluding guard pages)
    usable_size: usize,
    /// Size class this stack belongs to
    size_class: StackSizeClass,
    /// Whether this stack has guard pages
    has_guard_pages: bool,
}

impl Stack {
    /// Get the usable stack size in bytes.
    pub fn size(&self) -> usize {
        self.usable_size
    }

    /// Get the stack size class.
    pub fn size_class(&self) -> StackSizeClass {
        self.size_class
    }

    /// Get a pointer to the bottom of the stack (highest address).
    ///
    /// On most architectures, stacks grow downward, so this is where
    /// the stack pointer should be initialized.
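    ///
    /// # Examples
    ///
    /// A hedged sketch of seeding a new thread's context; `init_context` and
    /// `thread_entry` are hypothetical names, not part of this module:
    ///
    /// ```ignore
    /// let sp = stack.stack_bottom();
    /// // The stack grows downward from `sp` toward `stack.stack_top()`.
    /// init_context(sp, thread_entry);
    /// ```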
    pub fn stack_bottom(&self) -> *mut u8 {
        unsafe {
            self.memory.as_ptr().add(
                if self.has_guard_pages {
                    4096 + self.usable_size // Skip guard page
                } else {
                    self.usable_size
                }
            )
        }
    }

    /// Get a pointer to the top of the stack (lowest address).
    pub fn stack_top(&self) -> *const u8 {
        unsafe {
            if self.has_guard_pages {
                self.memory.as_ptr().add(4096) // Skip guard page
            } else {
                self.memory.as_ptr()
            }
        }
    }

    /// Get bottom pointer (alias for stack_bottom for compatibility).
    pub fn bottom(&self) -> *mut u8 {
        self.stack_bottom()
    }

    /// Get top pointer (alias for stack_top for compatibility).
    pub fn top(&self) -> *const u8 {
        self.stack_top()
    }

    /// Check if this stack has guard pages enabled.
    pub fn has_guard_pages(&self) -> bool {
        self.has_guard_pages
    }

    /// Install a stack canary value for overflow detection.
    ///
    /// This writes a known pattern at the lowest usable address of the stack
    /// (returned by [`stack_top`](Self::stack_top)), which is where a
    /// downward-growing stack overflows first. The value can be checked later
    /// with [`check_canary`](Self::check_canary).
    ///
    /// # Arguments
    ///
    /// * `canary` - The canary value to write
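    ///
    /// # Examples
    ///
    /// A small sketch of the intended pairing with
    /// [`check_canary`](Self::check_canary), assuming `stack` was obtained
    /// from a pool elsewhere:
    ///
    /// ```ignore
    /// const CANARY: u64 = 0xDEAD_BEEF_CAFE_BABE;
    /// stack.install_canary(CANARY);
    /// // ... run the thread for a while ...
    /// assert!(stack.check_canary(CANARY), "stack overflow corrupted the canary");
    /// ```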
    pub fn install_canary(&self, canary: u64) {
        let canary_location = self.stack_top() as *mut u64;
        unsafe {
            canary_location.write(canary);
        }
    }

    /// Check if the stack canary is still intact.
    ///
    /// # Arguments
    ///
    /// * `expected_canary` - The expected canary value
    ///
    /// # Returns
    ///
    /// `true` if the canary is intact, `false` if it has been corrupted.
    pub fn check_canary(&self, expected_canary: u64) -> bool {
        let canary_location = self.stack_top() as *const u64;
        unsafe { canary_location.read() == expected_canary }
    }
}

/// Pool-based allocator for thread stacks.
///
/// This allocator maintains separate free lists for each stack size class
/// to minimize fragmentation and allocation overhead.
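///
/// # Examples
///
/// Because [`StackPool::new`] is `const`, a pool can live in a `static`
/// (a sketch only; real code would usually wrap this in a higher-level API):
///
/// ```ignore
/// static STACK_POOL: StackPool = StackPool::new();
/// ```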
pub struct StackPool {
    /// Free stacks for each size class
    free_stacks: [Mutex<Vec<Stack>>; 4],
    /// Statistics counters
    stats: StackPoolStats,
}

/// Statistics for the stack pool allocator.
#[derive(Debug, Default)]
struct StackPoolStats {
    /// Number of stacks allocated
    allocated: AtomicUsize,
    /// Number of stacks returned to the pool
    deallocated: AtomicUsize,
    /// Number of stacks currently in use
    in_use: AtomicUsize,
}

impl StackPool {
    /// Create a new stack pool.
    pub const fn new() -> Self {
        Self {
            free_stacks: [
                Mutex::new(Vec::new()),
                Mutex::new(Vec::new()),
                Mutex::new(Vec::new()),
                Mutex::new(Vec::new()),
            ],
            stats: StackPoolStats {
                allocated: AtomicUsize::new(0),
                deallocated: AtomicUsize::new(0),
                in_use: AtomicUsize::new(0),
            },
        }
    }

    /// Allocate a stack of the given size class.
    ///
    /// This will first try to reuse a stack from the free list, and only
    /// allocate new memory if no suitable stack is available.
    ///
    /// # Arguments
    ///
    /// * `size_class` - The desired stack size class
    ///
    /// # Returns
    ///
    /// A new stack, or `None` if allocation fails.
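    ///
    /// # Examples
    ///
    /// A hedged sketch; it assumes the `std-shim` feature is enabled so that
    /// backing memory can come from the host allocator:
    ///
    /// ```ignore
    /// static POOL: StackPool = StackPool::new();
    ///
    /// let stack = POOL.allocate(StackSizeClass::Medium).expect("stack allocation failed");
    /// assert_eq!(stack.size(), StackSizeClass::Medium.size());
    /// // ... hand the stack to a thread, then return it for reuse:
    /// POOL.deallocate(stack);
    /// ```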
    pub fn allocate(&self, size_class: StackSizeClass) -> Option<Stack> {
        let class_index = self.size_class_index(size_class);

        // Try to get a stack from the free list first
        if let Some(mut free_list) = self.free_stacks[class_index].try_lock() {
            if let Some(stack) = free_list.pop() {
                self.stats.in_use.fetch_add(1, Ordering::AcqRel);
                return Some(stack);
            }
        }

        // Need to allocate a new stack
        self.allocate_new_stack(size_class)
    }

    /// Return a stack to the pool for reuse.
    ///
    /// # Arguments
    ///
    /// * `stack` - The stack to return to the pool
    pub fn deallocate(&self, stack: Stack) {
        let class_index = self.size_class_index(stack.size_class);

        // With the `hardened` feature, wipe the stack before it can be reused.
        #[cfg(feature = "hardened")]
        {
            // Wipe only the usable region, skipping any leading guard page.
            unsafe {
                core::ptr::write_bytes(
                    stack.stack_top() as *mut u8,
                    0,
                    stack.usable_size,
                );
            }
        }

        if let Some(mut free_list) = self.free_stacks[class_index].try_lock() {
            free_list.push(stack);
            self.stats.in_use.fetch_sub(1, Ordering::AcqRel);
            self.stats.deallocated.fetch_add(1, Ordering::AcqRel);
        } else {
            // If the free list lock is contended we don't spin: the stack goes
            // out of scope here and its memory is released by `Drop`, so it is
            // freed rather than recycled. A smarter fallback (e.g. blocking on
            // the lock) may be worthwhile later. Keep the in-use count honest.
            self.stats.in_use.fetch_sub(1, Ordering::AcqRel);
        }
    }

    /// Get statistics about the stack pool.
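    ///
    /// The returned tuple is `(allocated, deallocated, in_use)`. A minimal
    /// sketch of reading it:
    ///
    /// ```ignore
    /// let (allocated, deallocated, in_use) = pool.stats();
    /// // `allocated` counts fresh allocations; reused stacks only change `in_use`.
    /// assert!(in_use <= allocated);
    /// ```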
    pub fn stats(&self) -> (usize, usize, usize) {
        (
            self.stats.allocated.load(Ordering::Acquire),
            self.stats.deallocated.load(Ordering::Acquire),
            self.stats.in_use.load(Ordering::Acquire),
        )
    }

    /// Convert a size class to an array index.
    fn size_class_index(&self, size_class: StackSizeClass) -> usize {
        match size_class {
            StackSizeClass::Small => 0,
            StackSizeClass::Medium => 1,
            StackSizeClass::Large => 2,
            StackSizeClass::ExtraLarge => 3,
        }
    }

    /// Allocate a new stack of the given size class.
    fn allocate_new_stack(&self, size_class: StackSizeClass) -> Option<Stack> {
        let usable_size = size_class.size();
        let has_guard_pages = cfg!(feature = "mmu");

        // Calculate total size including guard pages
        let total_size = if has_guard_pages {
            usable_size + 8192 // Guard pages at both ends
        } else {
            usable_size
        };
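        // Resulting layout when guard pages are enabled: the lower guard sits
        // at `memory`, the usable region in the middle, the upper guard at the
        // end of the allocation:
        //
        //   [ 4 KiB guard | usable_size bytes | 4 KiB guard ]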

        // TODO: Replace with proper no_std memory allocation
        #[cfg(feature = "std-shim")]
        {
            extern crate std;
            use std::alloc::{alloc, Layout};

            let layout = Layout::from_size_align(total_size, 4096).ok()?;
            let memory = unsafe { alloc(layout) };

            if memory.is_null() {
                return None;
            }

            let memory = unsafe { NonNull::new_unchecked(memory) };

            // Set up guard pages if MMU feature is enabled
            #[cfg(feature = "mmu")]
            if has_guard_pages {
                self.setup_guard_pages(&memory, total_size);
            }

            let stack = Stack {
                memory,
                total_size,
                usable_size,
                size_class,
                has_guard_pages,
            };

            self.stats.allocated.fetch_add(1, Ordering::AcqRel);
            self.stats.in_use.fetch_add(1, Ordering::AcqRel);

            Some(stack)
        }

        #[cfg(not(feature = "std-shim"))]
        {
            unimplemented!("Stack allocation requires a custom allocator in no_std environments")
        }
    }

    /// Set up guard pages for a stack allocation.
    #[cfg(feature = "mmu")]
    fn setup_guard_pages(&self, _memory: &NonNull<u8>, _total_size: usize) {
        // TODO: Use mprotect or similar to make guard pages non-accessible
        // This would require platform-specific code
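        //
        // A hedged sketch of how this might look on a POSIX host (assumes the
        // `libc` crate; a bare-metal port would instead mark the pages
        // non-present in its own page tables):
        //
        //     unsafe {
        //         // Lower guard page at the base of the allocation.
        //         libc::mprotect(_memory.as_ptr() as *mut libc::c_void, 4096, libc::PROT_NONE);
        //         // Upper guard page at the end of the allocation.
        //         libc::mprotect(
        //             _memory.as_ptr().add(_total_size - 4096) as *mut libc::c_void,
        //             4096,
        //             libc::PROT_NONE,
        //         );
        //     }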
        unimplemented!("Guard page setup requires platform-specific MMU manipulation")
    }
}

impl Drop for Stack {
    fn drop(&mut self) {
        // TODO: In a real implementation, we'd need to coordinate with the
        // stack pool to properly deallocate memory
        #[cfg(feature = "std-shim")]
        {
            extern crate std;
            use std::alloc::{dealloc, Layout};

            let layout = Layout::from_size_align(self.total_size, 4096).unwrap();
            unsafe {
                dealloc(self.memory.as_ptr(), layout);
            }
        }
    }
}

// SAFETY: `Stack` uniquely owns its allocation, so moving it to another thread
// is sound.
unsafe impl Send for Stack {}
// SAFETY: a shared `Stack` only hands out raw pointers and plain metadata;
// callers are responsible for not mutating the stack memory (e.g. via
// `install_canary`) from several threads at once.
unsafe impl Sync for Stack {}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_stack_size_class_for_size() {
        assert_eq!(StackSizeClass::for_size(1024), Some(StackSizeClass::Small));
        assert_eq!(StackSizeClass::for_size(4096), Some(StackSizeClass::Small));
        assert_eq!(StackSizeClass::for_size(8192), Some(StackSizeClass::Medium));
        assert_eq!(StackSizeClass::for_size(32768), Some(StackSizeClass::Large));
        assert_eq!(StackSizeClass::for_size(131072), Some(StackSizeClass::ExtraLarge));
        assert_eq!(StackSizeClass::for_size(500000), None);
    }

    #[cfg(feature = "std-shim")]
    #[test]
    fn test_stack_pool_basic() {
        let pool = StackPool::new();
        let stack = pool.allocate(StackSizeClass::Small).unwrap();

        assert_eq!(stack.size_class(), StackSizeClass::Small);
        assert_eq!(stack.size(), StackSizeClass::Small.size());

        pool.deallocate(stack);

        let (allocated, deallocated, in_use) = pool.stats();
        assert_eq!(allocated, 1);
        assert_eq!(deallocated, 1);
        assert_eq!(in_use, 0);
    }

    #[cfg(feature = "std-shim")]
    #[test]
    fn test_stack_canary() {
        let pool = StackPool::new();
        let stack = pool.allocate(StackSizeClass::Small).unwrap();

        let canary_value = 0xDEADBEEFCAFEBABE;
        stack.install_canary(canary_value);
        assert!(stack.check_canary(canary_value));
        assert!(!stack.check_canary(0x1234567890ABCDEF));

        pool.deallocate(stack);
    }
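
    // Added illustration: a stack returned to the pool should be reused by the
    // next request for the same size class instead of triggering a fresh
    // allocation (the `allocated` counter stays at 1).
    #[cfg(feature = "std-shim")]
    #[test]
    fn test_stack_pool_reuse() {
        let pool = StackPool::new();

        let first = pool.allocate(StackSizeClass::Small).unwrap();
        pool.deallocate(first);

        // Served from the free list: no new backing allocation is made.
        let second = pool.allocate(StackSizeClass::Small).unwrap();
        let (allocated, _deallocated, in_use) = pool.stats();
        assert_eq!(allocated, 1);
        assert_eq!(in_use, 1);

        pool.deallocate(second);
    }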
}