preemptive_threads/kernel.rs

//! Kernel abstraction for managing the threading system.
//!
//! This module provides the main `Kernel` struct that coordinates all
//! threading operations and eliminates global singleton state.
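//!
//! # Example
//!
//! A minimal sketch of the intended flow. `X86_64` and `RoundRobinScheduler`
//! are hypothetical stand-ins for an [`Arch`] and a [`Scheduler`]
//! implementation; they are not defined in this module:
//!
//! ```ignore
//! // Build and initialize the kernel once at boot.
//! let kernel: Kernel<X86_64, RoundRobinScheduler> =
//!     Kernel::new(RoundRobinScheduler::new());
//! kernel.init().expect("kernel already initialized");
//!
//! // Spawn a thread at mid priority and let it run.
//! let _handle = kernel.spawn(|| { /* thread body */ }, 128)
//!     .expect("failed to spawn thread");
//! kernel.yield_now();
//! ```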

use crate::arch::Arch;
use crate::sched::Scheduler;
use crate::thread_new::{ThreadId, Thread, JoinHandle, ReadyRef, RunningRef};
use crate::mem::{StackPool, StackSizeClass};
use core::marker::PhantomData;
use portable_atomic::{AtomicBool, AtomicUsize, Ordering};

/// Main kernel handle that manages the threading system.
///
/// This struct coordinates all threading operations and provides a safe
/// interface to the underlying scheduler and architecture abstractions.
///
/// # Type Parameters
///
/// * `A` - Architecture implementation
/// * `S` - Scheduler implementation
pub struct Kernel<A: Arch, S: Scheduler> {
    /// Scheduler instance
    scheduler: S,
    /// Stack pool for thread allocation
    stack_pool: StackPool,
    /// Architecture marker (zero-sized)
    _arch: PhantomData<A>,
    /// Whether the kernel has been initialized
    initialized: AtomicBool,
    /// Next thread ID to assign
    next_thread_id: AtomicUsize,
    /// Currently running thread on each CPU (simplified to a single CPU for now)
    current_thread: spin::Mutex<Option<RunningRef>>,
}

impl<A: Arch, S: Scheduler> Kernel<A, S> {
    /// Create a new kernel instance.
    ///
    /// # Arguments
    ///
    /// * `scheduler` - Scheduler implementation to use
    ///
    /// # Returns
    ///
    /// A new kernel instance ready for initialization.
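    ///
    /// # Example
    ///
    /// A minimal sketch, where `RoundRobinScheduler` stands in for any
    /// [`Scheduler`] implementation and `X86_64` for an [`Arch`] type:
    ///
    /// ```ignore
    /// let kernel: Kernel<X86_64, RoundRobinScheduler> =
    ///     Kernel::new(RoundRobinScheduler::new());
    /// assert!(!kernel.is_initialized());
    /// ```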
    pub fn new(scheduler: S) -> Self {
        Self {
            scheduler,
            stack_pool: StackPool::new(),
            _arch: PhantomData,
            initialized: AtomicBool::new(false),
            next_thread_id: AtomicUsize::new(1), // Start from 1, never use 0
            current_thread: spin::Mutex::new(None),
        }
    }

    /// Initialize the kernel.
    ///
    /// This must be called before any threading operations can be performed.
    /// It sets up architecture-specific features and prepares the scheduler.
    ///
    /// # Returns
    ///
    /// `Ok(())` if initialization succeeds, `Err(())` if already initialized.
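    ///
    /// # Example
    ///
    /// A sketch of the expected behavior for repeated initialization, assuming
    /// an existing `kernel` handle:
    ///
    /// ```ignore
    /// assert!(kernel.init().is_ok());  // first call succeeds
    /// assert!(kernel.init().is_err()); // second call reports "already initialized"
    /// ```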
    pub fn init(&self) -> Result<(), ()> {
        if self.initialized.compare_exchange(
            false,
            true,
            Ordering::AcqRel,
            Ordering::Acquire,
        ).is_ok() {
            // Initialize architecture-specific features
            #[cfg(feature = "x86_64")]
            unsafe {
                crate::arch::x86_64::init();
            }

            // Initialize timer subsystem for preemption
            #[cfg(feature = "x86_64")]
            unsafe {
                crate::time::x86_64_timer::init().map_err(|_| ())?;
            }

            Ok(())
        } else {
            Err(()) // Already initialized
        }
    }

    /// Check if the kernel has been initialized.
    pub fn is_initialized(&self) -> bool {
        self.initialized.load(Ordering::Acquire)
    }

    /// Generate a new unique thread ID.
    ///
    /// Thread IDs are never reused and are guaranteed to be unique
    /// for the lifetime of the kernel instance.
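    ///
    /// # Example
    ///
    /// A sketch of the guarantee, assuming an existing `kernel` handle:
    ///
    /// ```ignore
    /// let first = kernel.next_thread_id();
    /// let second = kernel.next_thread_id();
    /// // `first` and `second` are distinct and both non-zero.
    /// ```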
    pub fn next_thread_id(&self) -> ThreadId {
        let id = self.next_thread_id.fetch_add(1, Ordering::AcqRel);
        // Safety: the counter starts at 1 and only ever increments, so `id` is never zero
        unsafe { ThreadId::new_unchecked(id) }
    }

    /// Get a reference to the scheduler.
    pub fn scheduler(&self) -> &S {
        &self.scheduler
    }

    /// Spawn a new thread.
    ///
    /// # Arguments
    ///
    /// * `entry_point` - Function to run in the new thread
    /// * `priority` - Thread priority (0-255; higher means more important)
    ///
    /// # Returns
    ///
    /// A `JoinHandle` for the newly created thread, or an error if creation fails.
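    ///
    /// # Example
    ///
    /// A sketch of spawning a worker and handling the error cases, assuming an
    /// initialized `kernel`:
    ///
    /// ```ignore
    /// match kernel.spawn(|| { /* thread body */ }, 128) {
    ///     Ok(handle) => { /* keep `handle` to join on the thread later */ }
    ///     Err(SpawnError::NotInitialized) => { /* call `kernel.init()` first */ }
    ///     Err(_) => { /* out of stacks, too many threads, ... */ }
    /// }
    /// ```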
    pub fn spawn<F>(&self, entry_point: F, priority: u8) -> Result<JoinHandle, SpawnError>
    where
        F: FnOnce() + Send + 'static,
    {
        if !self.is_initialized() {
            return Err(SpawnError::NotInitialized);
        }

        // Allocate a stack for the new thread
        let stack = self.stack_pool.allocate(StackSizeClass::Small)
            .ok_or(SpawnError::OutOfMemory)?;

        // Generate a unique thread ID
        let thread_id = self.next_thread_id();

        // For now, threads start at a basic placeholder entry point; the
        // caller's closure is not yet executed and is dropped here.
        let _ = entry_point;
        let simple_entry: fn() = || {};

        // Create the thread and its join handle
        let (thread, join_handle) = Thread::new(
            thread_id,
            stack,
            simple_entry,
            priority,
        );

        // Convert to a ReadyRef and enqueue it in the scheduler
        let ready_ref = ReadyRef(thread);
        self.scheduler.enqueue(ready_ref);

        Ok(join_handle)
    }

    /// Yield the current thread, allowing other threads to run.
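    ///
    /// # Example
    ///
    /// A sketch of a cooperative wait loop inside a thread body; the shared
    /// `kernel` handle and the `work_ready()` predicate are hypothetical:
    ///
    /// ```ignore
    /// while !work_ready() {
    ///     kernel.yield_now(); // let other ready threads run
    /// }
    /// ```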
    pub fn yield_now(&self) {
        if !self.is_initialized() {
            return; // Can't yield if not initialized
        }

        if let Some(mut current_guard) = self.current_thread.try_lock() {
            if let Some(current) = current_guard.take() {
                // Current thread is yielding voluntarily
                self.scheduler.on_yield(current);

                // Try to pick next thread to run
                if let Some(next) = self.scheduler.pick_next(0) {
                    let running = next.start_running();
                    *current_guard = Some(running);

                    // TODO: Perform actual context switch
                }
            }
        }
    }

    /// Handle a timer interrupt for preemptive scheduling.
    ///
    /// This should be called from the architecture-specific timer interrupt handler.
    ///
    /// # Safety
    ///
    /// Must be called from an interrupt context.
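    ///
    /// # Example
    ///
    /// A sketch of the call site inside an architecture-specific timer ISR,
    /// assuming the handler has some way to reach the kernel instance (shown
    /// here as a hypothetical `kernel()` accessor):
    ///
    /// ```ignore
    /// // Inside the timer interrupt handler, after saving registers:
    /// unsafe { kernel().handle_timer_interrupt() };
    /// // ... acknowledge the interrupt (EOI) and return from the ISR ...
    /// ```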
    pub unsafe fn handle_timer_interrupt(&self) {
        if !self.is_initialized() {
            return;
        }

        if let Some(mut current_guard) = self.current_thread.try_lock() {
            if let Some(ref current) = *current_guard {
                // Ask the scheduler whether the current thread should be preempted
                if let Some(ready_thread) = self.scheduler.on_tick(current) {
                    // The current thread was preempted: drop our running reference
                    // and re-enqueue the ready handle the scheduler produced for it
                    let _ = current_guard.take();
                    self.scheduler.enqueue(ready_thread);

                    // Try to pick the next thread (which could be the same one)
                    if let Some(next) = self.scheduler.pick_next(0) {
                        let running = next.start_running();
                        *current_guard = Some(running);

                        // TODO: Perform actual context switch
                    }
                }
            } else {
                // No current thread, try to schedule one
                if let Some(next) = self.scheduler.pick_next(0) {
                    let running = next.start_running();
                    *current_guard = Some(running);

                    // TODO: Perform actual context switch
                }
            }
        }
    }

    /// Get current thread statistics.
    pub fn thread_stats(&self) -> (usize, usize, usize) {
        self.scheduler.stats()
    }
}

/// Errors that can occur when spawning threads.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum SpawnError {
    /// The kernel has not been initialized
    NotInitialized,
    /// Out of memory for stack allocation
    OutOfMemory,
    /// The maximum number of threads has been reached
    TooManyThreads,
    /// Invalid stack size
    InvalidStackSize,
}

// Safety: `Kernel` may be sent to and shared between threads only if the
// scheduler itself is thread-safe, so the bounds below require that explicitly.
unsafe impl<A: Arch, S: Scheduler + Send> Send for Kernel<A, S> {}
unsafe impl<A: Arch, S: Scheduler + Sync> Sync for Kernel<A, S> {}