// preemptive_threads/safe_api.rs

1use crate::error::{ThreadError, ThreadResult};
2use crate::thread::ThreadId;
3use core::marker::PhantomData;
4
5/// Safe thread handle that ensures proper cleanup
6pub struct ThreadHandle {
7    id: ThreadId,
8    joined: bool,
9}
10
11impl ThreadHandle {
12    #[allow(dead_code)]
13    fn new(id: ThreadId) -> Self {
14        Self { id, joined: false }
15    }
16
17    /// Join this thread, waiting for it to complete
18    pub fn join(mut self) -> ThreadResult<()> {
19        self.joined = true;
20        // TODO: Implement actual join logic
21        Ok(())
22    }
23
24    /// Get the thread ID
25    pub fn id(&self) -> ThreadId {
26        self.id
27    }
28}
29
30impl Drop for ThreadHandle {
31    fn drop(&mut self) {
32        if !self.joined {
33            // Thread wasn't joined - this is a programming error
34            // In a real implementation, we'd want to detach or panic
35        }
36    }
37}
38
39/// Safe thread builder with compile-time checks
40pub struct ThreadBuilder<'a> {
41    stack_size: usize,
42    priority: u8,
43    name: Option<&'a str>,
44    _phantom: PhantomData<&'a ()>,
45}
46
47impl<'a> Default for ThreadBuilder<'a> {
48    fn default() -> Self {
49        Self::new()
50    }
51}
52
53impl<'a> ThreadBuilder<'a> {
54    /// Create a new thread builder
55    pub fn new() -> Self {
56        Self {
57            stack_size: 64 * 1024, // 64KB default
58            priority: 5,           // Medium priority
59            name: None,
60            _phantom: PhantomData,
61        }
62    }
63
64    /// Set stack size (must be at least 16KB)
65    pub fn stack_size(mut self, size: usize) -> Self {
66        assert!(size >= 16 * 1024, "Stack size must be at least 16KB");
67        self.stack_size = size;
68        self
69    }
70
71    /// Set thread priority (0-7, higher is more priority)
72    pub fn priority(mut self, priority: u8) -> Self {
73        assert!(priority < 8, "Priority must be 0-7");
74        self.priority = priority;
75        self
76    }
77
78    /// Set thread name for debugging
79    pub fn name(mut self, name: &'a str) -> Self {
80        self.name = Some(name);
81        self
82    }
83
84    /// Spawn the thread with a closure
85    ///
86    /// This is the safe API that doesn't require users to manage stacks manually
87    pub fn spawn<F>(self, _f: F) -> ThreadResult<ThreadHandle>
88    where
89        F: FnOnce() + Send + 'static,
90    {
91        // In a real implementation, we'd allocate the stack dynamically
92        // For now, we return an error since we can't safely do this in no_std
93        Err(ThreadError::NotImplemented)
94    }
95}
96
/// Thread-local storage key
///
/// Placeholder for a per-thread value slot identified by `key`.
/// NOTE(review): `new()` is `const` and always uses key 0, so every
/// `ThreadLocal` instance currently shares the same (unallocated) key —
/// confirm once real TLS key allocation lands.
pub struct ThreadLocal<T> {
    #[allow(dead_code)]
    key: usize,
    // Marks logical ownership of a `T` without storing one inline.
    _phantom: PhantomData<T>,
}

impl<T> Default for ThreadLocal<T> {
    fn default() -> Self {
        Self::new()
    }
}

impl<T> ThreadLocal<T> {
    /// Create a new thread-local storage key
    pub const fn new() -> Self {
        // In a real implementation, this would allocate a TLS key
        Self {
            key: 0,
            _phantom: PhantomData,
        }
    }

    /// Get the value for the current thread
    ///
    /// Currently always returns `None`: the TLS lookup is not implemented.
    pub fn get(&self) -> Option<&T> {
        // TODO: Implement TLS lookup
        None
    }

    /// Set the value for the current thread
    ///
    /// Currently a no-op: `_value` is dropped immediately because TLS
    /// storage is not implemented.
    pub fn set(&self, _value: T) {
        // TODO: Implement TLS storage
    }
}
131
132/// Safe thread pool for managing multiple threads
133pub struct ThreadPool {
134    max_threads: usize,
135    active_threads: usize,
136}
137
138impl ThreadPool {
139    /// Create a new thread pool
140    pub fn new(max_threads: usize) -> Self {
141        assert!(
142            max_threads > 0 && max_threads <= 32,
143            "Thread pool size must be 1-32"
144        );
145
146        Self {
147            max_threads,
148            active_threads: 0,
149        }
150    }
151
152    /// Execute a task in the thread pool
153    pub fn execute<F>(&mut self, _task: F) -> ThreadResult<()>
154    where
155        F: FnOnce() + Send + 'static,
156    {
157        if self.active_threads >= self.max_threads {
158            return Err(ThreadError::SchedulerFull);
159        }
160
161        // TODO: Implement actual thread pool execution
162        Err(ThreadError::NotImplemented)
163    }
164
165    /// Get the number of active threads
166    pub fn active_count(&self) -> usize {
167        self.active_threads
168    }
169
170    /// Shut down the thread pool, waiting for all threads to complete
171    pub fn shutdown(self) {
172        // TODO: Implement graceful shutdown
173    }
174}
175
/// Safe mutex implementation
///
/// A spin-lock based mutual-exclusion primitive protecting a value of
/// type `T`, usable in `no_std` contexts. A contended `lock()` busy-waits,
/// so critical sections should be kept short.
pub struct Mutex<T> {
    data: core::cell::UnsafeCell<T>,
    locked: core::sync::atomic::AtomicBool,
}

// SAFETY: the `locked` flag guarantees exclusive access to `data`, so a
// `Mutex<T>` may be shared/sent across threads whenever `T: Send`.
unsafe impl<T: Send> Sync for Mutex<T> {}
unsafe impl<T: Send> Send for Mutex<T> {}

impl<T> Mutex<T> {
    /// Create a new, unlocked mutex wrapping `data`.
    pub const fn new(data: T) -> Self {
        Self {
            data: core::cell::UnsafeCell::new(data),
            locked: core::sync::atomic::AtomicBool::new(false),
        }
    }

    /// Lock the mutex, spinning until it becomes available.
    ///
    /// Uses test-and-test-and-set: after a failed CAS it spins on a cheap
    /// relaxed load until the lock looks free, instead of hammering the
    /// cache line with failed read-modify-write cycles under contention.
    #[must_use = "the lock is released as soon as the returned guard is dropped"]
    pub fn lock(&self) -> MutexGuard<T> {
        loop {
            if self
                .locked
                .compare_exchange_weak(
                    false,
                    true,
                    core::sync::atomic::Ordering::Acquire,
                    core::sync::atomic::Ordering::Relaxed,
                )
                .is_ok()
            {
                return MutexGuard { mutex: self };
            }
            // Wait (read-only) until the lock appears free, then retry the CAS.
            while self.locked.load(core::sync::atomic::Ordering::Relaxed) {
                core::hint::spin_loop();
            }
        }
    }

    /// Try to lock the mutex without blocking.
    ///
    /// Returns `None` if the lock is already held.
    #[must_use = "the lock is released as soon as the returned guard is dropped"]
    pub fn try_lock(&self) -> Option<MutexGuard<T>> {
        self.locked
            .compare_exchange(
                false,
                true,
                core::sync::atomic::Ordering::Acquire,
                core::sync::atomic::Ordering::Relaxed,
            )
            .is_ok()
            .then(|| MutexGuard { mutex: self })
    }
}

/// RAII guard for mutex; releases the lock when dropped.
#[must_use = "holding the guard is what keeps the mutex locked"]
pub struct MutexGuard<'a, T> {
    mutex: &'a Mutex<T>,
}

impl<'a, T> core::ops::Deref for MutexGuard<'a, T> {
    type Target = T;

    fn deref(&self) -> &Self::Target {
        // SAFETY: a guard only exists while the lock is held, giving this
        // thread exclusive access to the protected data.
        unsafe { &*self.mutex.data.get() }
    }
}

impl<'a, T> core::ops::DerefMut for MutexGuard<'a, T> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        // SAFETY: see `deref` — the held lock guarantees exclusivity, and
        // `&mut self` prevents aliasing through this guard.
        unsafe { &mut *self.mutex.data.get() }
    }
}

impl<'a, T> Drop for MutexGuard<'a, T> {
    fn drop(&mut self) {
        // `Release` publishes all writes made inside the critical section
        // to the next thread that acquires the lock.
        self.mutex
            .locked
            .store(false, core::sync::atomic::Ordering::Release);
    }
}
258
/// Safe yield function
///
/// Voluntarily gives up the CPU by delegating to the scheduler's
/// `crate::sync::yield_thread`.
pub fn yield_now() {
    crate::sync::yield_thread();
}
263
264/// Safe exit function
265pub fn exit_thread() {
266    crate::sync::exit_thread();
267}