// syscall/sigabi.rs

use core::sync::atomic::{AtomicUsize, Ordering};

/// Signal runtime struct for the entire process
#[derive(Debug)]
#[repr(C, align(4096))]
pub struct SigProcControl {
    pub pending: AtomicU64,
    pub actions: [RawAction; 64],
    pub sender_infos: [AtomicU64; 32],
    //pub queue: [RealtimeSig; 32], TODO
    // qhead, qtail TODO
}
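
// Bit `sig - 1` of `pending` above presumably marks signal `sig` as pending
// (matching `sig_bit` below); `actions` is likewise indexed by `sig - 1`.
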
/*#[derive(Debug)]
#[repr(transparent)]
pub struct RealtimeSig {
    pub arg: NonatomicUsize,
}*/
#[derive(Debug, Default)]
#[repr(C, align(16))]
pub struct RawAction {
    /// Only the two MSBs are of interest to the kernel. If bit 63 is set, the signal is
    /// ignored. If bit 62 is set and the signal is SIGTSTP/SIGTTIN/SIGTTOU, it's equivalent
    /// to the action of Stop.
    pub first: AtomicU64,
    /// Completely ignored by the kernel, but exists so userspace can (when 16-byte atomics
    /// exist) atomically set the handler, the sigaction flags, and the sigaction mask in a
    /// single operation.
    pub user_data: AtomicU64,
}

/// Signal runtime struct for a thread
#[derive(Debug, Default)]
#[repr(C)]
pub struct Sigcontrol {
    // Each word is composed of [lo half "pending" | hi half "unmasked"]: word[0] packs the
    // pending bits for signals 1-32 in its low 32 bits and the corresponding unmasked
    // ("allow") bits in its high 32 bits; word[1] does the same for signals 33-64.
    pub word: [AtomicU64; 2],

    // lo = sender pid, hi = sender ruid
    pub sender_infos: [AtomicU64; 32],

    pub control_flags: SigatomicUsize,

    pub saved_ip: NonatomicUsize,          // rip/eip/pc
    pub saved_archdep_reg: NonatomicUsize, // rflags(x64)/eflags(x86)/x0(aarch64)/t0(riscv64)
}
#[derive(Clone, Copy, Debug)]
pub struct SenderInfo {
    pub pid: u32,
    pub ruid: u32,
}
impl SenderInfo {
    #[inline]
    pub fn raw(self) -> u64 {
        u64::from(self.pid) | (u64::from(self.ruid) << 32)
    }
    #[inline]
    pub const fn from_raw(raw: u64) -> Self {
        Self {
            pid: raw as u32,
            ruid: (raw >> 32) as u32,
        }
    }
}
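
// A minimal round-trip sketch (illustrative, not part of the upstream file):
// `raw` packs the pid into the low 32 bits and the ruid into the high 32 bits,
// and `from_raw` reverses it.
#[cfg(test)]
#[test]
fn sender_info_roundtrip() {
    let info = SenderInfo { pid: 1234, ruid: 42 };
    assert_eq!(info.raw(), (42 << 32) | 1234);
    let back = SenderInfo::from_raw(info.raw());
    assert_eq!((back.pid, back.ruid), (info.pid, info.ruid));
}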

impl Sigcontrol {
    pub fn currently_pending_unblocked(&self, proc: &SigProcControl) -> u64 {
        let proc_pending = proc.pending.load(Ordering::Relaxed);
        let [w0, w1] = core::array::from_fn(|i| {
            let w = self.word[i].load(Ordering::Relaxed);
            ((w | (proc_pending >> (i * 32))) & 0xffff_ffff) & (w >> 32)
        });
        //core::sync::atomic::fence(Ordering::Acquire);
        w0 | (w1 << 32)
    }
    pub fn set_allowset(&self, new_allowset: u64) -> u64 {
        //core::sync::atomic::fence(Ordering::Release);
        let [w0, w1] = self.word.each_ref().map(|w| w.load(Ordering::Relaxed));
        let old_a0 = w0 & 0xffff_ffff_0000_0000;
        let old_a1 = w1 & 0xffff_ffff_0000_0000;
        let new_a0 = (new_allowset & 0xffff_ffff) << 32;
        let new_a1 = new_allowset & 0xffff_ffff_0000_0000;

        let prev_w0 = self.word[0].fetch_add(new_a0.wrapping_sub(old_a0), Ordering::Relaxed);
        let prev_w1 = self.word[1].fetch_add(new_a1.wrapping_sub(old_a1), Ordering::Relaxed);
        //core::sync::atomic::fence(Ordering::Acquire);
        let up0 = prev_w0 & (prev_w0 >> 32);
        let up1 = prev_w1 & (prev_w1 >> 32);

        up0 | (up1 << 32)
    }
}
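
// A hedged usage sketch (illustrative test, not from the upstream file): a
// pending bit only shows up in `currently_pending_unblocked` once the matching
// allow bit in the high half of the same word has been set via `set_allowset`.
#[cfg(test)]
#[test]
fn pending_needs_allow_bit() {
    let ctl = Sigcontrol::default();
    let pctl = SigProcControl {
        pending: AtomicU64::new(0),
        actions: core::array::from_fn(|_| RawAction::default()),
        sender_infos: Default::default(),
    };
    // Mark signal 5 pending on the thread (bit 4 in the low half of word 0).
    ctl.word[0].fetch_or(sig_bit(5), Ordering::Relaxed);
    assert_eq!(ctl.currently_pending_unblocked(&pctl), 0); // still masked
    ctl.set_allowset(!0); // unmask all 64 signals
    assert_eq!(ctl.currently_pending_unblocked(&pctl), sig_bit(5));
}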
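
/// A usize shared only between a thread and its own signal handlers.
///
/// Presumably only compiler-level reordering matters for such values (a thread
/// cannot race with itself across cores), so `load` and `store` combine relaxed
/// atomic accesses with `compiler_fence` rather than a hardware fence.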
#[derive(Debug, Default)]
#[repr(transparent)]
pub struct SigatomicUsize(AtomicUsize);

impl SigatomicUsize {
    #[inline]
    pub fn load(&self, ordering: Ordering) -> usize {
        let value = self.0.load(Ordering::Relaxed);
        if ordering != Ordering::Relaxed {
            core::sync::atomic::compiler_fence(ordering);
        }
        value
    }
    #[inline]
    pub fn store(&self, value: usize, ordering: Ordering) {
        if ordering != Ordering::Relaxed {
            core::sync::atomic::compiler_fence(ordering);
        }
        self.0.store(value, Ordering::Relaxed);
    }
}
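
/// A usize that is never accessed concurrently, but is wrapped in an
/// `AtomicUsize`, presumably so the containing struct can be `Sync` without
/// `unsafe`. All accesses use relaxed ordering.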
#[derive(Debug, Default)]
#[repr(transparent)]
pub struct NonatomicUsize(AtomicUsize);

impl NonatomicUsize {
    #[inline]
    pub const fn new(a: usize) -> Self {
        Self(AtomicUsize::new(a))
    }

    #[inline]
    pub fn get(&self) -> usize {
        self.0.load(Ordering::Relaxed)
    }
    #[inline]
    pub fn set(&self, value: usize) {
        self.0.store(value, Ordering::Relaxed);
    }
}

pub fn sig_bit(sig: usize) -> u64 {
    1 << (sig - 1)
}
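
// Quick illustration (author's note, not from the upstream file): signal
// numbers are 1-based, so signal 1 maps to bit 0 and signal 64 to the top bit.
#[cfg(test)]
#[test]
fn sig_bit_is_one_based() {
    assert_eq!(sig_bit(1), 1);
    assert_eq!(sig_bit(9), 1 << 8); // e.g. SIGKILL, signal 9
    assert_eq!(sig_bit(64), 1 << 63);
}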

// TODO: Move to redox_rt?
impl SigProcControl {
    /// Checks if `sig` should be ignored based on the current action flags.
    ///
    /// * `sig` - The signal to check (e.g. `SIGCHLD`).
    ///
    /// * `stop_or_continue` - Whether the signal is generated because a child
    ///   process stopped (`SIGSTOP`, `SIGTSTP`) or continued (`SIGCONT`). If
    ///   `true` and `sig` is `SIGCHLD`, the signal shall not be delivered if
    ///   the `SA_NOCLDSTOP` flag is set for `SIGCHLD`.
    pub fn signal_will_ign(&self, sig: usize, stop_or_continue: bool) -> bool {
        let flags = self.actions[sig - 1].first.load(Ordering::Relaxed);
        let will_ign = flags & (1 << 63) != 0;
        let sig_specific = flags & (1 << 62) != 0; // SA_NOCLDSTOP if sig == SIGCHLD

        will_ign || (sig == SIGCHLD && stop_or_continue && sig_specific)
    }
    // TODO: Move to redox_rt?
    pub fn signal_will_stop(&self, sig: usize) -> bool {
        use crate::flag::*;
        matches!(sig, SIGTSTP | SIGTTIN | SIGTTOU)
            && self.actions[sig - 1].first.load(Ordering::Relaxed) & (1 << 62) != 0
    }
}
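
// A sketch of the flag semantics (illustrative test, not from upstream): in
// `first`, bit 63 means "ignore this signal", while bit 62 is signal-specific:
// SA_NOCLDSTOP for SIGCHLD, and "stop" for SIGTSTP/SIGTTIN/SIGTTOU.
#[cfg(test)]
#[test]
fn action_flag_bits() {
    let pctl = SigProcControl {
        pending: AtomicU64::new(0),
        actions: core::array::from_fn(|_| RawAction::default()),
        sender_infos: Default::default(),
    };
    // SA_NOCLDSTOP: SIGCHLD is ignored only when it reports a stop/continue.
    pctl.actions[SIGCHLD - 1].first.store(1 << 62, Ordering::Relaxed);
    assert!(pctl.signal_will_ign(SIGCHLD, true));
    assert!(!pctl.signal_will_ign(SIGCHLD, false));
    // The same bit on SIGTSTP selects the default "stop" action.
    pctl.actions[crate::flag::SIGTSTP - 1]
        .first
        .store(1 << 62, Ordering::Relaxed);
    assert!(pctl.signal_will_stop(crate::flag::SIGTSTP));
}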

use crate::SIGCHLD;

#[cfg(not(target_arch = "x86"))]
pub use core::sync::atomic::AtomicU64;

#[cfg(target_arch = "x86")]
pub use self::atomic::AtomicU64;

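/// Fallback 64-bit atomics for 32-bit x86, implemented on top of `cmpxchg8b`,
/// presumably the only 8-byte atomic primitive available on the minimum
/// supported x86 CPU.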
#[cfg(target_arch = "x86")]
mod atomic {
    use core::{cell::UnsafeCell, sync::atomic::Ordering};

    #[derive(Debug, Default)]
    pub struct AtomicU64(UnsafeCell<u64>);

    unsafe impl Send for AtomicU64 {}
    unsafe impl Sync for AtomicU64 {}

    impl AtomicU64 {
        pub const fn new(inner: u64) -> Self {
            Self(UnsafeCell::new(inner))
        }
        pub fn compare_exchange(
            &self,
            old: u64,
            new: u64,
            _success: Ordering,
            _failure: Ordering,
        ) -> Result<u64, u64> {
            let old_hi = (old >> 32) as u32;
            let old_lo = old as u32;
            let new_hi = (new >> 32) as u32;
            let new_lo = new as u32;
            let mut out_hi;
            let mut out_lo;

            unsafe {
                // `ebx` is reserved by LLVM and cannot be named as an inline
                // asm operand, so hand `new_lo` over in a scratch register and
                // swap it into `ebx` around the `cmpxchg8b`.
                core::arch::asm!(
                    "xchg {new_lo}, ebx",
                    "lock cmpxchg8b [{ptr}]",
                    "mov ebx, {new_lo}",
                    ptr = in(reg) self.0.get(),
                    new_lo = inout(reg) new_lo => _,
                    inout("edx") old_hi => out_hi,
                    inout("eax") old_lo => out_lo,
                    in("ecx") new_hi,
                );
            }

            if old_hi == out_hi && old_lo == out_lo {
                Ok(old)
            } else {
                Err(u64::from(out_lo) | (u64::from(out_hi) << 32))
            }
        }
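
        // i586 has no plain 8-byte atomic load, so one is emulated with a
        // compare-exchange against an arbitrary expected value: the current
        // value comes back whether the CAS succeeds or fails.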
        pub fn load(&self, ordering: Ordering) -> u64 {
            match self.compare_exchange(0, 0, ordering, ordering) {
                Ok(new) => new,
                Err(new) => new,
            }
        }
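        // Likewise, plain stores are emulated with a CAS loop that retries
        // until the expected value matches what is currently in memory.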
        pub fn store(&self, new: u64, ordering: Ordering) {
            let mut old = 0;

            loop {
                match self.compare_exchange(old, new, ordering, Ordering::Relaxed) {
                    Ok(_) => break,
                    Err(changed) => {
                        old = changed;
                        core::hint::spin_loop();
                    }
                }
            }
        }
        pub fn fetch_update(
            &self,
            set_order: Ordering,
            fetch_order: Ordering,
            mut f: impl FnMut(u64) -> Option<u64>,
        ) -> Result<u64, u64> {
            let mut old = self.load(fetch_order);

            loop {
                let new = f(old).ok_or(old)?;
                match self.compare_exchange(old, new, set_order, Ordering::Relaxed) {
                    // Return the previous value on success, matching
                    // `core::sync::atomic::AtomicU64::fetch_update`, so the
                    // `fetch_*` wrappers below report the pre-update value on
                    // every architecture.
                    Ok(_) => return Ok(old),
                    Err(changed) => {
                        old = changed;
                        core::hint::spin_loop();
                    }
                }
            }
        }
        pub fn fetch_or(&self, bits: u64, order: Ordering) -> u64 {
            self.fetch_update(order, Ordering::Relaxed, |b| Some(b | bits))
                .unwrap()
        }
        pub fn fetch_and(&self, bits: u64, order: Ordering) -> u64 {
            self.fetch_update(order, Ordering::Relaxed, |b| Some(b & bits))
                .unwrap()
        }
        pub fn fetch_add(&self, term: u64, order: Ordering) -> u64 {
            self.fetch_update(order, Ordering::Relaxed, |b| Some(b.wrapping_add(term)))
                .unwrap()
        }
    }
}

#[cfg(test)]
mod tests {
    use std::sync::{
        atomic::{AtomicU64, Ordering},
        Arc,
    };

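    // Without loom, `model` just runs the closure once on real threads; under
    // loom it exhaustively explores the possible interleavings.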
    #[cfg(not(loom))]
    use std::{sync::Mutex, thread};
    #[cfg(not(loom))]
    fn model(f: impl FnOnce()) {
        f()
    }

    #[cfg(loom)]
    use loom::{model, sync::Mutex, thread};

    use crate::{RawAction, SigProcControl, Sigcontrol};

    struct FakeThread {
        ctl: Sigcontrol,
        pctl: SigProcControl,
        ctxt: Mutex<()>,
    }
    impl Default for FakeThread {
        fn default() -> Self {
            Self {
                ctl: Sigcontrol::default(),
                pctl: SigProcControl {
                    pending: AtomicU64::new(0),
                    actions: core::array::from_fn(|_| RawAction::default()),
                    sender_infos: Default::default(),
                },
                ctxt: Default::default(),
            }
        }
    }

    #[test]
    fn singlethread_mask() {
        model(|| {
            let fake_thread = Arc::new(FakeThread::default());

            let thread = {
                let fake_thread = Arc::clone(&fake_thread);

                thread::spawn(move || {
                    fake_thread.ctl.set_allowset(!0);
                    {
                        let _g = fake_thread.ctxt.lock();
                        if fake_thread
                            .ctl
                            .currently_pending_unblocked(&fake_thread.pctl)
                            == 0
                        {
                            drop(_g);
                            thread::park();
                        }
                    }
                })
            };

            for sig in 1..=64 {
                let _g = fake_thread.ctxt.lock();

                let idx = sig - 1;
                let bit = 1 << (idx % 32);

                fake_thread.ctl.word[idx / 32].fetch_or(bit, Ordering::Relaxed);
                let w = fake_thread.ctl.word[idx / 32].load(Ordering::Relaxed);

                if w & (w >> 32) != 0 {
                    thread.thread().unpark();
                }
            }

            thread.join().unwrap();
        });
    }
}