syscall/flag.rs

use bitflags::bitflags as inner_bitflags;
use core::{mem, ops::Deref, slice};

macro_rules! bitflags {
    (
        $(#[$outer:meta])*
        pub struct $BitFlags:ident: $T:ty {
            $(
                $(#[$inner:ident $($args:tt)*])*
                const $Flag:ident = $value:expr;
            )+
        }
    ) => {
        // First, use the inner bitflags
        inner_bitflags! {
            #[derive(PartialEq, Eq, PartialOrd, Ord, Hash, Debug, Clone, Copy, Default)]
            $(#[$outer])*
            pub struct $BitFlags: $T {
                $(
                    $(#[$inner $($args)*])*
                    const $Flag = $value;
                )+
            }
        }

        impl $BitFlags {
            #[deprecated = "use the safe `from_bits_retain` method instead"]
            pub unsafe fn from_bits_unchecked(bits: $T) -> Self {
                Self::from_bits_retain(bits)
            }
        }

        // Secondly, re-export all inner constants
        // (`pub use self::Struct::*` doesn't work)
        $(
            $(#[$inner $($args)*])*
            pub const $Flag: $BitFlags = $BitFlags::$Flag;
        )+
    }
}

pub const CLOCK_REALTIME: usize = 1;
pub const CLOCK_MONOTONIC: usize = 4;

bitflags! {
    pub struct EventFlags: usize {
        const EVENT_NONE = 0;
        const EVENT_READ = 1;
        const EVENT_WRITE = 2;
    }
}
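
// Illustrative sketch (not part of the upstream file): the wrapper macro above both defines the
// bitflags struct and re-exports each flag as a module-level constant, so `EVENT_READ` and
// `EventFlags::EVENT_READ` should name the same value. The test below only demonstrates that
// relationship and the usual bitflags operations.
#[cfg(test)]
mod event_flags_example {
    use super::*;

    #[test]
    fn module_level_constants_match_associated_ones() {
        // The macro's re-export makes the bare constant equal to the associated constant.
        assert_eq!(EVENT_READ, EventFlags::EVENT_READ);

        // Ordinary bitflags composition and queries.
        let interest = EventFlags::EVENT_READ | EventFlags::EVENT_WRITE;
        assert!(interest.contains(EventFlags::EVENT_READ));
        assert_eq!(interest.bits(), 3);
    }
}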

pub const F_DUPFD: usize = 0;
pub const F_GETFD: usize = 1;
pub const F_SETFD: usize = 2;
pub const F_GETFL: usize = 3;
pub const F_SETFL: usize = 4;

pub const FUTEX_WAIT: usize = 0;
pub const FUTEX_WAKE: usize = 1;
pub const FUTEX_REQUEUE: usize = 2;
pub const FUTEX_WAIT64: usize = 3;

// packet.c = fd
pub const SKMSG_FRETURNFD: usize = 0;

// packet.uid:packet.gid = offset, packet.c = base address, packet.d = page count
pub const SKMSG_PROVIDE_MMAP: usize = 1;

// packet.id provides state, packet.c = dest fd or pointer to dest fd, packet.d = flags
pub const SKMSG_FOBTAINFD: usize = 2;

// TODO: Split SendFdFlags into caller flags and flags that the scheme receives?
bitflags::bitflags! {
    #[derive(Clone, Copy, Debug)]
    pub struct SendFdFlags: usize {
        /// If set, the kernel will enforce that the file descriptors are exclusively owned.
        ///
        /// That is, there will no longer exist any other reference to those FDs when removed from
        /// the file table (sendfd always removes the FDs from the file table, but without this
        /// flag, they can be retained by SYS_DUPing them first).
        const EXCLUSIVE = 1;

        /// If set, the file descriptors will be cloned and *not* removed from the sender's file table.
        /// By default, `SYS_SENDFD` moves the file descriptors, removing them from the sender.
        const CLONE = 2;
    }
}
bitflags::bitflags! {
    #[derive(Clone, Copy, Debug)]
    pub struct FobtainFdFlags: usize {
        /// If set, the SYS_CALL payload specifies the destination file descriptor slots; otherwise
        /// the lowest available slots will be selected and placed in the usize pointed to by the
        /// SYS_CALL payload.
        const MANUAL_FD = 1;

        /// If set, the file descriptors received are guaranteed to be exclusively owned (by the file
        /// table the obtainer is running in).
        const EXCLUSIVE = 2;

        /// If set, the file descriptors received will be placed into the *upper* file table.
        const UPPER_TBL = 4;

        // Note: cloexec will not be stored in the kernel once the stable ABI is moved to relibc,
        // so there is no flag for it.
    }
}
bitflags::bitflags! {
    #[derive(Clone, Copy, Debug)]
    pub struct RecvFdFlags: usize {
        /// If set, the SYS_CALL payload specifies the destination file descriptor slots; otherwise
        /// the lowest available slots will be selected and placed in the usize pointed to by the
        /// SYS_CALL payload.
        const MANUAL_FD = 1;

        /// If set, the file descriptors received will be placed into the *upper* file table.
        const UPPER_TBL = 2;
    }
}
bitflags::bitflags! {
    #[derive(Clone, Copy, Debug)]
    pub struct FmoveFdFlags: usize {
        /// If set, the kernel will enforce that the file descriptors are exclusively owned.
        ///
        /// That is, there will no longer exist any other reference to those FDs when removed from
        /// the file table (SYS_CALL always removes the FDs from the file table, but without this
        /// flag, they can be retained by SYS_DUPing them first).
        const EXCLUSIVE = 1;

        /// If set, the file descriptors will be cloned and *not* removed from the sender's file table.
        /// By default, sendfd moves the file descriptors, removing them from the sender.
        const CLONE = 2;
    }
}
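
// Illustrative sketch (not part of the upstream file): composing the flag words for the two ends
// of an fd-passing operation. Only the flag arithmetic is shown; the actual sendfd/obtainfd calls
// and their payload layout are intentionally left out.
#[cfg(test)]
mod fd_passing_flags_example {
    use super::*;

    #[test]
    fn sender_and_receiver_flags() {
        // The sender keeps its copy of the fd instead of moving it.
        let send = SendFdFlags::CLONE;
        assert!(!send.contains(SendFdFlags::EXCLUSIVE));

        // The receiver lets the kernel pick the slot and asks for the upper file table.
        let obtain = FobtainFdFlags::UPPER_TBL;
        assert!(!obtain.contains(FobtainFdFlags::MANUAL_FD));
        assert_eq!(obtain.bits(), 4);
    }
}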

bitflags! {
    pub struct MapFlags: usize {
        // TODO: Downgrade PROT_NONE to global constant? (bitflags specifically states zero flags
        // can cause buggy behavior).
        const PROT_NONE = 0x0000_0000;

        const PROT_EXEC = 0x0001_0000;
        const PROT_WRITE = 0x0002_0000;
        const PROT_READ = 0x0004_0000;

        const MAP_SHARED = 0x0001;
        const MAP_PRIVATE = 0x0002;

        const MAP_FIXED = 0x0004;
        const MAP_FIXED_NOREPLACE = 0x000C;

        /// For *userspace-backed mmaps*, return from the mmap call before all pages have been
        /// provided by the scheme. This requires the scheme to be trusted, as the current context
        /// can block indefinitely if the scheme does not respond when the page fault handler
        /// requests the missing page from it.
        ///
        /// In some cases, however, such as the program loader, the data has to be trusted to the
        /// same extent with or without MAP_LAZY, and in that case mapping lazily does not
        /// introduce any insecurity by itself.
        ///
        /// For kernel-backed mmaps, this flag has no effect at all. It is unspecified whether
        /// kernel mmaps are lazy or not.
        const MAP_LAZY = 0x0010;
    }
}
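
// Illustrative sketch (not part of the upstream file): building a protection/mapping word and
// checking a derived flag. Note that MAP_FIXED_NOREPLACE (0x000C) includes the MAP_FIXED bit.
#[cfg(test)]
mod map_flags_example {
    use super::*;

    #[test]
    fn compose_protection_and_mapping_bits() {
        let flags = MapFlags::PROT_READ | MapFlags::PROT_WRITE | MapFlags::MAP_PRIVATE;
        assert!(flags.contains(MapFlags::PROT_READ));
        assert_eq!(flags.bits(), 0x0006_0002);

        // MAP_FIXED_NOREPLACE is defined so that it also carries the MAP_FIXED bit.
        assert!(MapFlags::MAP_FIXED_NOREPLACE.contains(MapFlags::MAP_FIXED));
    }
}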
bitflags! {
    pub struct MunmapFlags: usize {
        /// Indicates whether the funmap call must implicitly do an msync for the changes to
        /// become visible later.
        ///
        /// This flag will currently be set if and only if MAP_SHARED | PROT_WRITE are set.
        const NEEDS_SYNC = 1;
    }
}
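
// Illustrative sketch (not part of the upstream file): how a caller might derive NEEDS_SYNC from
// the original mapping flags, following the "set if and only if MAP_SHARED | PROT_WRITE" rule
// stated in the doc comment above. The helper name is made up for this example.
#[cfg(test)]
mod munmap_flags_example {
    use super::*;

    // Hypothetical helper mirroring the documented rule.
    fn unmap_flags_for(map: MapFlags) -> MunmapFlags {
        if map.contains(MapFlags::MAP_SHARED | MapFlags::PROT_WRITE) {
            MunmapFlags::NEEDS_SYNC
        } else {
            MunmapFlags::empty()
        }
    }

    #[test]
    fn shared_writable_mappings_need_sync() {
        assert_eq!(
            unmap_flags_for(MapFlags::MAP_SHARED | MapFlags::PROT_WRITE),
            MunmapFlags::NEEDS_SYNC
        );
        assert_eq!(
            unmap_flags_for(MapFlags::MAP_PRIVATE | MapFlags::PROT_READ),
            MunmapFlags::empty()
        );
    }
}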

pub const MODE_TYPE: u16 = 0xF000;
pub const MODE_DIR: u16 = 0x4000;
pub const MODE_FILE: u16 = 0x8000;
pub const MODE_SYMLINK: u16 = 0xA000;
pub const MODE_FIFO: u16 = 0x1000;
pub const MODE_CHR: u16 = 0x2000;
pub const MODE_SOCK: u16 = 0xC000;

pub const MODE_PERM: u16 = 0x0FFF;
pub const MODE_SETUID: u16 = 0o4000;
pub const MODE_SETGID: u16 = 0o2000;
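
// Illustrative sketch (not part of the upstream file): splitting a `st_mode`-style value into its
// file-type and permission parts using the masks above.
#[cfg(test)]
mod mode_example {
    use super::*;

    #[test]
    fn split_type_and_permissions() {
        // A directory with 0o755 permissions.
        let mode: u16 = MODE_DIR | 0o755;
        assert_eq!(mode & MODE_TYPE, MODE_DIR);
        assert_eq!(mode & MODE_PERM, 0o755);
        assert_eq!(mode & MODE_SETUID, 0);
    }
}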

pub const O_RDONLY: usize = 0x0001_0000;
pub const O_WRONLY: usize = 0x0002_0000;
pub const O_RDWR: usize = 0x0003_0000;
pub const O_NONBLOCK: usize = 0x0004_0000;
pub const O_APPEND: usize = 0x0008_0000;
pub const O_SHLOCK: usize = 0x0010_0000;
pub const O_EXLOCK: usize = 0x0020_0000;
pub const O_ASYNC: usize = 0x0040_0000;
pub const O_FSYNC: usize = 0x0080_0000;
pub const O_CLOEXEC: usize = 0x0100_0000;
pub const O_CREAT: usize = 0x0200_0000;
pub const O_TRUNC: usize = 0x0400_0000;
pub const O_EXCL: usize = 0x0800_0000;
pub const O_DIRECTORY: usize = 0x1000_0000;
pub const O_STAT: usize = 0x2000_0000;
pub const O_SYMLINK: usize = 0x4000_0000;
pub const O_NOFOLLOW: usize = 0x8000_0000;
pub const O_ACCMODE: usize = O_RDONLY | O_WRONLY | O_RDWR;
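
// Illustrative sketch (not part of the upstream file): extracting the access mode from an open
// flags word. Because O_RDWR == O_RDONLY | O_WRONLY, the access mode has to be compared after
// masking with O_ACCMODE rather than tested bit by bit.
#[cfg(test)]
mod open_flags_example {
    use super::*;

    #[test]
    fn access_mode_is_a_masked_comparison() {
        let flags = O_RDWR | O_CREAT | O_CLOEXEC;
        assert_eq!(flags & O_ACCMODE, O_RDWR);
        assert_ne!(flags & O_ACCMODE, O_WRONLY);
    }
}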

// The top 48 bits of PTRACE_* are reserved, for now

// NOT ABI STABLE!
#[derive(Clone, Copy, Debug, Default, Eq, PartialEq)]
#[repr(usize)]
pub enum ContextStatus {
    Runnable,
    Blocked,
    NotYetStarted,
    Dead,
    ForceKilled,
    Stopped,
    UnhandledExcp,
    #[default]
    Other, // reserved
}

#[derive(Clone, Copy, Debug, Eq, PartialEq)]
#[repr(usize)]
pub enum ContextVerb {
    Stop = 1,
    Unstop = 2,
    Interrupt = 3,
    ForceKill = usize::MAX,
}
impl ContextVerb {
    pub fn try_from_raw(raw: usize) -> Option<Self> {
        Some(match raw {
            1 => Self::Stop,
            2 => Self::Unstop,
            3 => Self::Interrupt,
            usize::MAX => Self::ForceKill,
            _ => return None,
        })
    }
}

// NOT ABI STABLE!
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
#[repr(u8)]
pub enum ProcSchemeVerb {
    Iopl = 255,
}
impl ProcSchemeVerb {
    pub fn try_from_raw(verb: u8) -> Option<Self> {
        Some(match verb {
            255 => Self::Iopl,
            _ => return None,
        })
    }
}

#[derive(Clone, Copy, Debug, Eq, PartialEq)]
#[repr(usize)]
pub enum SchemeSocketCall {
    ObtainFd = 0,
    MoveFd = 1,
}
impl SchemeSocketCall {
    pub fn try_from_raw(raw: usize) -> Option<Self> {
        Some(match raw {
            0 => Self::ObtainFd,
            1 => Self::MoveFd,
            _ => return None,
        })
    }
}

#[derive(Clone, Copy, Debug, Eq, PartialEq)]
#[repr(usize)]
#[non_exhaustive]
pub enum FsCall {
    Connect = 0,
}
impl FsCall {
    pub fn try_from_raw(raw: usize) -> Option<Self> {
        Some(match raw {
            0 => Self::Connect,
            _ => return None,
        })
    }
}

bitflags! {
    pub struct PtraceFlags: u64 {
        /// Stop before a syscall is handled. Send PTRACE_FLAG_IGNORE to not
        /// handle the syscall.
        const PTRACE_STOP_PRE_SYSCALL = 0x0000_0000_0000_0001;
        /// Stop after a syscall is handled.
        const PTRACE_STOP_POST_SYSCALL = 0x0000_0000_0000_0002;
        /// Stop after exactly one instruction. TODO: This may not handle
        /// fexec/signal boundaries. Should it?
        const PTRACE_STOP_SINGLESTEP = 0x0000_0000_0000_0004;
        /// Stop before a signal is handled. Send PTRACE_FLAG_IGNORE to not
        /// handle the signal.
        const PTRACE_STOP_SIGNAL = 0x0000_0000_0000_0008;
        /// Stop on a software breakpoint, such as the int3 instruction for
        /// x86_64.
        const PTRACE_STOP_BREAKPOINT = 0x0000_0000_0000_0010;
        /// Stop just before exiting for good.
        const PTRACE_STOP_EXIT = 0x0000_0000_0000_0020;

        const PTRACE_STOP_MASK = 0x0000_0000_0000_00FF;

        /// Sent when a child is cloned, giving you the opportunity to trace it.
        /// If you don't catch this, the child is started as normal.
        const PTRACE_EVENT_CLONE = 0x0000_0000_0000_0100;

        /// Sent when the current addrspace is changed, allowing the tracer to reopen the memory file.
        const PTRACE_EVENT_ADDRSPACE_SWITCH = 0x0000_0000_0000_0200;

        const PTRACE_EVENT_MASK = 0x0000_0000_0000_0F00;

        /// Special meaning, depending on the event. Usually, when fired before
        /// an action, it will skip performing that action.
        const PTRACE_FLAG_IGNORE = 0x0000_0000_0000_1000;

        const PTRACE_FLAG_MASK = 0x0000_0000_0000_F000;
    }
}
impl Deref for PtraceFlags {
    type Target = [u8];
    fn deref(&self) -> &Self::Target {
        // Same as to_ne_bytes but in-place. Borrow the flags value itself rather than the
        // temporary returned by `bits()`, so the returned slice cannot dangle (this assumes the
        // flags struct has the same size and layout as its underlying u64 bits).
        unsafe { slice::from_raw_parts(self as *const Self as *const u8, mem::size_of::<u64>()) }
    }
}
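
// Illustrative sketch (not part of the upstream file): classifying a PtraceFlags value with the
// *_MASK constants and reading it back as bytes through the Deref impl above.
#[cfg(test)]
mod ptrace_flags_example {
    use super::*;

    #[test]
    fn masks_and_byte_view() {
        let flags = PtraceFlags::PTRACE_STOP_SINGLESTEP | PtraceFlags::PTRACE_FLAG_IGNORE;

        // The stop/event/flag groups occupy disjoint bit ranges, selected via the masks.
        assert!(flags.intersects(PtraceFlags::PTRACE_STOP_MASK));
        assert!(!flags.intersects(PtraceFlags::PTRACE_EVENT_MASK));

        // Deref exposes the raw bits as a native-endian byte slice.
        let bytes: &[u8] = &flags;
        assert_eq!(bytes, &flags.bits().to_ne_bytes()[..]);
        assert_eq!(bytes.len(), core::mem::size_of::<u64>());
    }
}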

pub const SEEK_SET: usize = 0;
pub const SEEK_CUR: usize = 1;
pub const SEEK_END: usize = 2;

pub const SIGCHLD: usize = 17;
pub const SIGTSTP: usize = 20;
pub const SIGTTIN: usize = 21;
pub const SIGTTOU: usize = 22;

pub const ADDRSPACE_OP_MMAP: usize = 0;
pub const ADDRSPACE_OP_MUNMAP: usize = 1;
pub const ADDRSPACE_OP_MPROTECT: usize = 2;
pub const ADDRSPACE_OP_TRANSFER: usize = 3;

bitflags! {
    pub struct MremapFlags: usize {
        const FIXED = 1;
        const FIXED_REPLACE = 3;
        /// Aliases the memory region at `old_address` to `new_address` such that both regions
        /// share the same frames.
        const KEEP_OLD = 1 << 2;
        // TODO: MAYMOVE, DONTUNMAP
    }
}
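
// Illustrative sketch (not part of the upstream file): FIXED_REPLACE (0b11) is defined as a
// superset of FIXED, so checking for FIXED alone also matches a FIXED_REPLACE request.
#[cfg(test)]
mod mremap_flags_example {
    use super::*;

    #[test]
    fn fixed_replace_implies_fixed() {
        assert!(MremapFlags::FIXED_REPLACE.contains(MremapFlags::FIXED));
        assert!(!MremapFlags::FIXED.contains(MremapFlags::FIXED_REPLACE));
    }
}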
bitflags! {
    pub struct RwFlags: u32 {
        const NONBLOCK = 1;
        const APPEND = 2;
        // TODO: sync/dsync
        // TODO: O_DIRECT?
    }
}
bitflags! {
    pub struct SigcontrolFlags: usize {
        /// Prevents the kernel from jumping the context to the signal trampoline, but otherwise
        /// has absolutely no effect on which signals are blocked etc. Meant to be used for
        /// short-lived critical sections inside libc.
        const INHIBIT_DELIVERY = 1;
    }
}
bitflags! {
    pub struct CallFlags: usize {
        // reserved
        const RSVD0 = 1 << 0;
        const RSVD1 = 1 << 1;
        const RSVD2 = 1 << 2;
        const RSVD3 = 1 << 3;
        const RSVD4 = 1 << 4;
        const RSVD5 = 1 << 5;
        const RSVD6 = 1 << 6;
        const RSVD7 = 1 << 7;

        /// Remove the fd from the caller's file table before sending the message.
        const CONSUME = 1 << 8;

        const WRITE = 1 << 9;
        const READ = 1 << 10;

        /// Indicates that the request is a bulk fd-passing request.
        const FD = 1 << 11;
        /// Flags for the fd-passing request.
        const FD_EXCLUSIVE = 1 << 12;
        const FD_CLONE = 1 << 13;
        const FD_UPPER = 1 << 14;
    }
}

/// The tag for the fd number in the upper file descriptor table.
pub const UPPER_FDTBL_TAG: usize = 1 << (usize::BITS - 2);
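
// Illustrative sketch (not part of the upstream file): how a file descriptor number might be
// tagged and untagged for the upper file descriptor table. The helper names are made up here;
// the exact convention used by the kernel and relibc is not spelled out in this file.
#[cfg(test)]
mod upper_fdtbl_tag_example {
    use super::*;

    // Hypothetical helpers around the tag bit.
    fn tag_upper(fd: usize) -> usize {
        fd | UPPER_FDTBL_TAG
    }
    fn is_upper(fd: usize) -> bool {
        fd & UPPER_FDTBL_TAG != 0
    }
    fn untag(fd: usize) -> usize {
        fd & !UPPER_FDTBL_TAG
    }

    #[test]
    fn tag_round_trip() {
        let fd = 5;
        let tagged = tag_upper(fd);
        assert!(is_upper(tagged));
        assert!(!is_upper(fd));
        assert_eq!(untag(tagged), fd);
    }
}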