r_linux/syscall/arch/x86/syscall.rs

//! System Calls on x86
//!
//! This implements the syscall entries for x86. One function is provided for
//! each possible number of arguments: syscall0 to syscall6.
//!
//! The implementation uses the x86 `int $0x80` software interrupt to enter
//! the kernel. It would be much faster to use the VDSO entry point, but that
//! requires access to `%gs` and the TLS mappings, so it is left for future
//! improvements (if anyone cares enough about 32-bit x86).
//!
//! Arguments are passed as:
//!     Nr: eax
//!     Args: ebx, ecx, edx, esi, edi, ebp
//! The return value is in:
//!     Ret: eax
//!
//! The entry points are currently not marked as `readonly`. That is, the
//! system calls are allowed to modify memory. If necessary, alternative calls
//! with `readonly` (or maybe even `pure`) can be provided in the future.
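//!
//! As a usage sketch (the raw syscall number `4`, which is `__NR_write` on
//! 32-bit x86, is spelled out here purely for illustration), a `write(2)` to
//! stdout via these entry points would look roughly like this:
//!
//! ```ignore
//! let msg = b"hello\n";
//! let r = unsafe {
//!     syscall3(
//!         4,                      // __NR_write on 32-bit x86
//!         1,                      // fd: stdout
//!         msg.as_ptr() as usize,  // buf
//!         msg.len(),              // count
//!     )
//! };
//! // On failure the kernel returns a negative errno in `eax`, so `r` wraps
//! // into the range `-4095..=-1` when interpreted as an `isize`.
//! ```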

#[cfg(target_arch = "x86")]
#[inline]
#[export_name = "r_linux_asm_syscall0"]
pub unsafe fn syscall0(
    nr: usize,
) -> usize {
    let mut r: usize;

    core::arch::asm!(
        "int $0x80",
        inlateout("eax") nr => r,
        options(nostack, preserves_flags)
    );

    r
}

#[cfg(target_arch = "x86")]
#[inline]
#[export_name = "r_linux_asm_syscall1"]
pub unsafe fn syscall1(
    nr: usize,
    arg0: usize,
) -> usize {
    let mut r: usize;

    core::arch::asm!(
        "int $0x80",
        inlateout("eax") nr => r,
        in("ebx") arg0,
        options(nostack, preserves_flags)
    );

    r
}

#[cfg(target_arch = "x86")]
#[inline]
#[export_name = "r_linux_asm_syscall2"]
pub unsafe fn syscall2(
    nr: usize,
    arg0: usize,
    arg1: usize,
) -> usize {
    let mut r: usize;

    core::arch::asm!(
        "int $0x80",
        inlateout("eax") nr => r,
        in("ebx") arg0,
        in("ecx") arg1,
        options(nostack, preserves_flags)
    );

    r
}

#[cfg(target_arch = "x86")]
#[inline]
#[export_name = "r_linux_asm_syscall3"]
pub unsafe fn syscall3(
    nr: usize,
    arg0: usize,
    arg1: usize,
    arg2: usize,
) -> usize {
    let mut r: usize;

    core::arch::asm!(
        "int $0x80",
        inlateout("eax") nr => r,
        in("ebx") arg0,
        in("ecx") arg1,
        in("edx") arg2,
        options(nostack, preserves_flags)
    );

    r
}

#[cfg(target_arch = "x86")]
#[inline]
#[export_name = "r_linux_asm_syscall4"]
pub unsafe fn syscall4(
    nr: usize,
    arg0: usize,
    arg1: usize,
    arg2: usize,
    arg3: usize,
) -> usize {
    let mut r: usize;

    // LLVM reserves `esi` for inline-asm management (to make sure stack
    // handling is not corrupted). However, it is completely safe to use
    // `esi` here, and it is not clobbered by the kernel. GCC allows using it
    // as an inline-asm input, but unfortunately LLVM does not. Hence, we have
    // to manually swap it with whatever register was picked for `arg3`.
    //
    // Note that in most cases LLVM still picks `esi` for `arg3`, so this ends
    // up running a pointless `xchg esi, esi`. Unfortunately, there is little
    // we can do about it, so we keep it as it is.
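    //
    // In effect, the sequence around the trap is (with `{arg3}` being
    // whichever register LLVM picked for `arg3`):
    //
    //     xchg esi, {arg3}    ; move arg3 into esi, stash the old esi
    //     int $0x80           ; esi carries the 4th syscall argument
    //     xchg esi, {arg3}    ; restore the caller's esi and LLVM's operand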
    core::arch::asm!(
        "xchg esi, {arg3}",
        "int $0x80",
        "xchg esi, {arg3}",
        arg3 = in(reg) arg3,
        inlateout("eax") nr => r,
        in("ebx") arg0,
        in("ecx") arg1,
        in("edx") arg2,
        options(nostack, preserves_flags)
    );

    r
}

#[cfg(target_arch = "x86")]
#[inline]
#[export_name = "r_linux_asm_syscall5"]
pub unsafe fn syscall5(
    nr: usize,
    arg0: usize,
    arg1: usize,
    arg2: usize,
    arg3: usize,
    arg4: usize,
) -> usize {
    let mut r: usize;

    // see syscall4() for `esi` handling
    core::arch::asm!(
        "xchg esi, {arg3}",
        "int $0x80",
        "xchg esi, {arg3}",
        arg3 = in(reg) arg3,
        inlateout("eax") nr => r,
        in("ebx") arg0,
        in("ecx") arg1,
        in("edx") arg2,
        in("edi") arg4,
        options(nostack, preserves_flags)
    );

    r
}

#[cfg(target_arch = "x86")]
#[inline]
#[export_name = "r_linux_asm_syscall6"]
pub unsafe fn syscall6(
    nr: usize,
    arg0: usize,
    arg1: usize,
    arg2: usize,
    arg3: usize,
    arg4: usize,
    arg5: usize,
) -> usize {
    let mut r: usize;

    // The last argument `arg5` needs to be passed in `ebp`. Again, LLVM does
    // not allow us to use it as an `in` register. Hence, we just let LLVM
    // pick registers itself. Since there are hardly any left, it will most
    // likely pick the right ones anyway. But to be safe we assume both `arg3`
    // and `arg5` might be in other registers (or actually swapped). Hence, we
    // push both values to the stack, then save `esi` and `ebp`, then load the
    // values into those registers and trap into the kernel. Afterwards, we
    // restore `esi` and `ebp`, and then the registers picked by LLVM.
    //
    // Note that the assembly will likely look silly, since `arg3` usually
    // ends up being `esi` and `arg5` ends up being `ebp`. Unfortunately,
    // there is little we can do to detect that scenario. However, a
    // 6-argument syscall will hardly notice the slight slowdown this causes.
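    //
    // Right before the `int $0x80`, the stack thus looks like this (each slot
    // is 4 bytes; this merely restates what the pushes below set up):
    //
    //     [esp +  0]  caller's ebp
    //     [esp +  4]  caller's esi
    //     [esp +  8]  arg5  -> loaded into ebp
    //     [esp + 12]  arg3  -> loaded into esi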
    core::arch::asm!(
        "push {arg3}",
        "push {arg5}",
        "push esi",
        "push ebp",
        "mov ebp, DWORD PTR [esp + 8]",
        "mov esi, DWORD PTR [esp + 12]",
        "int $0x80",
        "pop ebp",
        "pop esi",
        "pop {arg5}",
        "pop {arg3}",
        arg3 = in(reg) arg3,
        arg5 = in(reg) arg5,
        inlateout("eax") nr => r,
        in("ebx") arg0,
        in("ecx") arg1,
        in("edx") arg2,
        in("edi") arg4,
        options(preserves_flags)
    );

    r
}