use core::{
    mem::ManuallyDrop,
    ptr,
};

use super::abi::*;
use crate::{
    eprintln,
    result_from_value,
    unit_result_from_value,
    AsRawFd,
    RawFd,
};

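// Linux memory-mapping constants, matching the values in the kernel UAPI
// headers (`<sys/mman.h>`).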
pub const MAP_SHARED: u32 = 0x1;
pub const MAP_PRIVATE: u32 = 0x2;
pub const MAP_SHARED_VALIDATE: u32 = 0x3;
pub const MAP_FIXED: u32 = 0x10;
pub const MAP_ANONYMOUS: u32 = 0x20;
pub const MAP_GROWSDOWN: u32 = 0x100;
pub const MAP_LOCKED: u32 = 0x2000;
pub const MAP_NORESERVE: u32 = 0x4000;
pub const MAP_POPULATE: u32 = 0x8000;
pub const MAP_NONBLOCK: u32 = 0x10000;
pub const MAP_STACK: u32 = 0x20000;
pub const MAP_HUGETLB: u32 = 0x40000;
pub const MAP_SYNC: u32 = 0x80000;
pub const MAP_FIXED_NOREPLACE: u32 = 0x100000;

pub const MREMAP_MAYMOVE: u32 = 0x1;
pub const MREMAP_FIXED: u32 = 0x2;
pub const MREMAP_DONTUNMAP: u32 = 0x4;

pub const PROT_NONE: u32 = 0;
pub const PROT_READ: u32 = 1;
pub const PROT_WRITE: u32 = 2;
pub const PROT_EXEC: u32 = 4;

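/// An owned, memory-mapped region.
///
/// The region is stored as a raw slice pointer and is released with
/// [`munmap`] when the value is dropped; an `munmap` failure is only
/// logged, not propagated.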
pub struct Mmap {
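    /// Pointer to the mapped bytes together with the mapping's length.
    /// Expected to point at a live mapping obtained from `mmap`/`mremap`
    /// (or a sub-range produced by [`Mmap::split_at`]).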
    mem: *mut [u8],
}

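// SAFETY: `Mmap` has exclusive ownership of its mapping; the raw slice
// pointer is not aliased elsewhere, so the handle can be moved to and
// shared between threads.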
unsafe impl Send for Mmap {}
unsafe impl Sync for Mmap {}

impl Mmap {
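    /// Assumes ownership of an existing mapping.
    ///
    /// # Safety
    ///
    /// `mem` must point to a live mapping that stays valid for the lifetime
    /// of the returned value and that can be passed to [`munmap`] when it is
    /// dropped (unless ownership is released again via [`Mmap::into_raw`]).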
    pub unsafe fn from_raw(mem: *mut [u8]) -> Self {
        Self { mem }
    }

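    /// Splits the mapping into two independently owned mappings at `offset`.
    ///
    /// # Safety
    ///
    /// Each half is unmapped on its own when dropped, so `offset` should be
    /// page-aligned and within the mapping for both halves to remain valid
    /// mappings in their own right.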
    pub unsafe fn split_at(self, offset: usize) -> (Self, Self) {
        let mut this = ManuallyDrop::new(self);
        let (first, second) = this.as_mut().split_at_mut(offset);
        (Mmap::from_raw(first), Mmap::from_raw(second))
    }

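    /// Releases ownership of the mapping without unmapping it.
    ///
    /// The caller becomes responsible for eventually passing the pointer to
    /// [`munmap`] (or back to [`Mmap::from_raw`]).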
    pub fn into_raw(self) -> *mut [u8] {
        let this = ManuallyDrop::new(self);
        this.mem
    }
}

impl AsRef<[u8]> for Mmap {
    fn as_ref(&self) -> &[u8] {
        unsafe { &*self.mem }
    }
}

impl AsMut<[u8]> for Mmap {
    fn as_mut(&mut self) -> &mut [u8] {
        unsafe { &mut *self.mem }
    }
}

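// Unmap the region when the handle goes away; since `drop` cannot fail, an
// `munmap` error is reported via `eprintln!` and otherwise ignored.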
impl Drop for Mmap {
    fn drop(&mut self) {
        if let Err(e) = unsafe { munmap(self.mem) } {
            eprintln!(
                "munmap({addr:p}, {len}): {e}",
                addr = self.as_ref().as_ptr(),
                len = self.as_ref().len(),
            );
        }
    }
}

#[inline]
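/// Thin wrapper around the `mmap` syscall (number 9 on x86_64 Linux).
///
/// On success the kernel-chosen (or requested) address is wrapped in an
/// [`Mmap`] that unmaps the region on drop.
///
/// # Safety
///
/// The caller must uphold the usual `mmap(2)` contract; in particular,
/// `MAP_FIXED` with an `addr` that overlaps existing mappings silently
/// replaces them.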
pub unsafe fn mmap(
    addr: *mut u8,
    len: usize,
    prot: u32,
    flags: u32,
    fd: RawFd,
    off: usize,
) -> crate::Result<Mmap> {
    let ret = syscall_6(
        9,
        addr as usize,
        len,
        prot as usize,
        flags as usize,
        fd as isize as usize,
        off,
    );
    let final_addr = result_from_value(ret)?;
    let mem = ptr::slice_from_raw_parts_mut(final_addr as *mut u8, len);
    Ok(Mmap { mem })
}

#[inline]
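/// Maps a file-backed region, taking the descriptor from any [`AsRawFd`]
/// implementor instead of a raw [`RawFd`].
///
/// # Safety
///
/// Same contract as [`mmap`].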
pub unsafe fn mmap_file(
    addr: *mut u8,
    len: usize,
    prot: u32,
    flags: u32,
    fd: &impl AsRawFd,
    off: usize,
) -> crate::Result<Mmap> {
    mmap(addr, len, prot, flags, fd.as_raw_fd(), off)
}

#[inline]
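/// Maps anonymous (zero-initialized) memory.
///
/// `MAP_ANONYMOUS` is always added to `flags` and the file descriptor is set
/// to `-1`, as `mmap(2)` expects for anonymous mappings.
///
/// Hypothetical usage sketch (module paths and error handling elided):
///
/// ```ignore
/// // Map one page of private, read-write, zeroed memory.
/// let mut map = mmap_anonymous(ptr::null_mut(), 4096, PROT_READ | PROT_WRITE, MAP_PRIVATE)?;
/// map.as_mut()[0] = 42;
/// // Unmapped automatically when `map` is dropped.
/// ```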
#[allow(clippy::not_unsafe_ptr_arg_deref)]
pub fn mmap_anonymous(addr: *mut u8, len: usize, prot: u32, flags: u32) -> crate::Result<Mmap> {
    unsafe { mmap(addr, len, prot, flags | MAP_ANONYMOUS, -1, 0) }
}

#[inline]
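/// Resizes (and possibly moves) an existing mapping via the `mremap` syscall
/// (number 25 on x86_64 Linux), updating `mmap` in place with the new address
/// and length on success.
///
/// # Safety
///
/// The mapping may move (e.g. with `MREMAP_MAYMOVE` or `MREMAP_FIXED`), so
/// any outstanding pointers into the old region are invalidated.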
pub unsafe fn mremap(
    mmap: &mut Mmap,
    new_len: usize,
    flags: u32,
    new_addr: *mut u8,
) -> crate::Result<()> {
    let ret = syscall_5(
        25,
        mmap.as_mut().as_mut_ptr() as usize,
        mmap.as_mut().len(),
        new_len,
        flags as usize,
        new_addr as usize,
    );
    let final_addr = result_from_value(ret)?;
    let mem = ptr::slice_from_raw_parts_mut(final_addr as *mut u8, new_len);
    mmap.mem = mem;
    Ok(())
}

#[inline]
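/// Changes the protection of an existing mapping via the `mprotect` syscall
/// (number 10 on x86_64 Linux).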
pub fn mprotect(mmap: &Mmap, prot: u32) -> crate::Result<()> {
    let ret = unsafe {
        syscall_3(
            10,
            mmap.as_ref().as_ptr() as usize,
            mmap.as_ref().len(),
            prot as usize,
        ) as i32
    };
    unit_result_from_value(ret)
}

#[inline]
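/// Unmaps a region via the `munmap` syscall (number 11 on x86_64 Linux).
///
/// # Safety
///
/// `mem` must describe a mapping (or a page-aligned sub-range of one) that is
/// not referenced afterwards; any remaining pointers into it become dangling.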
pub unsafe fn munmap(mem: *mut [u8]) -> crate::Result<()> {
    let mem = &mut *mem;
    let ret = syscall_2(11, mem.as_mut_ptr() as usize, mem.len()) as i32;
    unit_result_from_value(ret)
}