sys_alloc/lib.rs

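//! Low-level anonymous memory mappings with a cross-platform API.
//!
//! Exposes the system [`page_size`] and [`allocation_granularity`], plus
//! [`MmapOptions`] for configuring and creating an [`Mmap`] handle that
//! unmaps its region when dropped.
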
use std::io;

#[cfg(unix)]
mod unix;
#[cfg(unix)]
use unix as os;

#[cfg(windows)]
mod windows;
#[cfg(windows)]
use windows as os;

pub use os::page_size;

/// Returns the system allocation granularity.
///
/// On Windows, this is typically 64 KiB; on Unix, it is the system page size.
/// When requesting a specific address, align it to this granularity.
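///
/// # Examples
///
/// Rounding an address up to the granularity. A minimal sketch; it relies
/// on the granularity being a power of two (asserted by this crate's tests)
/// and assumes the crate is built as `sys_alloc`:
///
/// ```
/// let g = sys_alloc::allocation_granularity();
/// let addr = 12_345usize;
/// let aligned = (addr + g - 1) & !(g - 1); // round up to a multiple of g
/// assert_eq!(aligned % g, 0);
/// assert!(aligned >= addr);
/// ```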
#[must_use]
pub fn allocation_granularity() -> usize {
    #[cfg(windows)]
    {
        os::allocation_granularity()
    }
    #[cfg(unix)]
    {
        os::page_size()
    }
}

/// A handle to a memory mapped region.
///
/// The region is automatically unmapped when this handle is dropped.
pub struct Mmap {
    inner: os::MmapInner,
}

impl Mmap {
    /// Returns a pointer to the start of the memory mapping.
    #[must_use]
    pub const fn ptr(&self) -> *mut u8 {
        self.inner.ptr()
    }

    /// Returns the length of the memory mapping in bytes.
    #[must_use]
    pub const fn len(&self) -> usize {
        self.inner.len()
    }

    /// Returns `true` if the memory mapping has length 0.
    #[must_use]
    pub const fn is_empty(&self) -> bool {
        self.len() == 0
    }

    /// Consumes the `Mmap` and returns the raw pointer and length.
    ///
    /// The memory will not be unmapped when the returned parts go out of
    /// scope. The caller is responsible for cleaning up the region, e.g. by
    /// reconstructing an `Mmap` with [`from_raw`](Self::from_raw) and
    /// dropping it.
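    ///
    /// # Examples
    ///
    /// A round-trip sketch: leak the mapping to raw parts, then reclaim and
    /// drop it to unmap (assumes the crate is built as `sys_alloc`):
    ///
    /// ```
    /// use sys_alloc::{page_size, Mmap, MmapOptions};
    ///
    /// let map = unsafe { MmapOptions::new().len(page_size()).map_anon()? };
    /// let (ptr, len) = map.into_raw(); // no longer unmapped on drop
    /// // ... hand (ptr, len) across an FFI boundary, etc. ...
    /// let map = unsafe { Mmap::from_raw(ptr, len) };
    /// drop(map); // unmaps the region
    /// # Ok::<(), std::io::Error>(())
    /// ```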
    #[must_use]
    pub fn into_raw(self) -> (*mut u8, usize) {
        let ptr = self.inner.ptr();
        let len = self.inner.len();
        // Skip `Drop` so the region stays mapped.
        std::mem::forget(self);
        (ptr, len)
    }

    /// Creates an `Mmap` from a raw pointer and length.
    ///
    /// # Safety
    ///
    /// The pointer and length must have come from a previous call to
    /// [`into_raw`](Self::into_raw), and the region must not have been
    /// unmapped or otherwise reused in the meantime.
    pub const unsafe fn from_raw(ptr: *mut u8, len: usize) -> Self {
        Self {
            inner: unsafe { os::MmapInner::from_raw(ptr, len) },
        }
    }
}

// SAFETY: `Mmap` exclusively owns its mapping; the region remains valid for
// the handle's lifetime and is not tied to the thread that created it.
unsafe impl Send for Mmap {}
unsafe impl Sync for Mmap {}

/// Configuration for creating a memory mapping.
#[derive(Debug, Clone)]
pub struct MmapOptions {
    len: usize,
    hint_addr: usize,
    populate: bool,
    no_reserve: bool,
    strict: bool,
}

impl MmapOptions {
    /// Creates a new `MmapOptions` with default settings (length 0).
    /// You must set a length before mapping.
    #[must_use]
    pub const fn new() -> Self {
        Self {
            len: 0,
            hint_addr: 0,
            populate: false,
            no_reserve: false,
            strict: false,
        }
    }

    /// Sets the length of the mapping in bytes.
    #[must_use]
    pub const fn len(mut self, len: usize) -> Self {
        self.len = len;
        self
    }

    /// Sets a hint address for the mapping.
    ///
    /// This is a request to the OS to place the mapping at this specific
    /// virtual address. On some platforms the OS is free to ignore the
    /// request, and the call may fail if the address is already in use or
    /// invalid.
    ///
    /// For the best chance of success:
    /// - The address should be aligned to `allocation_granularity()`.
    /// - The address range `[hint_addr, hint_addr + len)` should be free.
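    ///
    /// # Examples
    ///
    /// A sketch of building an aligned hint; the base address is an
    /// arbitrary illustration, not a guaranteed-free address:
    ///
    /// ```
    /// use sys_alloc::{allocation_granularity, MmapOptions};
    ///
    /// let g = allocation_granularity();
    /// let hint = (0x7000_0000usize / g) * g; // round down to a multiple of g
    /// let _opts = MmapOptions::new().len(g).with_hint(hint);
    /// ```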
    #[must_use]
    pub const fn with_hint(mut self, addr: usize) -> Self {
        self.hint_addr = addr;
        self
    }

    /// Sets whether to pre-populate (prefault) the page tables.
    ///
    /// On Linux, this adds `MAP_POPULATE`.
    #[must_use]
    pub const fn populate(mut self, populate: bool) -> Self {
        self.populate = populate;
        self
    }

    /// Sets whether to skip reserving swap space (on supported platforms).
    ///
    /// On Linux, this adds `MAP_NORESERVE`.
    #[must_use]
    pub const fn no_reserve(mut self, no_reserve: bool) -> Self {
        self.no_reserve = no_reserve;
        self
    }

    /// Sets whether the hint address is strict.
    ///
    /// If true, `map_anon` will return an error if the OS cannot map the
    /// memory at the exact requested `hint_addr`.
    #[must_use]
    pub const fn strict(mut self, strict: bool) -> Self {
        self.strict = strict;
        self
    }

    /// Creates an anonymous memory map.
    ///
    /// # Safety
    ///
    /// The returned `Mmap` owns its memory, so the pointer it yields stays
    /// valid for as long as the `Mmap` is alive. Creation is still marked
    /// unsafe because this is a low-level crate: the call performs a raw OS
    /// mapping, and every access through the resulting pointer is up to the
    /// caller to keep in bounds and correctly synchronized.
    ///
    /// # Errors
    ///
    /// Returns an error if the length is 0, if the system call fails
    /// (e.g. out of memory), or if strict hint compliance is requested but
    /// cannot be satisfied.
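    ///
    /// # Examples
    ///
    /// A minimal usage sketch (assumes the crate is built as `sys_alloc`):
    ///
    /// ```
    /// use sys_alloc::{page_size, MmapOptions};
    ///
    /// let len = page_size();
    /// let map = unsafe { MmapOptions::new().len(len).map_anon()? };
    /// assert!(!map.ptr().is_null());
    /// assert_eq!(map.len(), len);
    /// // The region is unmapped when `map` is dropped.
    /// # Ok::<(), std::io::Error>(())
    /// ```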
    pub unsafe fn map_anon(&self) -> io::Result<Mmap> {
        if self.len == 0 {
            return Err(io::Error::new(
                io::ErrorKind::InvalidInput,
                "length must be greater than 0",
            ));
        }

        let inner = unsafe {
            let inner =
                os::MmapInner::map_anon(self.hint_addr, self.len, self.populate, self.no_reserve)?;

            if self.strict && self.hint_addr != 0 {
                let ptr = inner.ptr() as usize;
                if ptr != self.hint_addr {
                    // Returning here drops `inner`, unmapping the misplaced
                    // region before we report the failure.
                    return Err(io::Error::new(
                        io::ErrorKind::AddrNotAvailable,
                        format!(
                            "strict hint failed: requested {:#x}, got {:#x}",
                            self.hint_addr, ptr
                        ),
                    ));
                }
            }

            inner
        };

        Ok(Mmap { inner })
    }
}

impl Default for MmapOptions {
    fn default() -> Self {
        Self::new()
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use std::ptr;

    #[test]
    fn test_page_size() {
        let ps = page_size();
        assert!(ps > 0);
        assert_eq!(ps & (ps - 1), 0, "Page size should be power of 2");
    }

    #[test]
    fn test_allocation_granularity() {
        let ag = allocation_granularity();
        assert!(ag > 0);
        assert_eq!(
            ag & (ag - 1),
            0,
            "Allocation granularity should be power of 2"
        );
        assert!(ag >= page_size());
    }

    #[test]
    fn test_basic_map() {
        let len = page_size();
        let mmap = unsafe {
            MmapOptions::new()
                .len(len)
                .map_anon()
                .expect("failed to map")
        };

        let ptr = mmap.ptr();
        assert!(!ptr.is_null());
        assert_eq!(ptr as usize % page_size(), 0);

        // Verify the mapping is readable and writable.
        unsafe {
            ptr::write_volatile(ptr, 42);
            assert_eq!(ptr::read_volatile(ptr), 42);
        }
    }

    #[test]
    fn test_map_with_hint() {
        // This test is heuristic: ASLR or memory limits may keep the OS from
        // honoring the hint, so we don't assert the exact address. We only
        // verify that a valid hint doesn't break the API contract.

        let len = allocation_granularity();

        // Pick a high address that is likely to be free and aligned.
        #[cfg(target_pointer_width = "64")]
        let hint_base = 0x6000_0000_0000usize;
        #[cfg(target_pointer_width = "32")]
        let hint_base = 0x4000_0000usize;

        let mmap_opts = MmapOptions::new().len(len).with_hint(hint_base);

        // Allow failure here because test environment constraints are unknown.
        if let Ok(mmap) = unsafe { mmap_opts.map_anon() } {
            let ptr = mmap.ptr();
            println!("Requested: {:x}, Got: {:x}", hint_base, ptr as usize);

            // Whatever address we got must be valid memory.
            unsafe {
                ptr::write_volatile(ptr, 99);
                assert_eq!(ptr::read_volatile(ptr), 99);
            }
        }
    }

    #[test]
    fn test_strict_hint_success() {
        let len = allocation_granularity();

        // Use a high address likely to be free.
        #[cfg(target_pointer_width = "64")]
        let hint_base = 0x6000_0000_0000usize;
        #[cfg(target_pointer_width = "32")]
        let hint_base = 0x4000_0000usize;

        let mmap_opts = MmapOptions::new()
            .len(len)
            .with_hint(hint_base)
            .strict(true);

        // Strict mapping may legitimately fail (e.g. if the address is
        // taken), and we can't easily force success here. But if it
        // succeeds, the returned address MUST match the hint.
        if let Ok(mmap) = unsafe { mmap_opts.map_anon() } {
            assert_eq!(
                mmap.ptr() as usize,
                hint_base,
                "Strict mapping returned wrong address"
            );
        }
    }

    #[test]
    fn test_strict_hint_fail() {
        let len = allocation_granularity();

        // First map something so we have an address that is known to be taken.
        #[cfg(target_pointer_width = "64")]
        let hint_base = 0x6100_0000_0000usize;
        #[cfg(target_pointer_width = "32")]
        let hint_base = 0x5000_0000usize;

        let mmap1 = unsafe {
            MmapOptions::new()
                .len(len)
                .with_hint(hint_base)
                // strict = false (default), so this is best-effort
                .map_anon()
        };

        if let Ok(m1) = mmap1 {
            // Whether or not mmap1 landed on hint_base, its actual address
            // is now taken. A strict mapping at that address must fail: on
            // Unix, mmap with a taken hint usually returns a different
            // address, which the strict check rejects; elsewhere the call
            // itself should fail.
            let taken_addr = m1.ptr() as usize;

            let result = unsafe {
                MmapOptions::new()
                    .len(len)
                    .with_hint(taken_addr)
                    .strict(true)
                    .map_anon()
            };

            assert!(
                result.is_err(),
                "Strict mapping should fail on taken address"
            );
        }
    }
}