mmap_rs/mmap.rs
use crate::areas::MemoryAreas;
use crate::error::Error;
use bitflags::bitflags;
use std::fs::File;
use std::ops::{Deref, DerefMut, Range};

#[cfg(unix)]
use crate::os_impl::unix as platform;

#[cfg(windows)]
use crate::os_impl::windows as platform;

bitflags! {
    /// The available flags to configure the allocated mapping.
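    ///
    /// # Example
    ///
    /// A minimal sketch of combining flags, as with any [`bitflags`] type (this assumes the
    /// crate is published as `mmap_rs` and re-exports `MmapFlags` at the root):
    ///
    /// ```
    /// use mmap_rs::MmapFlags;
    ///
    /// // Request a shared mapping with pre-populated pages.
    /// let flags = MmapFlags::SHARED | MmapFlags::POPULATE;
    /// assert!(flags.contains(MmapFlags::SHARED));
    /// ```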
    #[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
    pub struct MmapFlags: u32 {
        /// Maps the pages as shared such that any modifications are visible between processes.
        ///
        /// When mapping a file without specifying this flag, the pages may initially be mapped as
        /// shared, but a private copy will be created when any process writes to the memory
        /// mapping, such that any modification is not visible to other processes.
        const SHARED = 1 << 0;

        /// Ensure the allocated pages are populated, such that they do not cause page faults.
        const POPULATE = 1 << 1;

        /// Do not reserve swap space for this allocation.
        ///
        /// This flag acts as a no-op on platforms that do not support this feature.
        const NO_RESERVE = 1 << 2;

        /// Use huge pages for this allocation.
        const HUGE_PAGES = 1 << 3;

        /// The region grows downward like a stack on certain Unix platforms (e.g. FreeBSD).
        ///
        /// This flag acts as a no-op on platforms that do not support this feature.
        const STACK = 1 << 4;

        /// The pages will not be included in a core dump.
        ///
        /// This flag acts as a no-op on platforms that do not support this feature.
        const NO_CORE_DUMP = 1 << 5;

        /// Lock the physical memory to prevent page faults from happening when accessing the
        /// pages.
        const LOCKED = 1 << 6;

        /// Suggest using transparent huge pages for this allocation by calling `madvise()`.
        ///
        /// This flag acts as a no-op on platforms that do not support this feature.
        const TRANSPARENT_HUGE_PAGES = 1 << 7;

        /// Suggest that the mapped region will be accessed sequentially by calling `madvise()`.
        ///
        /// This flag acts as a no-op on platforms that do not support this feature.
        const SEQUENTIAL = 1 << 8;

        /// Suggest that the mapped region will be accessed randomly by calling `madvise()`.
        ///
        /// This flag acts as a no-op on platforms that do not support this feature.
        const RANDOM_ACCESS = 1 << 9;

        /// Suggest that the mapped region will be accessed in the near future by calling
        /// `madvise()`.
        ///
        /// This flag acts as a no-op on platforms that do not support this feature.
        const WILLNEED = 1 << 10;

        /// Suggest that the mapped region will not be accessed in the near future by calling
        /// `madvise()`.
        ///
        /// This flag acts as a no-op on platforms that do not support this feature.
        const DONTNEED = 1 << 11;
    }

    /// The available flags to configure the allocated mapping that are considered unsafe to use.
    #[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
    pub struct UnsafeMmapFlags: u32 {
        /// Maps the memory mapping at the address specified, replacing any pages that have been
        /// mapped at that address range.
        ///
        /// This is not supported on Microsoft Windows.
        const MAP_FIXED = 1 << 0;

        /// Allows mapping the page as RWX. While this may seem useful for self-modifying code and
        /// JIT engines, it is recommended to convert between mutable and executable mappings
        /// using [`Mmap::make_mut()`] and [`MmapMut::make_exec()`] instead.
        ///
        /// Because it may be tempting to use this flag, it has been (indirectly) marked as
        /// **unsafe**. Make sure to read the text below to understand the complications of this
        /// flag before using it.
        ///
        /// RWX pages are interesting targets for attackers, e.g. for buffer overflow attacks, as
        /// RWX mappings can potentially simplify such attacks. Without RWX mappings, attackers
        /// instead have to resort to return-oriented programming (ROP) gadgets. To prevent buffer
        /// overflow attacks, contemporary CPUs allow pages to be marked as non-executable, which
        /// is then used by the operating system to ensure that pages are either marked as
        /// writeable or as executable, but not both. This is also known as W^X.
        ///
        /// While the x86 and x86-64 architectures guarantee cache coherency between the L1
        /// instruction and the L1 data cache, other architectures such as Arm and AArch64 do not.
        /// If the user modifies the pages, then executing the code may result in undefined
        /// behavior. To ensure correct behavior, a user has to flush the instruction cache after
        /// modifying and before executing the page.
        const JIT = 1 << 1;
    }

    /// A set of (supported) page sizes.
    #[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
    pub struct PageSizes: usize {
        /// 4 KiB pages.
        const _4K = 1 << 12;
        /// 8 KiB pages.
        const _8K = 1 << 13;
        /// 16 KiB pages.
        const _16K = 1 << 14;
        /// 32 KiB pages.
        const _32K = 1 << 15;
        /// 64 KiB pages.
        const _64K = 1 << 16;
        /// 128 KiB pages.
        const _128K = 1 << 17;
        /// 256 KiB pages.
        const _256K = 1 << 18;
        /// 512 KiB pages.
        const _512K = 1 << 19;
        /// 1 MiB pages.
        const _1M = 1 << 20;
        /// 2 MiB pages.
        const _2M = 1 << 21;
        /// 4 MiB pages.
        const _4M = 1 << 22;
        /// 8 MiB pages.
        const _8M = 1 << 23;
        /// 16 MiB pages.
        const _16M = 1 << 24;
        /// 32 MiB pages.
        const _32M = 1 << 25;
        /// 64 MiB pages.
        const _64M = 1 << 26;
        /// 128 MiB pages.
        const _128M = 1 << 27;
        /// 256 MiB pages.
        const _256M = 1 << 28;
        /// 512 MiB pages.
        const _512M = 1 << 29;
        /// 1 GiB pages.
        const _1G = 1 << 30;
        /// 2 GiB pages.
        const _2G = 1 << 31;
        #[cfg(target_pointer_width = "64")]
        /// 4 GiB pages.
        const _4G = 1 << 32;
        #[cfg(target_pointer_width = "64")]
        /// 8 GiB pages.
        const _8G = 1 << 33;
        #[cfg(target_pointer_width = "64")]
        /// 16 GiB pages.
        const _16G = 1 << 34;
    }
}

/// The preferred size of the pages used for the mapping, where the size is in log2 notation.
///
/// Note that not all the offered page sizes may be available on the current platform.
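///
/// # Example
///
/// A minimal sketch of selecting a page size, either through one of the constants or by
/// converting a [`PageSizes`] set with exactly one bit set (this assumes the crate is
/// published as `mmap_rs` and re-exports these types at the root):
///
/// ```
/// use mmap_rs::{PageSize, PageSizes};
///
/// // 2 MiB pages: 1 << 21 bytes, i.e. 21 in log2 notation.
/// assert_eq!(PageSize::_2M, PageSize(21));
///
/// // Converting a set fails unless exactly one page size is selected.
/// assert!(PageSize::try_from(PageSizes::_4K | PageSizes::_2M).is_err());
/// ```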
#[derive(Clone, Copy, Debug, Eq, Ord, PartialEq, PartialOrd)]
pub struct PageSize(pub usize);

impl PageSize {
    /// Map the mapping using 4 KiB pages.
    pub const _4K: Self = Self(12);
    /// Map the mapping using 64 KiB pages.
    pub const _64K: Self = Self(16);
    /// Map the mapping using 512 KiB pages.
    pub const _512K: Self = Self(19);
    /// Map the mapping using 1 MiB pages.
    pub const _1M: Self = Self(20);
    /// Map the mapping using 2 MiB pages.
    pub const _2M: Self = Self(21);
    /// Map the mapping using 4 MiB pages.
    pub const _4M: Self = Self(22);
    /// Map the mapping using 8 MiB pages.
    pub const _8M: Self = Self(23);
    /// Map the mapping using 16 MiB pages.
    pub const _16M: Self = Self(24);
    /// Map the mapping using 32 MiB pages.
    pub const _32M: Self = Self(25);
    /// Map the mapping using 256 MiB pages.
    pub const _256M: Self = Self(28);
    /// Map the mapping using 512 MiB pages.
    pub const _512M: Self = Self(29);
    /// Map the mapping using 1 GiB pages.
    pub const _1G: Self = Self(30);
    /// Map the mapping using 2 GiB pages.
    pub const _2G: Self = Self(31);
    /// Map the mapping using 16 GiB pages.
    pub const _16G: Self = Self(34);
}

impl TryFrom<PageSizes> for PageSize {
    type Error = Error;

    fn try_from(page_sizes: PageSizes) -> Result<PageSize, Error> {
        if page_sizes.bits().count_ones() != 1 {
            return Err(Error::InvalidSize);
        }

        // `PageSizes` stores sizes in bytes, while `PageSize` stores them in log2 notation, so
        // take the position of the single set bit.
        Ok(PageSize(page_sizes.bits().trailing_zeros() as usize))
    }
}

macro_rules! reserved_mmap_impl {
    ($t:ident) => {
        impl $t {
            /// Returns the start address of this mapping.
            #[inline]
            pub fn start(&self) -> usize {
                self.inner.as_ptr() as usize
            }

            /// Returns the end address of this mapping.
            #[inline]
            pub fn end(&self) -> usize {
                self.start() + self.size()
            }

            /// Yields a raw immutable pointer of this mapping.
            #[inline]
            pub fn as_ptr(&self) -> *const u8 {
                self.inner.as_ptr()
            }

            /// Yields a raw mutable pointer of this mapping.
            #[inline]
            pub fn as_mut_ptr(&mut self) -> *mut u8 {
                self.inner.as_mut_ptr()
            }

            /// Yields the size of this mapping.
            #[inline]
            pub fn size(&self) -> usize {
                self.inner.size()
            }

            /// Merges the memory maps into one. The memory maps must be adjacent to each other and
            /// share the same attributes and backing. On success, this consumes the other memory
            /// map object. Otherwise, this returns an error together with the original memory map
            /// that failed to be merged.
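            ///
            /// # Example
            ///
            /// A minimal sketch that splits a mapping in two and merges the halves back together
            /// (this assumes the crate is published as `mmap_rs` and that two pages can be
            /// allocated):
            ///
            /// ```no_run
            /// use mmap_rs::MmapOptions;
            ///
            /// # fn main() -> Result<(), mmap_rs::Error> {
            /// let size = 2 * MmapOptions::page_size();
            /// let mut left = MmapOptions::new(size)?.map_mut()?;
            ///
            /// // Split off the second page; the two halves remain adjacent in the address space.
            /// let right = left.split_off(MmapOptions::page_size())?;
            ///
            /// // Merge the second page back into the first mapping.
            /// left.merge(right).map_err(|(e, _)| e)?;
            /// assert_eq!(left.size(), size);
            /// # Ok(())
            /// # }
            /// ```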
            pub fn merge(&mut self, other: Self) -> Result<(), (Error, Self)> {
                // Ensure the memory maps are adjacent.
                if self.end() != other.start() {
                    return Err((Error::MustBeAdjacent, other));
                }

                // Ensure the protection attributes match.
                let region = match MemoryAreas::query(self.start()) {
                    Ok(Some(region)) => region,
                    Ok(None) => return Err((Error::AttributeMismatch, other)),
                    Err(e) => return Err((e, other)),
                };

                let other_region = match MemoryAreas::query(other.start()) {
                    Ok(Some(region)) => region,
                    Ok(None) => return Err((Error::AttributeMismatch, other)),
                    Err(e) => return Err((e, other)),
                };

                if region.protection != other_region.protection {
                    return Err((Error::AttributeMismatch, other));
                }

                if let Err(e) = self.inner.merge(&other.inner) {
                    return Err((e, other));
                }

                std::mem::forget(other);

                Ok(())
            }

            /// Splits the memory map into two at the given byte offset. The byte offset must be
            /// page size aligned.
            ///
            /// Afterwards `self` is limited to the range `[0, at)`, and the returned memory
            /// mapping is limited to `[at, len)`.
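            ///
            /// # Example
            ///
            /// A minimal sketch (this assumes the crate is published as `mmap_rs`):
            ///
            /// ```no_run
            /// use mmap_rs::MmapOptions;
            ///
            /// # fn main() -> Result<(), mmap_rs::Error> {
            /// let page_size = MmapOptions::page_size();
            /// let mut mapping = MmapOptions::new(2 * page_size)?.map_mut()?;
            ///
            /// // `mapping` keeps the first page; `rest` covers the second page.
            /// let rest = mapping.split_off(page_size)?;
            /// assert_eq!(mapping.size(), page_size);
            /// assert_eq!(rest.size(), page_size);
            /// # Ok(())
            /// # }
            /// ```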
            pub fn split_off(&mut self, at: usize) -> Result<Self, Error> {
                let inner = self.inner.split_off(at)?;

                Ok(Self { inner })
            }

            /// Splits the memory map into two at the given byte offset. The byte offset must be
            /// page size aligned.
            ///
            /// Afterwards `self` is limited to the range `[at, len)`, and the returned memory
            /// mapping is limited to `[0, at)`.
            pub fn split_to(&mut self, at: usize) -> Result<Self, Error> {
                let inner = self.inner.split_to(at)?;

                Ok(Self { inner })
            }
        }
    };
}

macro_rules! mmap_impl {
    ($t:ident) => {
        impl $t {
            /// Locks the physical pages in memory such that accessing the mapping causes no page faults.
            pub fn lock(&mut self) -> Result<(), Error> {
                self.inner.lock()
            }

            /// Unlocks the physical pages in memory, allowing the operating system to swap out the pages
            /// backing this memory mapping.
            pub fn unlock(&mut self) -> Result<(), Error> {
                self.inner.unlock()
            }

            /// Flushes a range of the memory mapping, i.e. this initiates writing dirty pages
            /// within that range to the disk. Dirty pages are those whose contents have changed
            /// since the file was mapped.
            ///
            /// On Microsoft Windows, this function does not flush the file metadata. Thus, it must
            /// be followed by a call to [`File::sync_all`] to flush the file metadata. This also
            /// causes the flush operation to be synchronous.
            ///
            /// On other platforms, the flush operation is synchronous, i.e. this waits until the
            /// flush operation completes.
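            ///
            /// # Example
            ///
            /// A minimal sketch of flushing a shared, file-backed mapping and then syncing the
            /// file metadata for portability to Microsoft Windows (this assumes the crate is
            /// published as `mmap_rs`; the file path is purely illustrative):
            ///
            /// ```no_run
            /// use mmap_rs::{MmapFlags, MmapOptions};
            /// use std::fs::OpenOptions;
            ///
            /// # fn main() -> Result<(), Box<dyn std::error::Error>> {
            /// let file = OpenOptions::new().read(true).write(true).open("data.bin")?;
            /// let size = file.metadata()?.len() as usize;
            ///
            /// let mut mapping = unsafe {
            ///     MmapOptions::new(size)?
            ///         .with_file(&file, 0)
            ///         .with_flags(MmapFlags::SHARED)
            ///         .map_mut()?
            /// };
            ///
            /// mapping[0] = 0x42;
            ///
            /// // Write back the dirty pages, then flush the file metadata.
            /// mapping.flush(0..size)?;
            /// file.sync_all()?;
            /// # Ok(())
            /// # }
            /// ```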
            pub fn flush(&self, range: Range<usize>) -> Result<(), Error> {
                self.inner.flush(range)
            }

            /// Flushes a range of the memory mapping asynchronously, i.e. this initiates writing
            /// dirty pages within that range to the disk without waiting for the flush operation
            /// to complete. Dirty pages are those whose contents have changed since the file was
            /// mapped.
            pub fn flush_async(&self, range: Range<usize>) -> Result<(), Error> {
                self.inner.flush_async(range)
            }

            /// This function can be used to flush the instruction cache on architectures where
            /// this is required.
            ///
            /// While the x86 and x86-64 architectures guarantee cache coherency between the L1
            /// instruction and the L1 data cache, other architectures such as Arm and AArch64 do
            /// not. If the user modifies the pages, then executing the code may result in
            /// undefined behavior. To ensure correct behavior, a user has to flush the instruction
            /// cache after modifying and before executing the page.
            pub fn flush_icache(&self) -> Result<(), Error> {
                self.inner.flush_icache()
            }

            /// Remaps this memory mapping as inaccessible.
            ///
            /// In case of failure, this returns the ownership of `self`.
            pub fn make_none(mut self) -> Result<MmapNone, (Self, Error)> {
                if let Err(e) = self.inner.make_none() {
                    return Err((self, e));
                }

                Ok(MmapNone { inner: self.inner })
            }

            /// Remaps this memory mapping as immutable.
            ///
            /// In case of failure, this returns the ownership of `self`. If you are
            /// not interested in this feature, you can use the implementation of
            /// the [`TryFrom`] trait instead.
            pub fn make_read_only(mut self) -> Result<Mmap, (Self, Error)> {
                if let Err(e) = self.inner.make_read_only() {
                    return Err((self, e));
                }

                Ok(Mmap { inner: self.inner })
            }

            /// Remaps this memory mapping as executable.
            ///
            /// In case of failure, this returns the ownership of `self`.
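            ///
            /// # Example
            ///
            /// A minimal sketch of the recommended W^X workflow: write code into a mutable
            /// mapping, then remap it as executable (this assumes the crate is published as
            /// `mmap_rs`; the machine code bytes are placeholders):
            ///
            /// ```no_run
            /// use mmap_rs::MmapOptions;
            ///
            /// # fn main() -> Result<(), mmap_rs::Error> {
            /// let mut mapping = MmapOptions::new(MmapOptions::page_size())?.map_mut()?;
            ///
            /// // Emit the generated code into the mutable mapping (placeholder bytes here).
            /// mapping[..4].copy_from_slice(&[0xde, 0xad, 0xbe, 0xef]);
            ///
            /// // Remap as executable; this also flushes the instruction cache.
            /// let code = mapping.make_exec().map_err(|(_, e)| e)?;
            /// let entry = code.as_ptr();
            /// # let _ = entry;
            /// # Ok(())
            /// # }
            /// ```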
            pub fn make_exec(mut self) -> Result<Mmap, (Self, Error)> {
                if let Err(e) = self.inner.make_exec() {
                    return Err((self, e));
                }

                if let Err(e) = self.inner.flush_icache() {
                    return Err((self, e));
                }

                Ok(Mmap { inner: self.inner })
            }

            /// Remaps this memory mapping as executable, but does not flush the instruction cache.
            ///
            /// # Safety
            ///
            /// While the x86 and x86-64 architectures guarantee cache coherency between the L1
            /// instruction and the L1 data cache, other architectures such as Arm and AArch64 do
            /// not. If the user modifies the pages, then executing the code may result in
            /// undefined behavior. To ensure correct behavior, a user has to flush the instruction
            /// cache after modifying and before executing the page.
            ///
            /// In case of failure, this returns the ownership of `self`.
            pub unsafe fn make_exec_no_flush(mut self) -> Result<Mmap, (Self, Error)> {
                if let Err(e) = self.inner.make_exec() {
                    return Err((self, e));
                }

                Ok(Mmap { inner: self.inner })
            }

            /// Remaps this mapping to be mutable.
            ///
            /// In case of failure, this returns the ownership of `self`. If you are
            /// not interested in this feature, you can use the implementation of
            /// the [`TryFrom`] trait instead.
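            ///
            /// # Example
            ///
            /// A minimal sketch using the [`TryFrom`] implementation, which discards the mapping
            /// on failure instead of returning it (this assumes the crate is published as
            /// `mmap_rs`):
            ///
            /// ```no_run
            /// use mmap_rs::{MmapMut, MmapOptions};
            ///
            /// # fn main() -> Result<(), mmap_rs::Error> {
            /// let mapping = MmapOptions::new(MmapOptions::page_size())?.map()?;
            ///
            /// // Equivalent to `mapping.make_mut()`, but drops `self` on failure.
            /// let mut mutable = MmapMut::try_from(mapping)?;
            /// mutable[0] = 1;
            /// # Ok(())
            /// # }
            /// ```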
            pub fn make_mut(mut self) -> Result<MmapMut, (Self, Error)> {
                if let Err(e) = self.inner.make_mut() {
                    return Err((self, e));
                }

                Ok(MmapMut { inner: self.inner })
            }

            /// Remaps this mapping to be executable and mutable.
            ///
            /// While this may seem useful for self-modifying code and JIT engines, it is
            /// recommended to convert between mutable and executable mappings using
            /// [`Mmap::make_mut()`] and [`MmapMut::make_exec()`] instead.
            ///
            /// Make sure to read the text below to understand the complications of this function
            /// before using it. The [`UnsafeMmapFlags::JIT`] flag must be set for this function to
            /// succeed.
            ///
            /// # Safety
            ///
            /// RWX pages are interesting targets for attackers, e.g. for buffer overflow attacks,
            /// as RWX mappings can potentially simplify such attacks. Without RWX mappings,
            /// attackers instead have to resort to return-oriented programming (ROP) gadgets. To
            /// prevent buffer overflow attacks, contemporary CPUs allow pages to be marked as
            /// non-executable, which is then used by the operating system to ensure that pages are
            /// either marked as writeable or as executable, but not both. This is also known as
            /// W^X.
            ///
            /// While the x86 and x86-64 architectures guarantee cache coherency between the L1
            /// instruction and the L1 data cache, other architectures such as Arm and AArch64 do
            /// not. If the user modifies the pages, then executing the code may result in
            /// undefined behavior. To ensure correct behavior, a user has to flush the instruction
            /// cache after modifying and before executing the page.
            ///
            /// In case of failure, this returns the ownership of `self`.
            pub unsafe fn make_exec_mut(mut self) -> Result<MmapMut, (Self, Error)> {
                if let Err(e) = self.inner.make_exec_mut() {
                    return Err((self, e));
                }

                Ok(MmapMut { inner: self.inner })
            }
        }
    };
}

/// Represents an inaccessible memory mapping.
#[derive(Debug)]
pub struct MmapNone {
    inner: platform::Mmap,
}

mmap_impl!(MmapNone);
reserved_mmap_impl!(MmapNone);

/// Represents an immutable memory mapping.
#[derive(Debug)]
pub struct Mmap {
    inner: platform::Mmap,
}

mmap_impl!(Mmap);
reserved_mmap_impl!(Mmap);

impl Mmap {
    /// Extracts a slice containing the entire mapping.
    ///
    /// This is equivalent to `&mapping[..]`.
    #[inline]
    pub fn as_slice(&self) -> &[u8] {
        &self[..]
    }
}

impl Deref for Mmap {
    type Target = [u8];

    fn deref(&self) -> &Self::Target {
        unsafe { std::slice::from_raw_parts(self.as_ptr(), self.size()) }
    }
}

impl AsRef<[u8]> for Mmap {
    fn as_ref(&self) -> &[u8] {
        unsafe { std::slice::from_raw_parts(self.as_ptr(), self.size()) }
    }
}

impl TryFrom<MmapMut> for Mmap {
    type Error = Error;
    fn try_from(mmap_mut: MmapMut) -> Result<Self, Self::Error> {
        match mmap_mut.make_read_only() {
            Ok(mmap) => Ok(mmap),
            Err((_, e)) => Err(e),
        }
    }
}

impl TryFrom<MmapNone> for Mmap {
    type Error = Error;
    fn try_from(mmap_none: MmapNone) -> Result<Self, Self::Error> {
        match mmap_none.make_read_only() {
            Ok(mmap) => Ok(mmap),
            Err((_, e)) => Err(e),
        }
    }
}

/// Represents a mutable memory mapping.
#[derive(Debug)]
pub struct MmapMut {
    inner: platform::Mmap,
}

mmap_impl!(MmapMut);
reserved_mmap_impl!(MmapMut);

impl MmapMut {
    /// Extracts a slice containing the entire mapping.
    ///
    /// This is equivalent to `&mapping[..]`.
    #[inline]
    pub fn as_slice(&self) -> &[u8] {
        &self[..]
    }

    /// Extracts a mutable slice containing the entire mapping.
    ///
    /// This is equivalent to `&mut mapping[..]`.
    #[inline]
    pub fn as_mut_slice(&mut self) -> &mut [u8] {
        &mut self[..]
    }
}

impl TryFrom<Mmap> for MmapMut {
    type Error = Error;
    fn try_from(mmap: Mmap) -> Result<Self, Self::Error> {
        match mmap.make_mut() {
            Ok(mmap_mut) => Ok(mmap_mut),
            Err((_, e)) => Err(e),
        }
    }
}

impl TryFrom<MmapNone> for MmapMut {
    type Error = Error;
    fn try_from(mmap_none: MmapNone) -> Result<Self, Self::Error> {
        match mmap_none.make_mut() {
            Ok(mmap_mut) => Ok(mmap_mut),
            Err((_, e)) => Err(e),
        }
    }
}

impl Deref for MmapMut {
    type Target = [u8];

    fn deref(&self) -> &Self::Target {
        unsafe { std::slice::from_raw_parts(self.as_ptr(), self.size()) }
    }
}

impl DerefMut for MmapMut {
    fn deref_mut(&mut self) -> &mut Self::Target {
        unsafe { std::slice::from_raw_parts_mut(self.as_mut_ptr(), self.size()) }
    }
}

impl AsRef<[u8]> for MmapMut {
    fn as_ref(&self) -> &[u8] {
        unsafe { std::slice::from_raw_parts(self.as_ptr(), self.size()) }
    }
}

impl AsMut<[u8]> for MmapMut {
    fn as_mut(&mut self) -> &mut [u8] {
        unsafe { std::slice::from_raw_parts_mut(self.as_mut_ptr(), self.size()) }
    }
}

/// Represents the options for the memory mapping.
#[derive(Debug)]
pub struct MmapOptions<'a> {
    inner: platform::MmapOptions<'a>,
}

impl<'a> MmapOptions<'a> {
    /// Constructs the `MmapOptions` builder. The size specified is the size of the mapping to be
    /// allocated in bytes.
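    ///
    /// # Example
    ///
    /// A minimal sketch of allocating one page of anonymous, mutable memory (this assumes the
    /// crate is published as `mmap_rs`):
    ///
    /// ```no_run
    /// use mmap_rs::MmapOptions;
    ///
    /// # fn main() -> Result<(), mmap_rs::Error> {
    /// let mut mapping = MmapOptions::new(MmapOptions::page_size())?.map_mut()?;
    ///
    /// mapping[0] = 0x42;
    /// assert_eq!(mapping[0], 0x42);
    /// # Ok(())
    /// # }
    /// ```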
    pub fn new(size: usize) -> Result<Self, Error> {
        Ok(Self {
            inner: platform::MmapOptions::new(size)?,
        })
    }

    /// Returns the smallest possible page size for the current platform. The allocation size must
    /// be aligned to the page size for the allocation to succeed.
    pub fn page_size() -> usize {
        platform::MmapOptions::page_size()
    }

    /// Returns the set of supported page sizes for the current platform.
    pub fn page_sizes() -> Result<PageSizes, Error> {
        platform::MmapOptions::page_sizes()
    }

    /// Returns the allocation granularity for the current platform. On some platforms the
    /// allocation granularity may be a multiple of the page size. The start address of the
    /// allocation must be aligned to `max(allocation_granularity, page_size)`.
    pub fn allocation_granularity() -> usize {
        platform::MmapOptions::allocation_granularity()
    }

    /// The desired address at which the memory should be mapped.
    pub fn with_address(self, address: usize) -> Self {
        Self {
            inner: self.inner.with_address(address),
        }
    }

    /// Whether the memory mapping should be backed by a [`File`] or not. If the memory mapping
    /// should be backed by a [`File`], then the user can also specify the offset within the file
    /// at which the mapping should start.
    ///
    /// On Microsoft Windows, it may not be possible to extend the protection beyond the access
    /// mask that has been used to open the file. For instance, if a file has been opened with read
    /// access, then [`Mmap::make_mut()`] will not work. Furthermore, [`std::fs::OpenOptions`] does
    /// not in itself provide a standardized way to open the file with executable access. However,
    /// if the file is not opened with executable access, then it may not be possible to use
    /// [`Mmap::make_exec()`]. Fortunately, Rust provides [`OpenOptionsExt`] that allows you to
    /// open the file with executable access rights. See [`access_mode`] for more information.
    ///
    /// # Safety
    ///
    /// This function is marked as **unsafe** as the user should be aware that even in the case
    /// that a file is mapped as immutable in the address space of the current process, it does not
    /// guarantee that there does not exist any other mutable mapping to the file.
    ///
    /// On Microsoft Windows, it is possible to limit the access to shared reading or to be fully
    /// exclusive using [`share_mode`].
    ///
    /// On most Unix systems, it is possible to use [`nix::fcntl::flock`]. However, keep in mind
    /// that this provides an **advisory** locking scheme, and that implementations are therefore
    /// required to be co-operative.
    ///
    /// On Linux, it is also possible to mark the file as immutable. See `man 2 ioctl_iflags` and
    /// `man 1 chattr` for more information.
    ///
    /// [`OpenOptionsExt`]: https://doc.rust-lang.org/std/os/windows/fs/trait.OpenOptionsExt.html
    /// [`access_mode`]: https://doc.rust-lang.org/std/os/windows/fs/trait.OpenOptionsExt.html#tymethod.access_mode
    /// [`share_mode`]: https://doc.rust-lang.org/std/os/windows/fs/trait.OpenOptionsExt.html#tymethod.share_mode
    /// [`nix::fcntl::flock`]: https://docs.rs/nix/latest/nix/fcntl/fn.flock.html
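    ///
    /// # Example
    ///
    /// A minimal sketch of mapping a file read-only (this assumes the crate is published as
    /// `mmap_rs`; the file path is purely illustrative):
    ///
    /// ```no_run
    /// use mmap_rs::MmapOptions;
    /// use std::fs::File;
    ///
    /// # fn main() -> Result<(), Box<dyn std::error::Error>> {
    /// let file = File::open("data.bin")?;
    /// let size = file.metadata()?.len() as usize;
    ///
    /// let mapping = unsafe { MmapOptions::new(size)?.with_file(&file, 0).map()? };
    /// println!("first byte: {}", mapping[0]);
    /// # Ok(())
    /// # }
    /// ```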
    pub unsafe fn with_file(self, file: &'a File, offset: u64) -> Self {
        Self {
            inner: self.inner.with_file(file, offset),
        }
    }

    /// The desired configuration of the mapping. See [`MmapFlags`] for available options.
    pub fn with_flags(self, flags: MmapFlags) -> Self {
        Self {
            inner: self.inner.with_flags(flags),
        }
    }

    /// The desired configuration of the mapping. See [`UnsafeMmapFlags`] for available options.
    ///
    /// # Safety
    ///
    /// The flags that can be passed to this function have unsafe behavior associated with them.
    pub unsafe fn with_unsafe_flags(self, flags: UnsafeMmapFlags) -> Self {
        Self {
            inner: self.inner.with_unsafe_flags(flags),
        }
    }

    /// Whether this memory mapping should be backed by a specific page size or not.
    pub fn with_page_size(self, page_size: PageSize) -> Self {
        Self {
            inner: self.inner.with_page_size(page_size),
        }
    }

    /// Reserves inaccessible memory.
    pub fn reserve_none(self) -> Result<ReservedNone, Error> {
        Ok(ReservedNone {
            inner: self.inner.reserve_none()?,
        })
    }

    /// Reserves immutable memory.
    pub fn reserve(self) -> Result<Reserved, Error> {
        Ok(Reserved {
            inner: self.inner.reserve()?,
        })
    }

    /// Reserves executable memory.
    pub fn reserve_exec(self) -> Result<Reserved, Error> {
        Ok(Reserved {
            inner: self.inner.reserve_exec()?,
        })
    }

    /// Reserves mutable memory.
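    ///
    /// # Example
    ///
    /// A minimal sketch of reserving address space and committing it later through the
    /// [`TryFrom`] implementation (this assumes the crate is published as `mmap_rs`):
    ///
    /// ```no_run
    /// use mmap_rs::{MmapMut, MmapOptions};
    ///
    /// # fn main() -> Result<(), mmap_rs::Error> {
    /// // Reserve the address range without backing it with physical pages yet.
    /// let reserved = MmapOptions::new(MmapOptions::page_size())?.reserve_mut()?;
    ///
    /// // Commit the reserved range, turning it into a usable mutable mapping.
    /// let mut mapping = MmapMut::try_from(reserved)?;
    /// mapping[0] = 0x42;
    /// # Ok(())
    /// # }
    /// ```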
    pub fn reserve_mut(self) -> Result<ReservedMut, Error> {
        Ok(ReservedMut {
            inner: self.inner.reserve_mut()?,
        })
    }

    /// Reserves executable and mutable memory.
    ///
    /// # Safety
    ///
    /// See [`MmapOptions::map_exec_mut`] for more information.
    pub unsafe fn reserve_exec_mut(self) -> Result<ReservedMut, Error> {
        Ok(ReservedMut {
            inner: self.inner.reserve_exec_mut()?,
        })
    }

    /// Maps the memory as inaccessible.
    pub fn map_none(self) -> Result<MmapNone, Error> {
        Ok(MmapNone {
            inner: self.inner.map_none()?,
        })
    }

    /// Maps the memory as immutable.
    pub fn map(self) -> Result<Mmap, Error> {
        Ok(Mmap {
            inner: self.inner.map()?,
        })
    }

    /// Maps the memory as executable.
    pub fn map_exec(self) -> Result<Mmap, Error> {
        Ok(Mmap {
            inner: self.inner.map_exec()?,
        })
    }

    /// Maps the memory as mutable.
    pub fn map_mut(self) -> Result<MmapMut, Error> {
        Ok(MmapMut {
            inner: self.inner.map_mut()?,
        })
    }

    /// Maps the memory as executable and mutable.
    ///
    /// While this may seem useful for self-modifying code and JIT engines, it is recommended to
    /// convert between mutable and executable mappings using [`Mmap::make_mut()`] and
    /// [`MmapMut::make_exec()`] instead.
    ///
    /// Make sure to read the text below to understand the complications of this function before
    /// using it. The [`UnsafeMmapFlags::JIT`] flag must be set for this function to succeed.
    ///
    /// # Safety
    ///
    /// RWX pages are interesting targets for attackers, e.g. for buffer overflow attacks, as RWX
    /// mappings can potentially simplify such attacks. Without RWX mappings, attackers instead
    /// have to resort to return-oriented programming (ROP) gadgets. To prevent buffer overflow
    /// attacks, contemporary CPUs allow pages to be marked as non-executable, which is then used
    /// by the operating system to ensure that pages are either marked as writeable or as
    /// executable, but not both. This is also known as W^X.
    ///
    /// While the x86 and x86-64 architectures guarantee cache coherency between the L1 instruction
    /// and the L1 data cache, other architectures such as Arm and AArch64 do not. If the user
    /// modifies the pages, then executing the code may result in undefined behavior. To ensure
    /// correct behavior, a user has to flush the instruction cache after modifying and before
    /// executing the page.
    pub unsafe fn map_exec_mut(self) -> Result<MmapMut, Error> {
        Ok(MmapMut {
            inner: self.inner.map_exec_mut()?,
        })
    }
}

macro_rules! reserved_impl {
    ($t:ident) => {
        impl $t {
            /// Returns `true` if the memory mapping has a size of 0.
            #[inline]
            pub fn is_empty(&self) -> bool {
                self.inner.size() == 0
            }

            /// Yields the length of this mapping.
            #[inline]
            pub fn len(&self) -> usize {
                self.inner.size()
            }

            /// Remaps this memory mapping as inaccessible.
            ///
            /// In case of failure, this returns the ownership of `self`. If you are
            /// not interested in this feature, you can use the implementation of
            /// the [`TryFrom`] trait instead.
            pub fn make_none(mut self) -> Result<ReservedNone, (Self, Error)> {
                if let Err(e) = self.inner.make_none() {
                    return Err((self, e));
                }

                Ok(ReservedNone { inner: self.inner })
            }

            /// Remaps this memory mapping as immutable.
            ///
            /// In case of failure, this returns the ownership of `self`. If you are
            /// not interested in this feature, you can use the implementation of
            /// the [`TryFrom`] trait instead.
            pub fn make_read_only(mut self) -> Result<Reserved, (Self, Error)> {
                if let Err(e) = self.inner.make_read_only() {
                    return Err((self, e));
                }

                Ok(Reserved { inner: self.inner })
            }

            /// Remaps this memory mapping as executable.
            ///
            /// In case of failure, this returns the ownership of `self`.
            pub fn make_exec(mut self) -> Result<Reserved, (Self, Error)> {
                if let Err(e) = self.inner.make_exec() {
                    return Err((self, e));
                }

                if let Err(e) = self.inner.flush_icache() {
                    return Err((self, e));
                }

                Ok(Reserved { inner: self.inner })
            }

            /// Remaps this memory mapping as executable, but does not flush the instruction cache.
            ///
            /// # Safety
            ///
            /// While the x86 and x86-64 architectures guarantee cache coherency between the L1
            /// instruction and the L1 data cache, other architectures such as Arm and AArch64 do
            /// not. If the user modifies the pages, then executing the code may result in
            /// undefined behavior. To ensure correct behavior, a user has to flush the instruction
            /// cache after modifying and before executing the page.
            ///
            /// In case of failure, this returns the ownership of `self`.
            pub unsafe fn make_exec_no_flush(mut self) -> Result<Reserved, (Self, Error)> {
                if let Err(e) = self.inner.make_exec() {
                    return Err((self, e));
                }

                Ok(Reserved { inner: self.inner })
            }

            /// Remaps this mapping to be mutable.
            ///
            /// In case of failure, this returns the ownership of `self`. If you are
            /// not interested in this feature, you can use the implementation of
            /// the [`TryFrom`] trait instead.
            pub fn make_mut(mut self) -> Result<ReservedMut, (Self, Error)> {
                if let Err(e) = self.inner.make_mut() {
                    return Err((self, e));
                }

                Ok(ReservedMut { inner: self.inner })
            }

            /// Remaps this mapping to be executable and mutable.
            ///
            /// While this may seem useful for self-modifying code and JIT engines, it is
            /// recommended to convert between mutable and executable mappings using
            /// [`Mmap::make_mut()`] and [`MmapMut::make_exec()`] instead.
            ///
            /// Make sure to read the text below to understand the complications of this function
            /// before using it. The [`UnsafeMmapFlags::JIT`] flag must be set for this function to
            /// succeed.
            ///
            /// # Safety
            ///
            /// RWX pages are interesting targets for attackers, e.g. for buffer overflow attacks,
            /// as RWX mappings can potentially simplify such attacks. Without RWX mappings,
            /// attackers instead have to resort to return-oriented programming (ROP) gadgets. To
            /// prevent buffer overflow attacks, contemporary CPUs allow pages to be marked as
            /// non-executable, which is then used by the operating system to ensure that pages are
            /// either marked as writeable or as executable, but not both. This is also known as
            /// W^X.
            ///
            /// While the x86 and x86-64 architectures guarantee cache coherency between the L1
            /// instruction and the L1 data cache, other architectures such as Arm and AArch64 do
            /// not. If the user modifies the pages, then executing the code may result in
            /// undefined behavior. To ensure correct behavior, a user has to flush the instruction
            /// cache after modifying and before executing the page.
            ///
            /// In case of failure, this returns the ownership of `self`.
            pub unsafe fn make_exec_mut(mut self) -> Result<ReservedMut, (Self, Error)> {
                if let Err(e) = self.inner.make_exec_mut() {
                    return Err((self, e));
                }

                Ok(ReservedMut { inner: self.inner })
            }
        }
    };
}

/// Represents an inaccessible memory mapping in a reserved state, i.e. a memory mapping that is
/// not backed by any physical pages yet.
#[derive(Debug)]
pub struct ReservedNone {
    inner: platform::Mmap,
}

reserved_impl!(ReservedNone);
reserved_mmap_impl!(ReservedNone);

impl TryFrom<ReservedNone> for MmapNone {
    type Error = Error;

    fn try_from(mut reserved_none: ReservedNone) -> Result<MmapNone, Error> {
        reserved_none.inner.commit()?;

        Ok(MmapNone {
            inner: reserved_none.inner,
        })
    }
}

impl TryFrom<ReservedMut> for Reserved {
    type Error = Error;
    fn try_from(reserved_mut: ReservedMut) -> Result<Self, Self::Error> {
        match reserved_mut.make_read_only() {
            Ok(reserved) => Ok(reserved),
            Err((_, e)) => Err(e),
        }
    }
}

impl TryFrom<ReservedNone> for Reserved {
    type Error = Error;
    fn try_from(reserved_none: ReservedNone) -> Result<Self, Self::Error> {
        match reserved_none.make_read_only() {
            Ok(reserved) => Ok(reserved),
            Err((_, e)) => Err(e),
        }
    }
}

/// Represents an immutable memory mapping in a reserved state, i.e. a memory mapping that is not
/// backed by any physical pages yet.
#[derive(Debug)]
pub struct Reserved {
    inner: platform::Mmap,
}

reserved_impl!(Reserved);
reserved_mmap_impl!(Reserved);

impl TryFrom<Reserved> for Mmap {
    type Error = Error;

    fn try_from(mut reserved: Reserved) -> Result<Mmap, Error> {
        reserved.inner.commit()?;

        Ok(Mmap {
            inner: reserved.inner,
        })
    }
}

/// Represents a mutable memory mapping in a reserved state, i.e. a memory mapping that is not
/// backed by any physical pages yet.
#[derive(Debug)]
pub struct ReservedMut {
    inner: platform::Mmap,
}

reserved_impl!(ReservedMut);
reserved_mmap_impl!(ReservedMut);

impl TryFrom<ReservedMut> for MmapMut {
    type Error = Error;

    fn try_from(mut reserved_mut: ReservedMut) -> Result<MmapMut, Error> {
        reserved_mut.inner.commit()?;

        Ok(MmapMut {
            inner: reserved_mut.inner,
        })
    }
}

impl TryFrom<Reserved> for ReservedMut {
    type Error = Error;
    fn try_from(reserved: Reserved) -> Result<Self, Self::Error> {
        match reserved.make_mut() {
            Ok(reserved_mut) => Ok(reserved_mut),
            Err((_, e)) => Err(e),
        }
    }
}

impl TryFrom<ReservedNone> for ReservedMut {
    type Error = Error;
    fn try_from(reserved_none: ReservedNone) -> Result<Self, Self::Error> {
        match reserved_none.make_mut() {
            Ok(reserved_mut) => Ok(reserved_mut),
            Err((_, e)) => Err(e),
        }
    }
}