mmap_rs_with_map_from_existing/mmap.rs
use crate::areas::MemoryAreas;
use crate::error::Error;
use bitflags::bitflags;
use std::fs::File;
use std::ops::{Deref, DerefMut, Range};

#[cfg(unix)]
use crate::os_impl::unix as platform;

#[cfg(windows)]
use crate::os_impl::windows as platform;

bitflags! {
    /// The available flags to configure the allocated mapping.
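    ///
    /// As a usage sketch (not from the original docs; this assumes the file builds as the
    /// `mmap_rs` crate), flags combine with the usual `bitflags` operators:
    ///
    /// ```no_run
    /// use mmap_rs::MmapFlags;
    ///
    /// // Request a shared mapping with pre-populated pages.
    /// let flags = MmapFlags::SHARED | MmapFlags::POPULATE;
    /// assert!(flags.contains(MmapFlags::SHARED));
    /// ```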
    pub struct MmapFlags: u32 {
        /// Maps the pages as shared such that any modifications are visible between processes.
        ///
        /// When mapping a file without specifying this flag, the pages may initially be mapped as
        /// shared, but a private copy will be created when any process writes to the memory
        /// mapping, such that any modification is not visible to other processes.
        const SHARED = 1 << 0;

        /// Ensure the allocated pages are populated, such that they do not cause page faults.
        const POPULATE = 1 << 1;

        /// Do not reserve swap space for this allocation.
        ///
        /// This flag acts as a no-op on platforms that do not support this feature.
        const NO_RESERVE = 1 << 2;

        /// Use huge pages for this allocation.
        const HUGE_PAGES = 1 << 3;

        /// The region grows downward like a stack on certain Unix platforms (e.g. FreeBSD).
        ///
        /// This flag acts as a no-op on platforms that do not support this feature.
        const STACK = 1 << 4;

        /// The pages will not be included in a core dump.
        ///
        /// This flag acts as a no-op on platforms that do not support this feature.
        const NO_CORE_DUMP = 1 << 5;

        /// Lock the physical memory to prevent page faults from happening when accessing the
        /// pages.
        const LOCKED = 1 << 6;

        /// Suggest using transparent huge pages for this allocation by calling `madvise()`.
        ///
        /// This flag acts as a no-op on platforms that do not support this feature.
        const TRANSPARENT_HUGE_PAGES = 1 << 7;
    }

    /// The available flags to configure the allocated mapping that are considered unsafe to
    /// use.
    pub struct UnsafeMmapFlags: u32 {
        /// Maps the memory mapping at the address specified, replacing any pages that have been
        /// mapped at that address range.
        ///
        /// This is not supported on Microsoft Windows.
        const MAP_FIXED = 1 << 0;

        /// Allows mapping the page as RWX. While this may seem useful for self-modifying code
        /// and JIT engines, it is recommended to convert between mutable and executable mappings
        /// using [`Mmap::make_mut()`] and [`MmapMut::make_exec()`] instead.
        ///
        /// As it may be tempting to use this flag, it has been (indirectly) marked as
        /// **unsafe**. Make sure to read the text below to understand the complications of this
        /// flag before using it.
        ///
        /// RWX pages are an interesting target for attackers, e.g. for buffer overflow attacks,
        /// as RWX mappings can potentially simplify such attacks. Without RWX mappings, attackers
        /// instead have to resort to return-oriented programming (ROP) gadgets. To prevent buffer
        /// overflow attacks, contemporary CPUs allow pages to be marked as non-executable, which
        /// is then used by the operating system to ensure that pages are either marked as
        /// writeable or as executable, but not both. This is also known as W^X.
        ///
        /// While the x86 and x86-64 architectures guarantee cache coherency between the L1
        /// instruction and the L1 data cache, other architectures such as Arm and AArch64 do not.
        /// If the user modified the pages, then executing the code may result in undefined
        /// behavior. To ensure correct behavior, a user has to flush the instruction cache after
        /// modifying and before executing the page.
        const JIT = 1 << 1;
    }

    /// A set of (supported) page sizes.
    pub struct PageSizes: usize {
        /// 4 KiB pages.
        const _4K = 1 << 12;
        /// 8 KiB pages.
        const _8K = 1 << 13;
        /// 16 KiB pages.
        const _16K = 1 << 14;
        /// 32 KiB pages.
        const _32K = 1 << 15;
        /// 64 KiB pages.
        const _64K = 1 << 16;
        /// 128 KiB pages.
        const _128K = 1 << 17;
        /// 256 KiB pages.
        const _256K = 1 << 18;
        /// 512 KiB pages.
        const _512K = 1 << 19;
        /// 1 MiB pages.
        const _1M = 1 << 20;
        /// 2 MiB pages.
        const _2M = 1 << 21;
        /// 4 MiB pages.
        const _4M = 1 << 22;
        /// 8 MiB pages.
        const _8M = 1 << 23;
        /// 16 MiB pages.
        const _16M = 1 << 24;
        /// 32 MiB pages.
        const _32M = 1 << 25;
        /// 64 MiB pages.
        const _64M = 1 << 26;
        /// 128 MiB pages.
        const _128M = 1 << 27;
        /// 256 MiB pages.
        const _256M = 1 << 28;
        /// 512 MiB pages.
        const _512M = 1 << 29;
        /// 1 GiB pages.
        const _1G = 1 << 30;
        /// 2 GiB pages.
        const _2G = 1 << 31;
        #[cfg(target_pointer_width = "64")]
        /// 4 GiB pages.
        const _4G = 1 << 32;
        #[cfg(target_pointer_width = "64")]
        /// 8 GiB pages.
        const _8G = 1 << 33;
        #[cfg(target_pointer_width = "64")]
        /// 16 GiB pages.
        const _16G = 1 << 34;
    }
}

/// The preferred size of the pages used, where the size is in log2 notation.
///
/// Note that not all the offered page sizes may be available on the current platform.
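///
/// As a quick illustration (not from the original docs): 2 MiB pages correspond to
/// `PageSize(21)`, since 2 MiB is 1 << 21 bytes.
///
/// ```no_run
/// use mmap_rs::PageSize;
///
/// // The wrapped value is the log2 of the page size in bytes.
/// assert_eq!(PageSize::_2M, PageSize(21));
/// ```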
#[derive(Clone, Copy, Debug, Eq, Ord, PartialEq, PartialOrd)]
pub struct PageSize(pub usize);

impl PageSize {
    /// Map the mapping using 4 KiB pages.
    pub const _4K: Self = Self(12);
    /// Map the mapping using 64 KiB pages.
    pub const _64K: Self = Self(16);
    /// Map the mapping using 512 KiB pages.
    pub const _512K: Self = Self(19);
    /// Map the mapping using 1 MiB pages.
    pub const _1M: Self = Self(20);
    /// Map the mapping using 2 MiB pages.
    pub const _2M: Self = Self(21);
    /// Map the mapping using 4 MiB pages.
    pub const _4M: Self = Self(22);
    /// Map the mapping using 8 MiB pages.
    pub const _8M: Self = Self(23);
    /// Map the mapping using 16 MiB pages.
    pub const _16M: Self = Self(24);
    /// Map the mapping using 32 MiB pages.
    pub const _32M: Self = Self(25);
    /// Map the mapping using 256 MiB pages.
    pub const _256M: Self = Self(28);
    /// Map the mapping using 512 MiB pages.
    pub const _512M: Self = Self(29);
    /// Map the mapping using 1 GiB pages.
    pub const _1G: Self = Self(30);
    /// Map the mapping using 2 GiB pages.
    pub const _2G: Self = Self(31);
    /// Map the mapping using 16 GiB pages.
    pub const _16G: Self = Self(34);
}

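/// Converts a [`PageSizes`] set with exactly one bit set into the corresponding [`PageSize`]
/// in log2 notation. A usage sketch (not from the original docs):
///
/// ```no_run
/// use mmap_rs::{PageSize, PageSizes};
///
/// // Exactly one size selected: the conversion succeeds.
/// let size: PageSize = PageSizes::_2M.try_into().unwrap();
/// assert_eq!(size, PageSize::_2M);
///
/// // More than one size selected: the conversion fails.
/// let result: Result<PageSize, _> = (PageSizes::_4K | PageSizes::_2M).try_into();
/// assert!(result.is_err());
/// ```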
impl TryInto<PageSize> for PageSizes {
    type Error = Error;

    fn try_into(self) -> Result<PageSize, Error> {
        // Exactly one page size must be set.
        if self.bits().count_ones() != 1 {
            return Err(Error::InvalidSize);
        }

        // `PageSize` stores the size in log2 notation, so store the index of the single set
        // bit rather than the raw size itself.
        Ok(PageSize(self.bits().trailing_zeros() as usize))
    }
}

macro_rules! reserved_mmap_impl {
    ($t:ident) => {
        impl $t {
            /// Returns the start address of this mapping.
            #[inline]
            pub fn start(&self) -> usize {
                self.inner.as_ptr() as usize
            }

            /// Returns the end address of this mapping.
            #[inline]
            pub fn end(&self) -> usize {
                self.start() + self.size()
            }

            /// Yields a raw immutable pointer to this mapping.
            #[inline]
            pub fn as_ptr(&self) -> *const u8 {
                self.inner.as_ptr()
            }

            /// Yields a raw mutable pointer to this mapping.
            #[inline]
            pub fn as_mut_ptr(&mut self) -> *mut u8 {
                self.inner.as_mut_ptr()
            }

            /// Yields the size of this mapping.
            #[inline]
            pub fn size(&self) -> usize {
                self.inner.size()
            }

            /// Merges the memory maps into one. The memory maps must be adjacent to each other
            /// and share the same attributes and backing. On success, this consumes the other
            /// memory map object. Otherwise, this returns an error together with the original
            /// memory map that failed to be merged.
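            ///
            /// A minimal sketch of the intended flow (not from the original docs; assumes this
            /// file builds as the `mmap_rs` crate):
            ///
            /// ```no_run
            /// use mmap_rs::MmapOptions;
            ///
            /// # fn main() -> Result<(), mmap_rs::Error> {
            /// let size = MmapOptions::page_size();
            /// let mut mapping = MmapOptions::new(2 * size)?.map_mut()?;
            ///
            /// // Split the mapping in two, then merge the halves back together.
            /// let other = mapping.split_off(size)?;
            /// mapping.merge(other).map_err(|(e, _other)| e)?;
            /// # Ok(())
            /// # }
            /// ```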
            pub fn merge(&mut self, other: Self) -> Result<(), (Error, Self)> {
                // Ensure the memory maps are adjacent.
                if self.end() != other.start() {
                    return Err((Error::MustBeAdjacent, other));
                }

                // Ensure the protection attributes match.
                let region = match MemoryAreas::query(self.start()) {
                    Ok(Some(region)) => region,
                    Ok(None) => return Err((Error::AttributeMismatch, other)),
                    Err(e) => return Err((e, other)),
                };

                let other_region = match MemoryAreas::query(other.start()) {
                    Ok(Some(region)) => region,
                    Ok(None) => return Err((Error::AttributeMismatch, other)),
                    Err(e) => return Err((e, other)),
                };

                if region.protection != other_region.protection {
                    return Err((Error::AttributeMismatch, other));
                }

                if let Err(e) = self.inner.merge(&other.inner) {
                    return Err((e, other));
                }

                // The other mapping is now owned by `self`; do not unmap it on drop.
                std::mem::forget(other);

                Ok(())
            }

            /// Splits the memory map into two at the given byte offset. The byte offset must be
            /// page size aligned.
            ///
            /// Afterwards `self` is limited to the range `[0, at)`, and the returned memory
            /// mapping is limited to `[at, len)`.
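            ///
            /// A minimal sketch (not from the original docs):
            ///
            /// ```no_run
            /// use mmap_rs::MmapOptions;
            ///
            /// # fn main() -> Result<(), mmap_rs::Error> {
            /// let size = MmapOptions::page_size();
            /// let mut mapping = MmapOptions::new(2 * size)?.map_mut()?;
            ///
            /// // Keep the first page in `mapping`; `rest` covers the second page.
            /// let rest = mapping.split_off(size)?;
            /// assert_eq!(mapping.size(), size);
            /// assert_eq!(rest.size(), size);
            /// # Ok(())
            /// # }
            /// ```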
            pub fn split_off(&mut self, at: usize) -> Result<Self, Error> {
                let inner = self.inner.split_off(at)?;

                Ok(Self { inner })
            }

            /// Splits the memory map into two at the given byte offset. The byte offset must be
            /// page size aligned.
            ///
            /// Afterwards `self` is limited to the range `[at, len)`, and the returned memory
            /// mapping is limited to `[0, at)`.
            pub fn split_to(&mut self, at: usize) -> Result<Self, Error> {
                let inner = self.inner.split_to(at)?;

                Ok(Self { inner })
            }
        }
    };
}

macro_rules! mmap_impl {
    ($t:ident) => {
        impl $t {
            /// Locks the physical pages in memory such that accessing the mapping causes no
            /// page faults.
            pub fn lock(&mut self) -> Result<(), Error> {
                self.inner.lock()
            }

            /// Unlocks the physical pages in memory, allowing the operating system to swap out
            /// the pages backing this memory mapping.
            pub fn unlock(&mut self) -> Result<(), Error> {
                self.inner.unlock()
            }

            /// Flushes a range of the memory mapping, i.e. this initiates writing dirty pages
            /// within that range to the disk. Dirty pages are those whose contents have changed
            /// since the file was mapped.
            ///
            /// On Microsoft Windows, this function does not flush the file metadata. Thus, it
            /// must be followed by a call to [`File::sync_all`] to flush the file metadata. This
            /// also causes the flush operation to be synchronous.
            ///
            /// On other platforms, the flush operation is synchronous, i.e. this waits until the
            /// flush operation completes.
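            ///
            /// A minimal sketch (not from the original docs; the file path is illustrative):
            ///
            /// ```no_run
            /// use mmap_rs::MmapOptions;
            /// use std::fs::File;
            ///
            /// # fn main() -> Result<(), Box<dyn std::error::Error>> {
            /// let file = File::options().read(true).write(true).open("data.bin")?;
            /// let size = file.metadata()?.len() as usize;
            ///
            /// // Safety: we assume no other process mutates the file while it is mapped.
            /// let mut mapping = unsafe {
            ///     MmapOptions::new(size)?.with_file(&file, 0).map_mut()?
            /// };
            ///
            /// mapping[0] = 0x42;
            ///
            /// // Write the dirty pages in the given range back to the file.
            /// mapping.flush(0..size)?;
            /// # Ok(())
            /// # }
            /// ```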
            pub fn flush(&self, range: Range<usize>) -> Result<(), Error> {
                self.inner.flush(range)
            }

            /// Flushes a range of the memory mapping asynchronously, i.e. this initiates writing
            /// dirty pages within that range to the disk without waiting for the flush operation
            /// to complete. Dirty pages are those whose contents have changed since the file was
            /// mapped.
            pub fn flush_async(&self, range: Range<usize>) -> Result<(), Error> {
                self.inner.flush_async(range)
            }

            /// This function can be used to flush the instruction cache on architectures where
            /// this is required.
            ///
            /// While the x86 and x86-64 architectures guarantee cache coherency between the L1
            /// instruction and the L1 data cache, other architectures such as Arm and AArch64 do
            /// not. If the user modified the pages, then executing the code may result in
            /// undefined behavior. To ensure correct behavior, a user has to flush the
            /// instruction cache after modifying and before executing the page.
            pub fn flush_icache(&self) -> Result<(), Error> {
                self.inner.flush_icache()
            }

            /// Remaps this memory mapping as inaccessible.
            ///
            /// In case of failure, this returns the ownership of `self`.
            pub fn make_none(mut self) -> Result<MmapNone, (Self, Error)> {
                if let Err(e) = self.inner.make_none() {
                    return Err((self, e));
                }

                Ok(MmapNone { inner: self.inner })
            }

            /// Remaps this memory mapping as immutable.
            ///
            /// In case of failure, this returns the ownership of `self`.
            pub fn make_read_only(mut self) -> Result<Mmap, (Self, Error)> {
                if let Err(e) = self.inner.make_read_only() {
                    return Err((self, e));
                }

                Ok(Mmap { inner: self.inner })
            }

            /// Remaps this memory mapping as executable.
            ///
            /// In case of failure, this returns the ownership of `self`.
            pub fn make_exec(mut self) -> Result<Mmap, (Self, Error)> {
                if let Err(e) = self.inner.make_exec() {
                    return Err((self, e));
                }

                if let Err(e) = self.inner.flush_icache() {
                    return Err((self, e));
                }

                Ok(Mmap { inner: self.inner })
            }

            /// Remaps this memory mapping as executable, but does not flush the instruction
            /// cache.
            ///
            /// # Safety
            ///
            /// While the x86 and x86-64 architectures guarantee cache coherency between the L1
            /// instruction and the L1 data cache, other architectures such as Arm and AArch64 do
            /// not. If the user modified the pages, then executing the code may result in
            /// undefined behavior. To ensure correct behavior, a user has to flush the
            /// instruction cache after modifying and before executing the page.
            ///
            /// In case of failure, this returns the ownership of `self`.
            pub unsafe fn make_exec_no_flush(mut self) -> Result<Mmap, (Self, Error)> {
                if let Err(e) = self.inner.make_exec() {
                    return Err((self, e));
                }

                Ok(Mmap { inner: self.inner })
            }

            /// Remaps this mapping to be mutable.
            ///
            /// In case of failure, this returns the ownership of `self`.
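            ///
            /// A minimal W^X round trip (a sketch, not from the original docs):
            ///
            /// ```no_run
            /// use mmap_rs::MmapOptions;
            ///
            /// # fn main() -> Result<(), mmap_rs::Error> {
            /// let mut mapping = MmapOptions::new(MmapOptions::page_size())?.map_mut()?;
            ///
            /// // Write some code into the mapping, then make it executable.
            /// mapping[0] = 0xc3; // `ret` on x86-64 (illustrative only).
            /// let mapping = mapping.make_exec().map_err(|(_, e)| e)?;
            ///
            /// // Convert back to a mutable mapping to patch it again.
            /// let _mapping = mapping.make_mut().map_err(|(_, e)| e)?;
            /// # Ok(())
            /// # }
            /// ```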
            pub fn make_mut(mut self) -> Result<MmapMut, (Self, Error)> {
                if let Err(e) = self.inner.make_mut() {
                    return Err((self, e));
                }

                Ok(MmapMut { inner: self.inner })
            }

            /// Remaps this mapping to be executable and mutable.
            ///
            /// While this may seem useful for self-modifying code and JIT engines, it is
            /// recommended to convert between mutable and executable mappings using
            /// [`Mmap::make_mut()`] and [`MmapMut::make_exec()`] instead.
            ///
            /// Make sure to read the text below to understand the complications of this function
            /// before using it. The [`UnsafeMmapFlags::JIT`] flag must be set for this function
            /// to succeed.
            ///
            /// # Safety
            ///
            /// RWX pages are an interesting target for attackers, e.g. for buffer overflow
            /// attacks, as RWX mappings can potentially simplify such attacks. Without RWX
            /// mappings, attackers instead have to resort to return-oriented programming (ROP)
            /// gadgets. To prevent buffer overflow attacks, contemporary CPUs allow pages to be
            /// marked as non-executable, which is then used by the operating system to ensure
            /// that pages are either marked as writeable or as executable, but not both. This is
            /// also known as W^X.
            ///
            /// While the x86 and x86-64 architectures guarantee cache coherency between the L1
            /// instruction and the L1 data cache, other architectures such as Arm and AArch64 do
            /// not. If the user modified the pages, then executing the code may result in
            /// undefined behavior. To ensure correct behavior, a user has to flush the
            /// instruction cache after modifying and before executing the page.
            ///
            /// In case of failure, this returns the ownership of `self`.
            pub unsafe fn make_exec_mut(mut self) -> Result<MmapMut, (Self, Error)> {
                if let Err(e) = self.inner.make_exec_mut() {
                    return Err((self, e));
                }

                Ok(MmapMut { inner: self.inner })
            }
        }
    };
}

/// Represents an inaccessible memory mapping.
#[derive(Debug)]
pub struct MmapNone {
    inner: platform::Mmap,
}

mmap_impl!(MmapNone);
reserved_mmap_impl!(MmapNone);

/// Represents an immutable memory mapping.
#[derive(Debug)]
pub struct Mmap {
    inner: platform::Mmap,
}

mmap_impl!(Mmap);
reserved_mmap_impl!(Mmap);

impl Mmap {
    /// Extracts a slice containing the entire mapping.
    ///
    /// This is equivalent to `&mapping[..]`.
    #[inline]
    pub fn as_slice(&self) -> &[u8] {
        &self[..]
    }
}

impl Deref for Mmap {
    type Target = [u8];

    fn deref(&self) -> &Self::Target {
        unsafe { std::slice::from_raw_parts(self.as_ptr(), self.size()) }
    }
}

impl AsRef<[u8]> for Mmap {
    fn as_ref(&self) -> &[u8] {
        unsafe { std::slice::from_raw_parts(self.as_ptr(), self.size()) }
    }
}

/// Represents a mutable memory mapping.
#[derive(Debug)]
pub struct MmapMut {
    inner: platform::Mmap,
}

mmap_impl!(MmapMut);
reserved_mmap_impl!(MmapMut);

impl MmapMut {
    /// Extracts a slice containing the entire mapping.
    ///
    /// This is equivalent to `&mapping[..]`.
    #[inline]
    pub fn as_slice(&self) -> &[u8] {
        &self[..]
    }

    /// Extracts a mutable slice containing the entire mapping.
    ///
    /// This is equivalent to `&mut mapping[..]`.
    #[inline]
    pub fn as_mut_slice(&mut self) -> &mut [u8] {
        &mut self[..]
    }
}

impl Deref for MmapMut {
    type Target = [u8];

    fn deref(&self) -> &Self::Target {
        unsafe { std::slice::from_raw_parts(self.as_ptr(), self.size()) }
    }
}

impl DerefMut for MmapMut {
    fn deref_mut(&mut self) -> &mut Self::Target {
        unsafe { std::slice::from_raw_parts_mut(self.as_mut_ptr(), self.size()) }
    }
}

impl AsRef<[u8]> for MmapMut {
    fn as_ref(&self) -> &[u8] {
        unsafe { std::slice::from_raw_parts(self.as_ptr(), self.size()) }
    }
}

impl AsMut<[u8]> for MmapMut {
    fn as_mut(&mut self) -> &mut [u8] {
        unsafe { std::slice::from_raw_parts_mut(self.as_mut_ptr(), self.size()) }
    }
}

/// Represents the options for the memory mapping.
#[derive(Debug)]
pub struct MmapOptions<'a> {
    inner: platform::MmapOptions<'a>,
}

impl<'a> MmapOptions<'a> {
    /// Constructs the `MmapOptions` builder. The size specified is the size of the mapping to
    /// be allocated in bytes.
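    ///
    /// A minimal sketch of the builder flow (not from the original docs):
    ///
    /// ```no_run
    /// use mmap_rs::{MmapFlags, MmapOptions};
    ///
    /// # fn main() -> Result<(), mmap_rs::Error> {
    /// let _mapping = MmapOptions::new(MmapOptions::page_size())?
    ///     .with_flags(MmapFlags::NO_CORE_DUMP)
    ///     .map_mut()?;
    /// # Ok(())
    /// # }
    /// ```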
    pub fn new(size: usize) -> Result<Self, Error> {
        Ok(Self {
            inner: platform::MmapOptions::new(size)?,
        })
    }

    /// Returns the smallest possible page size for the current platform. The allocation size
    /// must be aligned to the page size for the allocation to succeed.
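    ///
    /// For example, to round an arbitrary length up to the page size (a sketch, not from the
    /// original docs):
    ///
    /// ```no_run
    /// use mmap_rs::MmapOptions;
    ///
    /// let page_size = MmapOptions::page_size();
    /// let len = 12345;
    ///
    /// // Round `len` up to the next multiple of the page size (a power of two).
    /// let aligned = (len + page_size - 1) & !(page_size - 1);
    /// assert_eq!(aligned % page_size, 0);
    /// ```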
    pub fn page_size() -> usize {
        platform::MmapOptions::page_size()
    }

    /// Returns the set of supported page sizes for the current platform.
    pub fn page_sizes() -> Result<PageSizes, Error> {
        platform::MmapOptions::page_sizes()
    }

    /// Returns the allocation granularity for the current platform. On some platforms the
    /// allocation granularity may be a multiple of the page size. The start address of the
    /// allocation must be aligned to `max(allocation_granularity, page_size)`.
    pub fn allocation_granularity() -> usize {
        platform::MmapOptions::allocation_granularity()
    }

    /// The desired address at which the memory should be mapped.
    pub fn with_address(self, address: usize) -> Self {
        Self {
            inner: self.inner.with_address(address),
        }
    }

    /// Whether the memory mapping should be backed by a [`File`] or not. If the memory mapping
    /// should be backed by a [`File`], then the user can also specify the offset within the file
    /// at which the mapping should start.
    ///
    /// On Microsoft Windows, it may not be possible to extend the protection beyond the access
    /// mask that has been used to open the file. For instance, if a file has been opened with
    /// read access, then [`Mmap::make_mut()`] will not work. Furthermore, [`std::fs::OpenOptions`]
    /// does not in itself provide a standardized way to open the file with executable access.
    /// However, if the file is not opened with executable access, then it may not be possible to
    /// use [`Mmap::make_exec()`]. Fortunately, Rust provides [`OpenOptionsExt`] that allows you
    /// to open the file with executable access rights. See [`access_mode`] for more information.
    ///
    /// # Safety
    ///
    /// This function is marked as **unsafe** as the user should be aware that even in the case
    /// that a file is mapped as immutable in the address space of the current process, it does
    /// not guarantee that there does not exist any other mutable mapping to the file.
    ///
    /// On Microsoft Windows, it is possible to limit the access to shared reading or to be fully
    /// exclusive using [`share_mode`].
    ///
    /// On most Unix systems, it is possible to use [`nix::fcntl::flock`]. However, keep in mind
    /// that this provides an **advisory** locking scheme, and that implementations are therefore
    /// required to be co-operative.
    ///
    /// On Linux, it is also possible to mark the file as immutable. See `man 2 ioctl_iflags` and
    /// `man 1 chattr` for more information.
    ///
    /// [`OpenOptionsExt`]: https://doc.rust-lang.org/std/os/windows/fs/trait.OpenOptionsExt.html
    /// [`access_mode`]: https://doc.rust-lang.org/std/os/windows/fs/trait.OpenOptionsExt.html#tymethod.access_mode
    /// [`share_mode`]: https://doc.rust-lang.org/std/os/windows/fs/trait.OpenOptionsExt.html#tymethod.share_mode
    /// [`nix::fcntl::flock`]: https://docs.rs/nix/latest/nix/fcntl/fn.flock.html
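    ///
    /// A minimal sketch of mapping a file read-only (not from the original docs; the path is
    /// illustrative):
    ///
    /// ```no_run
    /// use mmap_rs::MmapOptions;
    /// use std::fs::File;
    ///
    /// # fn main() -> Result<(), Box<dyn std::error::Error>> {
    /// let file = File::open("data.bin")?;
    /// let size = file.metadata()?.len() as usize;
    ///
    /// // Safety: we assume no other process mutates the file while it is mapped.
    /// let mapping = unsafe {
    ///     MmapOptions::new(size)?.with_file(&file, 0).map()?
    /// };
    ///
    /// println!("first byte: {}", mapping[0]);
    /// # Ok(())
    /// # }
    /// ```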
    pub unsafe fn with_file(self, file: &'a File, offset: u64) -> Self {
        Self {
            inner: self.inner.with_file(file, offset),
        }
    }

    /// The desired configuration of the mapping. See [`MmapFlags`] for available options.
    pub fn with_flags(self, flags: MmapFlags) -> Self {
        Self {
            inner: self.inner.with_flags(flags),
        }
    }

    /// The desired configuration of the mapping. See [`UnsafeMmapFlags`] for available options.
    ///
    /// # Safety
    ///
    /// The flags that can be passed to this function have unsafe behavior associated with them.
    pub unsafe fn with_unsafe_flags(self, flags: UnsafeMmapFlags) -> Self {
        Self {
            inner: self.inner.with_unsafe_flags(flags),
        }
    }

    /// Whether this memory mapping should be backed by a specific page size or not.
    pub fn with_page_size(self, page_size: PageSize) -> Self {
        Self {
            inner: self.inner.with_page_size(page_size),
        }
    }

    /// Reserves inaccessible memory.
    pub fn reserve_none(self) -> Result<ReservedNone, Error> {
        Ok(ReservedNone {
            inner: self.inner.reserve_none()?,
        })
    }

    /// Reserves immutable memory.
    pub fn reserve(self) -> Result<Reserved, Error> {
        Ok(Reserved {
            inner: self.inner.reserve()?,
        })
    }

    /// Reserves executable memory.
    pub fn reserve_exec(self) -> Result<Reserved, Error> {
        Ok(Reserved {
            inner: self.inner.reserve_exec()?,
        })
    }

    /// Reserves mutable memory.
    pub fn reserve_mut(self) -> Result<ReservedMut, Error> {
        Ok(ReservedMut {
            inner: self.inner.reserve_mut()?,
        })
    }

    /// Reserves executable and mutable memory.
    ///
    /// # Safety
    ///
    /// See [`MmapOptions::map_exec_mut`] for more information.
    pub unsafe fn reserve_exec_mut(self) -> Result<ReservedMut, Error> {
        Ok(ReservedMut {
            inner: self.inner.reserve_exec_mut()?,
        })
    }

    /// Maps the memory as inaccessible.
    pub fn map_none(self) -> Result<MmapNone, Error> {
        Ok(MmapNone {
            inner: self.inner.map_none()?,
        })
    }

    /// Maps the memory as immutable.
    pub fn map(self) -> Result<Mmap, Error> {
        Ok(Mmap {
            inner: self.inner.map()?,
        })
    }

    /// Maps the memory as executable.
    pub fn map_exec(self) -> Result<Mmap, Error> {
        Ok(Mmap {
            inner: self.inner.map_exec()?,
        })
    }

    /// Maps the memory as mutable.
    pub fn map_mut(self) -> Result<MmapMut, Error> {
        Ok(MmapMut {
            inner: self.inner.map_mut()?,
        })
    }

    /// Maps the memory as executable and mutable. While this may seem useful for self-modifying
    /// code and JIT engines, it is recommended to convert between mutable and executable
    /// mappings using [`Mmap::make_mut()`] and [`MmapMut::make_exec()`] instead.
    ///
    /// Make sure to read the text below to understand the complications of this function before
    /// using it. The [`UnsafeMmapFlags::JIT`] flag must be set for this function to succeed.
    ///
    /// # Safety
    ///
    /// RWX pages are an interesting target for attackers, e.g. for buffer overflow attacks, as
    /// RWX mappings can potentially simplify such attacks. Without RWX mappings, attackers
    /// instead have to resort to return-oriented programming (ROP) gadgets. To prevent buffer
    /// overflow attacks, contemporary CPUs allow pages to be marked as non-executable, which is
    /// then used by the operating system to ensure that pages are either marked as writeable or
    /// as executable, but not both. This is also known as W^X.
    ///
    /// While the x86 and x86-64 architectures guarantee cache coherency between the L1
    /// instruction and the L1 data cache, other architectures such as Arm and AArch64 do not. If
    /// the user modified the pages, then executing the code may result in undefined behavior. To
    /// ensure correct behavior, a user has to flush the instruction cache after modifying and
    /// before executing the page.
    pub unsafe fn map_exec_mut(self) -> Result<MmapMut, Error> {
        Ok(MmapMut {
            inner: self.inner.map_exec_mut()?,
        })
    }

    /// Creates a mapping from an existing address/size combination. Before calling this, call
    /// [`MmapOptions::with_address()`] to set the address.
    ///
    /// # Returns
    ///
    /// An error if an address was not previously specified with a call to
    /// [`MmapOptions::with_address()`].
    ///
    /// # Remarks
    ///
    /// This function takes ownership of an existing mapping, meaning that the mapping will be
    /// dropped when the returned object is dropped, i.e. `munmap`/`VirtualFree` (or the platform
    /// equivalent) will be called for it.
    ///
    /// If you want to avoid freeing the mapping, call [`core::mem::forget`] on the resulting
    /// object.
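    ///
    /// A minimal sketch (not from the original docs), assuming `address` points to a live
    /// mapping of `size` bytes that is not owned by anything else:
    ///
    /// ```no_run
    /// use mmap_rs::MmapOptions;
    ///
    /// # fn main() -> Result<(), mmap_rs::Error> {
    /// # let (address, size) = (0x7f00_0000_0000usize, MmapOptions::page_size());
    /// // Take ownership of the existing mapping; it is unmapped when dropped.
    /// let _mapping = unsafe {
    ///     MmapOptions::new(size)?
    ///         .with_address(address)
    ///         .map_from_existing()?
    /// };
    /// # Ok(())
    /// # }
    /// ```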
    pub unsafe fn map_from_existing(self) -> Result<MmapMut, Error> {
        Ok(MmapMut {
            inner: self.inner.map_from_existing()?,
        })
    }
}

macro_rules! reserved_impl {
    ($t:ident) => {
        impl $t {
            /// Returns `true` if the memory mapping has a size of 0.
            #[inline]
            pub fn is_empty(&self) -> bool {
                self.inner.size() == 0
            }

            /// Yields the length of this mapping.
            #[inline]
            pub fn len(&self) -> usize {
                self.inner.size()
            }

            /// Remaps this memory mapping as inaccessible.
            ///
            /// In case of failure, this returns the ownership of `self`.
            pub fn make_none(mut self) -> Result<ReservedNone, (Self, Error)> {
                if let Err(e) = self.inner.make_none() {
                    return Err((self, e));
                }

                Ok(ReservedNone { inner: self.inner })
            }

            /// Remaps this memory mapping as immutable.
            ///
            /// In case of failure, this returns the ownership of `self`.
            pub fn make_read_only(mut self) -> Result<Reserved, (Self, Error)> {
                if let Err(e) = self.inner.make_read_only() {
                    return Err((self, e));
                }

                Ok(Reserved { inner: self.inner })
            }

            /// Remaps this memory mapping as executable.
            ///
            /// In case of failure, this returns the ownership of `self`.
            pub fn make_exec(mut self) -> Result<Reserved, (Self, Error)> {
                if let Err(e) = self.inner.make_exec() {
                    return Err((self, e));
                }

                if let Err(e) = self.inner.flush_icache() {
                    return Err((self, e));
                }

                Ok(Reserved { inner: self.inner })
            }

            /// Remaps this memory mapping as executable, but does not flush the instruction
            /// cache.
            ///
            /// # Safety
            ///
            /// While the x86 and x86-64 architectures guarantee cache coherency between the L1
            /// instruction and the L1 data cache, other architectures such as Arm and AArch64 do
            /// not. If the user modified the pages, then executing the code may result in
            /// undefined behavior. To ensure correct behavior, a user has to flush the
            /// instruction cache after modifying and before executing the page.
            ///
            /// In case of failure, this returns the ownership of `self`.
            pub unsafe fn make_exec_no_flush(mut self) -> Result<Reserved, (Self, Error)> {
                if let Err(e) = self.inner.make_exec() {
                    return Err((self, e));
                }

                Ok(Reserved { inner: self.inner })
            }

            /// Remaps this mapping to be mutable.
            ///
            /// In case of failure, this returns the ownership of `self`.
            pub fn make_mut(mut self) -> Result<ReservedMut, (Self, Error)> {
                if let Err(e) = self.inner.make_mut() {
                    return Err((self, e));
                }

                Ok(ReservedMut { inner: self.inner })
            }

            /// Remaps this mapping to be executable and mutable.
            ///
            /// While this may seem useful for self-modifying code and JIT engines, it is
            /// recommended to convert between mutable and executable mappings using
            /// [`Mmap::make_mut()`] and [`MmapMut::make_exec()`] instead.
            ///
            /// Make sure to read the text below to understand the complications of this function
            /// before using it. The [`UnsafeMmapFlags::JIT`] flag must be set for this function
            /// to succeed.
            ///
            /// # Safety
            ///
            /// RWX pages are an interesting target for attackers, e.g. for buffer overflow
            /// attacks, as RWX mappings can potentially simplify such attacks. Without RWX
            /// mappings, attackers instead have to resort to return-oriented programming (ROP)
            /// gadgets. To prevent buffer overflow attacks, contemporary CPUs allow pages to be
            /// marked as non-executable, which is then used by the operating system to ensure
            /// that pages are either marked as writeable or as executable, but not both. This is
            /// also known as W^X.
            ///
            /// While the x86 and x86-64 architectures guarantee cache coherency between the L1
            /// instruction and the L1 data cache, other architectures such as Arm and AArch64 do
            /// not. If the user modified the pages, then executing the code may result in
            /// undefined behavior. To ensure correct behavior, a user has to flush the
            /// instruction cache after modifying and before executing the page.
            ///
            /// In case of failure, this returns the ownership of `self`.
            pub unsafe fn make_exec_mut(mut self) -> Result<ReservedMut, (Self, Error)> {
                if let Err(e) = self.inner.make_exec_mut() {
                    return Err((self, e));
                }

                Ok(ReservedMut { inner: self.inner })
            }
        }
    };
}

/// Represents an inaccessible memory mapping in a reserved state, i.e. a memory mapping that is
/// not backed by any physical pages yet.
#[derive(Debug)]
pub struct ReservedNone {
    inner: platform::Mmap,
}

reserved_impl!(ReservedNone);
reserved_mmap_impl!(ReservedNone);

impl TryInto<MmapNone> for ReservedNone {
    type Error = Error;

    fn try_into(mut self) -> Result<MmapNone, Error> {
        self.inner.commit()?;

        Ok(MmapNone { inner: self.inner })
    }
}

/// Represents an immutable memory mapping in a reserved state, i.e. a memory mapping that is not
/// backed by any physical pages yet.
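///
/// A minimal sketch of reserving and then committing memory (not from the original docs):
///
/// ```no_run
/// use mmap_rs::{Mmap, MmapOptions, Reserved};
///
/// # fn main() -> Result<(), mmap_rs::Error> {
/// // Reserve address space without backing it by physical pages yet.
/// let reserved: Reserved = MmapOptions::new(MmapOptions::page_size())?.reserve()?;
///
/// // Commit the reserved pages, turning the reservation into a usable mapping.
/// let _mapping: Mmap = reserved.try_into()?;
/// # Ok(())
/// # }
/// ```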
#[derive(Debug)]
pub struct Reserved {
    inner: platform::Mmap,
}

reserved_impl!(Reserved);
reserved_mmap_impl!(Reserved);

impl TryInto<Mmap> for Reserved {
    type Error = Error;

    fn try_into(mut self) -> Result<Mmap, Error> {
        self.inner.commit()?;

        Ok(Mmap { inner: self.inner })
    }
}

/// Represents a mutable memory mapping in a reserved state, i.e. a memory mapping that is not
/// backed by any physical pages yet.
#[derive(Debug)]
pub struct ReservedMut {
    inner: platform::Mmap,
}

reserved_impl!(ReservedMut);
reserved_mmap_impl!(ReservedMut);

impl TryInto<MmapMut> for ReservedMut {
    type Error = Error;

    fn try_into(mut self) -> Result<MmapMut, Error> {
        self.inner.commit()?;

        Ok(MmapMut { inner: self.inner })
    }
}