mmap_rs/mmap.rs
use crate::areas::MemoryAreas;
use crate::error::Error;
use bitflags::bitflags;
use std::fs::File;
use std::ops::{Deref, DerefMut, Range};

#[cfg(unix)]
use crate::os_impl::unix as platform;

#[cfg(windows)]
use crate::os_impl::windows as platform;

bitflags! {
    /// The available flags to configure the allocated mapping.
    pub struct MmapFlags: u32 {
        /// Maps the pages as shared such that any modifications are visible between processes.
        ///
        /// When mapping a file without specifying this flag, the pages may initially be mapped as
        /// shared, but a private copy will be created when any process writes to the memory
        /// mapping, such that any modification is not visible to other processes.
        const SHARED = 1 << 0;

        /// Ensures the allocated pages are populated, such that they do not cause page faults.
        const POPULATE = 1 << 1;

        /// Does not reserve swap space for this allocation.
        ///
        /// This flag acts as a no-op on platforms that do not support this feature.
        const NO_RESERVE = 1 << 2;

        /// Uses huge pages for this allocation.
        const HUGE_PAGES = 1 << 3;

        /// The region grows downward like a stack on certain Unix platforms (e.g. FreeBSD).
        ///
        /// This flag acts as a no-op on platforms that do not support this feature.
        const STACK = 1 << 4;

        /// The pages will not be included in a core dump.
        ///
        /// This flag acts as a no-op on platforms that do not support this feature.
        const NO_CORE_DUMP = 1 << 5;

        /// Locks the physical memory to prevent page faults from happening when accessing the
        /// pages.
        const LOCKED = 1 << 6;

        /// Suggests using transparent huge pages for this allocation by calling `madvise()`.
        ///
        /// This flag acts as a no-op on platforms that do not support this feature.
        const TRANSPARENT_HUGE_PAGES = 1 << 7;

        /// Suggests that the mapped region will be accessed sequentially by calling `madvise()`.
        ///
        /// This flag acts as a no-op on platforms that do not support this feature.
        const SEQUENTIAL = 1 << 8;

        /// Suggests that the mapped region will be accessed randomly by calling `madvise()`.
        ///
        /// This flag acts as a no-op on platforms that do not support this feature.
        const RANDOM_ACCESS = 1 << 9;
    }

    /// The available flags to configure the allocated mapping that are considered unsafe to use.
    pub struct UnsafeMmapFlags: u32 {
        /// Maps the memory mapping at the address specified, replacing any pages that have been
        /// mapped at that address range.
        ///
        /// This is not supported on Microsoft Windows.
        const MAP_FIXED = 1 << 0;

        /// Allows mapping the page as RWX. While this may seem useful for self-modifying code
        /// and JIT engines, it is recommended to convert between mutable and executable mappings
        /// using [`Mmap::make_mut()`] and [`MmapMut::make_exec()`] instead.
        ///
        /// As tempting as this flag may be, it has been (indirectly) marked as **unsafe**. Make
        /// sure to read the text below to understand its complications before using it.
        ///
        /// RWX pages are interesting targets for attackers, e.g. for buffer overflow attacks, as
        /// RWX mappings can potentially simplify such attacks. Without RWX mappings, attackers
        /// instead have to resort to return-oriented programming (ROP) gadgets. To prevent buffer
        /// overflow attacks, contemporary CPUs allow pages to be marked as non-executable, which
        /// is then used by the operating system to ensure that pages are either marked as
        /// writeable or as executable, but not both. This is also known as W^X.
        ///
        /// While the x86 and x86-64 architectures guarantee cache coherency between the L1
        /// instruction and the L1 data cache, other architectures such as Arm and AArch64 do not.
        /// If the user modifies the pages, then executing the code may result in undefined
        /// behavior. To ensure correct behavior, a user has to flush the instruction cache after
        /// modifying and before executing the page.
        const JIT = 1 << 1;
    }

    /// A set of (supported) page sizes.
    pub struct PageSizes: usize {
        /// 4 KiB pages.
        const _4K = 1 << 12;
        /// 8 KiB pages.
        const _8K = 1 << 13;
        /// 16 KiB pages.
        const _16K = 1 << 14;
        /// 32 KiB pages.
        const _32K = 1 << 15;
        /// 64 KiB pages.
        const _64K = 1 << 16;
        /// 128 KiB pages.
        const _128K = 1 << 17;
        /// 256 KiB pages.
        const _256K = 1 << 18;
        /// 512 KiB pages.
        const _512K = 1 << 19;
        /// 1 MiB pages.
        const _1M = 1 << 20;
        /// 2 MiB pages.
        const _2M = 1 << 21;
        /// 4 MiB pages.
        const _4M = 1 << 22;
        /// 8 MiB pages.
        const _8M = 1 << 23;
        /// 16 MiB pages.
        const _16M = 1 << 24;
        /// 32 MiB pages.
        const _32M = 1 << 25;
        /// 64 MiB pages.
        const _64M = 1 << 26;
        /// 128 MiB pages.
        const _128M = 1 << 27;
        /// 256 MiB pages.
        const _256M = 1 << 28;
        /// 512 MiB pages.
        const _512M = 1 << 29;
        /// 1 GiB pages.
        const _1G = 1 << 30;
        /// 2 GiB pages.
        const _2G = 1 << 31;
        #[cfg(target_pointer_width = "64")]
        /// 4 GiB pages.
        const _4G = 1 << 32;
        #[cfg(target_pointer_width = "64")]
        /// 8 GiB pages.
        const _8G = 1 << 33;
        #[cfg(target_pointer_width = "64")]
        /// 16 GiB pages.
        const _16G = 1 << 34;
    }
}

/// The preferred size of the pages used, where the size is in log2 notation.
///
/// Note that not all the offered page sizes may be available on the current platform.
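///
/// For instance, 2 MiB pages are 2^21 bytes, so [`PageSize::_2M`] is `PageSize(21)`. A minimal
/// sketch (compile-only, as the supported page sizes vary by platform):
///
/// ```no_run
/// # use mmap_rs::PageSize;
/// assert_eq!(PageSize::_2M, PageSize(21));
/// assert_eq!(1_usize << PageSize::_4K.0, 4096);
/// ```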
#[derive(Clone, Copy, Debug, Eq, Ord, PartialEq, PartialOrd)]
pub struct PageSize(pub usize);

impl PageSize {
    /// Map the mapping using 4 KiB pages.
    pub const _4K: Self = Self(12);
    /// Map the mapping using 64 KiB pages.
    pub const _64K: Self = Self(16);
    /// Map the mapping using 512 KiB pages.
    pub const _512K: Self = Self(19);
    /// Map the mapping using 1 MiB pages.
    pub const _1M: Self = Self(20);
    /// Map the mapping using 2 MiB pages.
    pub const _2M: Self = Self(21);
    /// Map the mapping using 4 MiB pages.
    pub const _4M: Self = Self(22);
    /// Map the mapping using 8 MiB pages.
    pub const _8M: Self = Self(23);
    /// Map the mapping using 16 MiB pages.
    pub const _16M: Self = Self(24);
    /// Map the mapping using 32 MiB pages.
    pub const _32M: Self = Self(25);
    /// Map the mapping using 256 MiB pages.
    pub const _256M: Self = Self(28);
    /// Map the mapping using 512 MiB pages.
    pub const _512M: Self = Self(29);
    /// Map the mapping using 1 GiB pages.
    pub const _1G: Self = Self(30);
    /// Map the mapping using 2 GiB pages.
    pub const _2G: Self = Self(31);
    /// Map the mapping using 16 GiB pages.
    pub const _16G: Self = Self(34);
}

impl TryFrom<PageSizes> for PageSize {
    type Error = Error;

    fn try_from(page_sizes: PageSizes) -> Result<PageSize, Error> {
        if page_sizes.bits().count_ones() != 1 {
            return Err(Error::InvalidSize);
        }

        // `PageSizes` stores the actual page size as a bit mask, while `PageSize` stores the
        // size in log2 notation, so convert the single set bit into its bit position.
        Ok(PageSize(page_sizes.bits().trailing_zeros() as usize))
    }
}

macro_rules! reserved_mmap_impl {
    ($t:ident) => {
        impl $t {
            /// Returns the start address of this mapping.
            #[inline]
            pub fn start(&self) -> usize {
                self.inner.as_ptr() as usize
            }

            /// Returns the end address of this mapping.
            #[inline]
            pub fn end(&self) -> usize {
                self.start() + self.size()
            }

            /// Yields a raw immutable pointer to this mapping.
            #[inline]
            pub fn as_ptr(&self) -> *const u8 {
                self.inner.as_ptr()
            }

            /// Yields a raw mutable pointer to this mapping.
            #[inline]
            pub fn as_mut_ptr(&mut self) -> *mut u8 {
                self.inner.as_mut_ptr()
            }

            /// Yields the size of this mapping.
            #[inline]
            pub fn size(&self) -> usize {
                self.inner.size()
            }

            /// Merges the memory maps into one. The memory maps must be adjacent to each other
            /// and share the same attributes and backing. On success, this consumes the other
            /// memory map object. Otherwise, this returns an error together with the original
            /// memory map that failed to be merged.
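            ///
            /// A compile-only sketch of splitting a mapping and merging it back together,
            /// assuming the crate root re-exports `MmapOptions` (on failure, `merge` hands the
            /// unmerged mapping back):
            ///
            /// ```no_run
            /// # use mmap_rs::MmapOptions;
            /// let page_size = MmapOptions::page_size();
            /// let mut mapping = MmapOptions::new(2 * page_size)?.map_mut()?;
            /// let tail = mapping.split_off(page_size)?;
            /// mapping.merge(tail).map_err(|(e, _tail)| e)?;
            /// # Ok::<(), mmap_rs::Error>(())
            /// ```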
            pub fn merge(&mut self, other: Self) -> Result<(), (Error, Self)> {
                // Ensure the memory maps are adjacent.
                if self.end() != other.start() {
                    return Err((Error::MustBeAdjacent, other));
                }

                // Ensure the protection attributes match.
                let region = match MemoryAreas::query(self.start()) {
                    Ok(Some(region)) => region,
                    Ok(None) => return Err((Error::AttributeMismatch, other)),
                    Err(e) => return Err((e, other)),
                };

                let other_region = match MemoryAreas::query(other.start()) {
                    Ok(Some(region)) => region,
                    Ok(None) => return Err((Error::AttributeMismatch, other)),
                    Err(e) => return Err((e, other)),
                };

                if region.protection != other_region.protection {
                    return Err((Error::AttributeMismatch, other));
                }

                if let Err(e) = self.inner.merge(&other.inner) {
                    return Err((e, other));
                }

                // The other mapping is now owned by `self`; do not run its destructor.
                std::mem::forget(other);

                Ok(())
            }

            /// Splits the memory map into two at the given byte offset. The byte offset must be
            /// page size aligned.
            ///
            /// Afterwards `self` is limited to the range `[0, at)`, and the returned memory
            /// mapping is limited to `[at, len)`.
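            ///
            /// A compile-only sketch (`split_to` is the mirror image, keeping the upper part in
            /// `self`):
            ///
            /// ```no_run
            /// # use mmap_rs::MmapOptions;
            /// let page_size = MmapOptions::page_size();
            /// let mut mapping = MmapOptions::new(2 * page_size)?.map_mut()?;
            /// // `mapping` keeps the first page; `rest` takes the second page.
            /// let rest = mapping.split_off(page_size)?;
            /// assert_eq!(mapping.size(), page_size);
            /// assert_eq!(rest.size(), page_size);
            /// # Ok::<(), mmap_rs::Error>(())
            /// ```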
            pub fn split_off(&mut self, at: usize) -> Result<Self, Error> {
                let inner = self.inner.split_off(at)?;

                Ok(Self { inner })
            }

            /// Splits the memory map into two at the given byte offset. The byte offset must be
            /// page size aligned.
            ///
            /// Afterwards `self` is limited to the range `[at, len)`, and the returned memory
            /// mapping is limited to `[0, at)`.
            pub fn split_to(&mut self, at: usize) -> Result<Self, Error> {
                let inner = self.inner.split_to(at)?;

                Ok(Self { inner })
            }
        }
    };
}

macro_rules! mmap_impl {
    ($t:ident) => {
        impl $t {
            /// Locks the physical pages in memory such that accessing the mapping causes no
            /// page faults.
            pub fn lock(&mut self) -> Result<(), Error> {
                self.inner.lock()
            }

            /// Unlocks the physical pages in memory, allowing the operating system to swap out
            /// the pages backing this memory mapping.
            pub fn unlock(&mut self) -> Result<(), Error> {
                self.inner.unlock()
            }

            /// Flushes a range of the memory mapping, i.e. this initiates writing dirty pages
            /// within that range to the disk. Dirty pages are those whose contents have changed
            /// since the file was mapped.
            ///
            /// On Microsoft Windows, this function does not flush the file metadata. Thus, it
            /// must be followed by a call to [`File::sync_all`] to flush the file metadata. This
            /// also causes the flush operation to be synchronous.
            ///
            /// On other platforms, the flush operation is synchronous, i.e. this waits until the
            /// flush operation completes.
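            ///
            /// A compile-only sketch of writing through a shared, file-backed mapping and then
            /// flushing the dirty pages (the file name is hypothetical):
            ///
            /// ```no_run
            /// # use mmap_rs::{MmapFlags, MmapOptions};
            /// # fn main() -> Result<(), Box<dyn std::error::Error>> {
            /// let file = std::fs::OpenOptions::new().read(true).write(true).open("data.bin")?;
            /// let size = file.metadata()?.len() as usize;
            /// // Safety: we assume no other process mutates the file while it is mapped.
            /// let mut mapping = unsafe {
            ///     MmapOptions::new(size)?
            ///         .with_file(&file, 0)
            ///         .with_flags(MmapFlags::SHARED)
            ///         .map_mut()?
            /// };
            /// mapping[0] = 0x42;
            /// mapping.flush(0..size)?;
            /// # Ok(())
            /// # }
            /// ```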
            pub fn flush(&self, range: Range<usize>) -> Result<(), Error> {
                self.inner.flush(range)
            }

            /// Flushes a range of the memory mapping asynchronously, i.e. this initiates writing
            /// dirty pages within that range to the disk without waiting for the flush operation
            /// to complete. Dirty pages are those whose contents have changed since the file was
            /// mapped.
            pub fn flush_async(&self, range: Range<usize>) -> Result<(), Error> {
                self.inner.flush_async(range)
            }

            /// This function can be used to flush the instruction cache on architectures where
            /// this is required.
            ///
            /// While the x86 and x86-64 architectures guarantee cache coherency between the L1
            /// instruction and the L1 data cache, other architectures such as Arm and AArch64 do
            /// not. If the user modifies the pages, then executing the code may result in
            /// undefined behavior. To ensure correct behavior, a user has to flush the
            /// instruction cache after modifying and before executing the page.
            pub fn flush_icache(&self) -> Result<(), Error> {
                self.inner.flush_icache()
            }

            /// Remaps this memory mapping as inaccessible.
            ///
            /// In case of failure, this returns the ownership of `self`.
            pub fn make_none(mut self) -> Result<MmapNone, (Self, Error)> {
                if let Err(e) = self.inner.make_none() {
                    return Err((self, e));
                }

                Ok(MmapNone { inner: self.inner })
            }

            /// Remaps this memory mapping as immutable.
            ///
            /// In case of failure, this returns the ownership of `self`. If you are not
            /// interested in this feature, you can use the implementation of the [`TryFrom`]
            /// trait instead.
            pub fn make_read_only(mut self) -> Result<Mmap, (Self, Error)> {
                if let Err(e) = self.inner.make_read_only() {
                    return Err((self, e));
                }

                Ok(Mmap { inner: self.inner })
            }

            /// Remaps this memory mapping as executable.
            ///
            /// In case of failure, this returns the ownership of `self`.
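            ///
            /// A compile-only sketch of the W^X-friendly workflow: write code into a mutable
            /// mapping, then remap it as executable (the single `ret` byte is a placeholder for
            /// real code):
            ///
            /// ```no_run
            /// # use mmap_rs::MmapOptions;
            /// let mut mapping = MmapOptions::new(MmapOptions::page_size())?.map_mut()?;
            /// mapping[0] = 0xc3; // `ret` on x86-64
            /// let mapping = mapping.make_exec().map_err(|(_, e)| e)?;
            /// # Ok::<(), mmap_rs::Error>(())
            /// ```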
            pub fn make_exec(mut self) -> Result<Mmap, (Self, Error)> {
                if let Err(e) = self.inner.make_exec() {
                    return Err((self, e));
                }

                if let Err(e) = self.inner.flush_icache() {
                    return Err((self, e));
                }

                Ok(Mmap { inner: self.inner })
            }

            /// Remaps this memory mapping as executable, but does not flush the instruction
            /// cache.
            ///
            /// # Safety
            ///
            /// While the x86 and x86-64 architectures guarantee cache coherency between the L1
            /// instruction and the L1 data cache, other architectures such as Arm and AArch64 do
            /// not. If the user modifies the pages, then executing the code may result in
            /// undefined behavior. To ensure correct behavior, a user has to flush the
            /// instruction cache after modifying and before executing the page.
            ///
            /// In case of failure, this returns the ownership of `self`.
            pub unsafe fn make_exec_no_flush(mut self) -> Result<Mmap, (Self, Error)> {
                if let Err(e) = self.inner.make_exec() {
                    return Err((self, e));
                }

                Ok(Mmap { inner: self.inner })
            }

            /// Remaps this mapping to be mutable.
            ///
            /// In case of failure, this returns the ownership of `self`. If you are not
            /// interested in this feature, you can use the implementation of the [`TryFrom`]
            /// trait instead.
            pub fn make_mut(mut self) -> Result<MmapMut, (Self, Error)> {
                if let Err(e) = self.inner.make_mut() {
                    return Err((self, e));
                }

                Ok(MmapMut { inner: self.inner })
            }

            /// Remaps this mapping to be executable and mutable.
            ///
            /// While this may seem useful for self-modifying code and JIT engines, it is
            /// recommended to convert between mutable and executable mappings using
            /// [`Mmap::make_mut()`] and [`MmapMut::make_exec()`] instead.
            ///
            /// Make sure to read the text below to understand the complications of this function
            /// before using it. The [`UnsafeMmapFlags::JIT`] flag must be set for this function
            /// to succeed.
            ///
            /// # Safety
            ///
            /// RWX pages are interesting targets for attackers, e.g. for buffer overflow
            /// attacks, as RWX mappings can potentially simplify such attacks. Without RWX
            /// mappings, attackers instead have to resort to return-oriented programming (ROP)
            /// gadgets. To prevent buffer overflow attacks, contemporary CPUs allow pages to be
            /// marked as non-executable, which is then used by the operating system to ensure
            /// that pages are either marked as writeable or as executable, but not both. This is
            /// also known as W^X.
            ///
            /// While the x86 and x86-64 architectures guarantee cache coherency between the L1
            /// instruction and the L1 data cache, other architectures such as Arm and AArch64 do
            /// not. If the user modifies the pages, then executing the code may result in
            /// undefined behavior. To ensure correct behavior, a user has to flush the
            /// instruction cache after modifying and before executing the page.
            ///
            /// In case of failure, this returns the ownership of `self`.
            pub unsafe fn make_exec_mut(mut self) -> Result<MmapMut, (Self, Error)> {
                if let Err(e) = self.inner.make_exec_mut() {
                    return Err((self, e));
                }

                Ok(MmapMut { inner: self.inner })
            }
        }
    };
}

/// Represents an inaccessible memory mapping.
#[derive(Debug)]
pub struct MmapNone {
    inner: platform::Mmap,
}

mmap_impl!(MmapNone);
reserved_mmap_impl!(MmapNone);

/// Represents an immutable memory mapping.
#[derive(Debug)]
pub struct Mmap {
    inner: platform::Mmap,
}

mmap_impl!(Mmap);
reserved_mmap_impl!(Mmap);

impl Mmap {
    /// Extracts a slice containing the entire mapping.
    ///
    /// This is equivalent to `&mapping[..]`.
    #[inline]
    pub fn as_slice(&self) -> &[u8] {
        &self[..]
    }
}

impl Deref for Mmap {
    type Target = [u8];

    fn deref(&self) -> &Self::Target {
        unsafe { std::slice::from_raw_parts(self.as_ptr(), self.size()) }
    }
}

impl AsRef<[u8]> for Mmap {
    fn as_ref(&self) -> &[u8] {
        unsafe { std::slice::from_raw_parts(self.as_ptr(), self.size()) }
    }
}

impl TryFrom<MmapMut> for Mmap {
    type Error = Error;

    fn try_from(mmap_mut: MmapMut) -> Result<Self, Self::Error> {
        match mmap_mut.make_read_only() {
            Ok(mmap) => Ok(mmap),
            Err((_, e)) => Err(e),
        }
    }
}

impl TryFrom<MmapNone> for Mmap {
    type Error = Error;

    fn try_from(mmap_none: MmapNone) -> Result<Self, Self::Error> {
        match mmap_none.make_read_only() {
            Ok(mmap) => Ok(mmap),
            Err((_, e)) => Err(e),
        }
    }
}

/// Represents a mutable memory mapping.
#[derive(Debug)]
pub struct MmapMut {
    inner: platform::Mmap,
}

mmap_impl!(MmapMut);
reserved_mmap_impl!(MmapMut);

impl MmapMut {
    /// Extracts a slice containing the entire mapping.
    ///
    /// This is equivalent to `&mapping[..]`.
    #[inline]
    pub fn as_slice(&self) -> &[u8] {
        &self[..]
    }

    /// Extracts a mutable slice containing the entire mapping.
    ///
    /// This is equivalent to `&mut mapping[..]`.
    #[inline]
    pub fn as_mut_slice(&mut self) -> &mut [u8] {
        &mut self[..]
    }
}

impl TryFrom<Mmap> for MmapMut {
    type Error = Error;

    fn try_from(mmap: Mmap) -> Result<Self, Self::Error> {
        match mmap.make_mut() {
            Ok(mmap_mut) => Ok(mmap_mut),
            Err((_, e)) => Err(e),
        }
    }
}

impl TryFrom<MmapNone> for MmapMut {
    type Error = Error;

    fn try_from(mmap_none: MmapNone) -> Result<Self, Self::Error> {
        match mmap_none.make_mut() {
            Ok(mmap_mut) => Ok(mmap_mut),
            Err((_, e)) => Err(e),
        }
    }
}

impl Deref for MmapMut {
    type Target = [u8];

    fn deref(&self) -> &Self::Target {
        unsafe { std::slice::from_raw_parts(self.as_ptr(), self.size()) }
    }
}

impl DerefMut for MmapMut {
    fn deref_mut(&mut self) -> &mut Self::Target {
        unsafe { std::slice::from_raw_parts_mut(self.as_mut_ptr(), self.size()) }
    }
}

impl AsRef<[u8]> for MmapMut {
    fn as_ref(&self) -> &[u8] {
        unsafe { std::slice::from_raw_parts(self.as_ptr(), self.size()) }
    }
}

impl AsMut<[u8]> for MmapMut {
    fn as_mut(&mut self) -> &mut [u8] {
        unsafe { std::slice::from_raw_parts_mut(self.as_mut_ptr(), self.size()) }
    }
}

/// Represents the options for the memory mapping.
#[derive(Debug)]
pub struct MmapOptions<'a> {
    inner: platform::MmapOptions<'a>,
}

impl<'a> MmapOptions<'a> {
    /// Constructs the `MmapOptions` builder. The size specified is the size of the mapping to be
    /// allocated in bytes.
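    ///
    /// A minimal compile-only sketch of allocating one page of anonymous, mutable memory,
    /// assuming the crate root re-exports `MmapOptions`:
    ///
    /// ```no_run
    /// # use mmap_rs::MmapOptions;
    /// let mut mapping = MmapOptions::new(MmapOptions::page_size())?.map_mut()?;
    /// mapping.as_mut_slice().fill(0xaa);
    /// # Ok::<(), mmap_rs::Error>(())
    /// ```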
    pub fn new(size: usize) -> Result<Self, Error> {
        Ok(Self {
            inner: platform::MmapOptions::new(size)?,
        })
    }

    /// Returns the smallest possible page size for the current platform. The allocation size
    /// must be aligned to the page size for the allocation to succeed.
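    ///
    /// For example, rounding a requested size up to the next multiple of the page size (a
    /// sketch; page sizes are powers of two, so a bit mask suffices):
    ///
    /// ```no_run
    /// # use mmap_rs::MmapOptions;
    /// let page_size = MmapOptions::page_size();
    /// let requested = 100;
    /// let aligned = (requested + page_size - 1) & !(page_size - 1);
    /// assert_eq!(aligned % page_size, 0);
    /// ```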
    pub fn page_size() -> usize {
        platform::MmapOptions::page_size()
    }

    /// Returns the set of supported page sizes for the current platform.
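    ///
    /// A compile-only sketch of checking for 2 MiB huge-page support:
    ///
    /// ```no_run
    /// # use mmap_rs::{MmapOptions, PageSizes};
    /// if MmapOptions::page_sizes()?.contains(PageSizes::_2M) {
    ///     // 2 MiB pages can be requested via `MmapOptions::with_page_size`.
    /// }
    /// # Ok::<(), mmap_rs::Error>(())
    /// ```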
    pub fn page_sizes() -> Result<PageSizes, Error> {
        platform::MmapOptions::page_sizes()
    }

    /// Returns the allocation granularity for the current platform. On some platforms the
    /// allocation granularity may be a multiple of the page size. The start address of the
    /// allocation must be aligned to `max(allocation_granularity, page_size)`.
    pub fn allocation_granularity() -> usize {
        platform::MmapOptions::allocation_granularity()
    }

    /// The desired address at which the memory should be mapped.
    pub fn with_address(self, address: usize) -> Self {
        Self {
            inner: self.inner.with_address(address),
        }
    }

    /// Whether the memory mapping should be backed by a [`File`] or not. If so, the user can
    /// also specify the offset within the file at which the mapping should start.
    ///
    /// On Microsoft Windows, it may not be possible to extend the protection beyond the access
    /// mask that has been used to open the file. For instance, if a file has been opened with
    /// read access, then [`Mmap::make_mut()`] will not work. Furthermore,
    /// [`std::fs::OpenOptions`] does not in itself provide a standardized way to open the file
    /// with executable access. However, if the file is not opened with executable access, then
    /// it may not be possible to use [`Mmap::make_exec()`]. Fortunately, Rust provides
    /// [`OpenOptionsExt`] that allows you to open the file with executable access rights. See
    /// [`access_mode`] for more information.
    ///
    /// # Safety
    ///
    /// This function is marked as **unsafe** because, even if a file is mapped as immutable in
    /// the address space of the current process, there is no guarantee that no other mutable
    /// mapping to the file exists.
    ///
    /// On Microsoft Windows, it is possible to limit the access to shared reading or to be fully
    /// exclusive using [`share_mode`].
    ///
    /// On most Unix systems, it is possible to use [`nix::fcntl::flock`]. However, keep in mind
    /// that this provides an **advisory** locking scheme, and that implementations are therefore
    /// required to be co-operative.
    ///
    /// On Linux, it is also possible to mark the file as immutable. See `man 2 ioctl_iflags` and
    /// `man 1 chattr` for more information.
    ///
    /// [`OpenOptionsExt`]: https://doc.rust-lang.org/std/os/windows/fs/trait.OpenOptionsExt.html
    /// [`access_mode`]: https://doc.rust-lang.org/std/os/windows/fs/trait.OpenOptionsExt.html#tymethod.access_mode
    /// [`share_mode`]: https://doc.rust-lang.org/std/os/windows/fs/trait.OpenOptionsExt.html#tymethod.share_mode
    /// [`nix::fcntl::flock`]: https://docs.rs/nix/latest/nix/fcntl/fn.flock.html
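    ///
    /// # Example
    ///
    /// A compile-only sketch of mapping a file as immutable (the file name is hypothetical):
    ///
    /// ```no_run
    /// # use mmap_rs::MmapOptions;
    /// # fn main() -> Result<(), Box<dyn std::error::Error>> {
    /// let file = std::fs::File::open("data.bin")?;
    /// let size = file.metadata()?.len() as usize;
    /// // Safety: we assume no other process mutates the file while it is mapped.
    /// let mapping = unsafe { MmapOptions::new(size)?.with_file(&file, 0).map()? };
    /// println!("{:?}", &mapping[..mapping.size().min(16)]);
    /// # Ok(())
    /// # }
    /// ```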
    pub unsafe fn with_file(self, file: &'a File, offset: u64) -> Self {
        Self {
            inner: self.inner.with_file(file, offset),
        }
    }

    /// The desired configuration of the mapping. See [`MmapFlags`] for available options.
    pub fn with_flags(self, flags: MmapFlags) -> Self {
        Self {
            inner: self.inner.with_flags(flags),
        }
    }

    /// The desired configuration of the mapping. See [`UnsafeMmapFlags`] for available options.
    ///
    /// # Safety
    ///
    /// The flags that can be passed to this function have unsafe behavior associated with them.
    pub unsafe fn with_unsafe_flags(self, flags: UnsafeMmapFlags) -> Self {
        Self {
            inner: self.inner.with_unsafe_flags(flags),
        }
    }

    /// Whether this memory mapping should be backed by a specific page size or not.
    pub fn with_page_size(self, page_size: PageSize) -> Self {
        Self {
            inner: self.inner.with_page_size(page_size),
        }
    }

    /// Reserves inaccessible memory.
    pub fn reserve_none(self) -> Result<ReservedNone, Error> {
        Ok(ReservedNone {
            inner: self.inner.reserve_none()?,
        })
    }

    /// Reserves immutable memory.
    pub fn reserve(self) -> Result<Reserved, Error> {
        Ok(Reserved {
            inner: self.inner.reserve()?,
        })
    }

    /// Reserves executable memory.
    pub fn reserve_exec(self) -> Result<Reserved, Error> {
        Ok(Reserved {
            inner: self.inner.reserve_exec()?,
        })
    }

    /// Reserves mutable memory.
    pub fn reserve_mut(self) -> Result<ReservedMut, Error> {
        Ok(ReservedMut {
            inner: self.inner.reserve_mut()?,
        })
    }

    /// Reserves executable and mutable memory.
    ///
    /// # Safety
    ///
    /// See [`MmapOptions::map_exec_mut`] for more information.
    pub unsafe fn reserve_exec_mut(self) -> Result<ReservedMut, Error> {
        Ok(ReservedMut {
            inner: self.inner.reserve_exec_mut()?,
        })
    }

    /// Maps the memory as inaccessible.
    pub fn map_none(self) -> Result<MmapNone, Error> {
        Ok(MmapNone {
            inner: self.inner.map_none()?,
        })
    }

    /// Maps the memory as immutable.
    pub fn map(self) -> Result<Mmap, Error> {
        Ok(Mmap {
            inner: self.inner.map()?,
        })
    }

    /// Maps the memory as executable.
    pub fn map_exec(self) -> Result<Mmap, Error> {
        Ok(Mmap {
            inner: self.inner.map_exec()?,
        })
    }

    /// Maps the memory as mutable.
    pub fn map_mut(self) -> Result<MmapMut, Error> {
        Ok(MmapMut {
            inner: self.inner.map_mut()?,
        })
    }

    /// Maps the memory as executable and mutable. While this may seem useful for self-modifying
    /// code and JIT engines, it is recommended to convert between mutable and executable
    /// mappings using [`Mmap::make_mut()`] and [`MmapMut::make_exec()`] instead.
    ///
    /// Make sure to read the text below to understand the complications of this function before
    /// using it. The [`UnsafeMmapFlags::JIT`] flag must be set for this function to succeed.
    ///
    /// # Safety
    ///
    /// RWX pages are interesting targets for attackers, e.g. for buffer overflow attacks, as RWX
    /// mappings can potentially simplify such attacks. Without RWX mappings, attackers instead
    /// have to resort to return-oriented programming (ROP) gadgets. To prevent buffer overflow
    /// attacks, contemporary CPUs allow pages to be marked as non-executable, which is then used
    /// by the operating system to ensure that pages are either marked as writeable or as
    /// executable, but not both. This is also known as W^X.
    ///
    /// While the x86 and x86-64 architectures guarantee cache coherency between the L1
    /// instruction and the L1 data cache, other architectures such as Arm and AArch64 do not. If
    /// the user modifies the pages, then executing the code may result in undefined behavior. To
    /// ensure correct behavior, a user has to flush the instruction cache after modifying and
    /// before executing the page.
    pub unsafe fn map_exec_mut(self) -> Result<MmapMut, Error> {
        Ok(MmapMut {
            inner: self.inner.map_exec_mut()?,
        })
    }
}

macro_rules! reserved_impl {
    ($t:ident) => {
        impl $t {
            /// Returns `true` if the memory mapping has a size of 0.
            #[inline]
            pub fn is_empty(&self) -> bool {
                self.inner.size() == 0
            }

            /// Yields the length of this mapping.
            #[inline]
            pub fn len(&self) -> usize {
                self.inner.size()
            }

            /// Remaps this memory mapping as inaccessible.
            ///
            /// In case of failure, this returns the ownership of `self`. If you are not
            /// interested in this feature, you can use the implementation of the [`TryFrom`]
            /// trait instead.
            pub fn make_none(mut self) -> Result<ReservedNone, (Self, Error)> {
                if let Err(e) = self.inner.make_none() {
                    return Err((self, e));
                }

                Ok(ReservedNone { inner: self.inner })
            }

            /// Remaps this memory mapping as immutable.
            ///
            /// In case of failure, this returns the ownership of `self`. If you are not
            /// interested in this feature, you can use the implementation of the [`TryFrom`]
            /// trait instead.
            pub fn make_read_only(mut self) -> Result<Reserved, (Self, Error)> {
                if let Err(e) = self.inner.make_read_only() {
                    return Err((self, e));
                }

                Ok(Reserved { inner: self.inner })
            }

            /// Remaps this memory mapping as executable.
            ///
            /// In case of failure, this returns the ownership of `self`.
            pub fn make_exec(mut self) -> Result<Reserved, (Self, Error)> {
                if let Err(e) = self.inner.make_exec() {
                    return Err((self, e));
                }

                if let Err(e) = self.inner.flush_icache() {
                    return Err((self, e));
                }

                Ok(Reserved { inner: self.inner })
            }

            /// Remaps this memory mapping as executable, but does not flush the instruction
            /// cache.
            ///
            /// # Safety
            ///
            /// While the x86 and x86-64 architectures guarantee cache coherency between the L1
            /// instruction and the L1 data cache, other architectures such as Arm and AArch64 do
            /// not. If the user modifies the pages, then executing the code may result in
            /// undefined behavior. To ensure correct behavior, a user has to flush the
            /// instruction cache after modifying and before executing the page.
            ///
            /// In case of failure, this returns the ownership of `self`.
            pub unsafe fn make_exec_no_flush(mut self) -> Result<Reserved, (Self, Error)> {
                if let Err(e) = self.inner.make_exec() {
                    return Err((self, e));
                }

                Ok(Reserved { inner: self.inner })
            }

            /// Remaps this mapping to be mutable.
            ///
            /// In case of failure, this returns the ownership of `self`. If you are not
            /// interested in this feature, you can use the implementation of the [`TryFrom`]
            /// trait instead.
            pub fn make_mut(mut self) -> Result<ReservedMut, (Self, Error)> {
                if let Err(e) = self.inner.make_mut() {
                    return Err((self, e));
                }

                Ok(ReservedMut { inner: self.inner })
            }

            /// Remaps this mapping to be executable and mutable.
            ///
            /// While this may seem useful for self-modifying code and JIT engines, it is
            /// recommended to convert between mutable and executable mappings using
            /// [`Mmap::make_mut()`] and [`MmapMut::make_exec()`] instead.
            ///
            /// Make sure to read the text below to understand the complications of this function
            /// before using it. The [`UnsafeMmapFlags::JIT`] flag must be set for this function
            /// to succeed.
            ///
            /// # Safety
            ///
            /// RWX pages are interesting targets for attackers, e.g. for buffer overflow
            /// attacks, as RWX mappings can potentially simplify such attacks. Without RWX
            /// mappings, attackers instead have to resort to return-oriented programming (ROP)
            /// gadgets. To prevent buffer overflow attacks, contemporary CPUs allow pages to be
            /// marked as non-executable, which is then used by the operating system to ensure
            /// that pages are either marked as writeable or as executable, but not both. This is
            /// also known as W^X.
            ///
            /// While the x86 and x86-64 architectures guarantee cache coherency between the L1
            /// instruction and the L1 data cache, other architectures such as Arm and AArch64 do
            /// not. If the user modifies the pages, then executing the code may result in
            /// undefined behavior. To ensure correct behavior, a user has to flush the
            /// instruction cache after modifying and before executing the page.
            ///
            /// In case of failure, this returns the ownership of `self`.
            pub unsafe fn make_exec_mut(mut self) -> Result<ReservedMut, (Self, Error)> {
                if let Err(e) = self.inner.make_exec_mut() {
                    return Err((self, e));
                }

                Ok(ReservedMut { inner: self.inner })
            }
        }
    };
}

/// Represents an inaccessible memory mapping in a reserved state, i.e. a memory mapping that is
/// not backed by any physical pages yet.
#[derive(Debug)]
pub struct ReservedNone {
    inner: platform::Mmap,
}

reserved_impl!(ReservedNone);
reserved_mmap_impl!(ReservedNone);

impl TryFrom<ReservedNone> for MmapNone {
    type Error = Error;

    fn try_from(mut reserved_none: ReservedNone) -> Result<MmapNone, Error> {
        reserved_none.inner.commit()?;

        Ok(MmapNone {
            inner: reserved_none.inner,
        })
    }
}

impl TryFrom<ReservedMut> for Reserved {
    type Error = Error;

    fn try_from(reserved_mut: ReservedMut) -> Result<Self, Self::Error> {
        match reserved_mut.make_read_only() {
            Ok(reserved) => Ok(reserved),
            Err((_, e)) => Err(e),
        }
    }
}

impl TryFrom<ReservedNone> for Reserved {
    type Error = Error;

    fn try_from(reserved_none: ReservedNone) -> Result<Self, Self::Error> {
        match reserved_none.make_read_only() {
            Ok(reserved) => Ok(reserved),
            Err((_, e)) => Err(e),
        }
    }
}

/// Represents an immutable memory mapping in a reserved state, i.e. a memory mapping that is not
/// backed by any physical pages yet.
#[derive(Debug)]
pub struct Reserved {
    inner: platform::Mmap,
}

reserved_impl!(Reserved);
reserved_mmap_impl!(Reserved);

impl TryFrom<Reserved> for Mmap {
    type Error = Error;

    fn try_from(mut reserved: Reserved) -> Result<Mmap, Error> {
        reserved.inner.commit()?;

        Ok(Mmap {
            inner: reserved.inner,
        })
    }
}

/// Represents a mutable memory mapping in a reserved state, i.e. a memory mapping that is not
/// backed by any physical pages yet.
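///
/// A compile-only sketch of reserving address space up front and committing it to a usable
/// mapping later:
///
/// ```no_run
/// # use mmap_rs::{MmapMut, MmapOptions};
/// # use std::convert::TryFrom;
/// let reserved = MmapOptions::new(4 * MmapOptions::page_size())?.reserve_mut()?;
/// // Committing backs the reserved range with actual pages.
/// let mut mapping = MmapMut::try_from(reserved)?;
/// mapping.as_mut_slice()[0] = 1;
/// # Ok::<(), mmap_rs::Error>(())
/// ```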
#[derive(Debug)]
pub struct ReservedMut {
    inner: platform::Mmap,
}

reserved_impl!(ReservedMut);
reserved_mmap_impl!(ReservedMut);

impl TryFrom<ReservedMut> for MmapMut {
    type Error = Error;

    fn try_from(mut reserved_mut: ReservedMut) -> Result<MmapMut, Error> {
        reserved_mut.inner.commit()?;

        Ok(MmapMut {
            inner: reserved_mut.inner,
        })
    }
}

impl TryFrom<Reserved> for ReservedMut {
    type Error = Error;

    fn try_from(reserved: Reserved) -> Result<Self, Self::Error> {
        match reserved.make_mut() {
            Ok(reserved_mut) => Ok(reserved_mut),
            Err((_, e)) => Err(e),
        }
    }
}

impl TryFrom<ReservedNone> for ReservedMut {
    type Error = Error;

    fn try_from(reserved_none: ReservedNone) -> Result<Self, Self::Error> {
        match reserved_none.make_mut() {
            Ok(reserved_mut) => Ok(reserved_mut),
            Err((_, e)) => Err(e),
        }
    }
}