#[cfg(windows)]
pub mod _win {
    use super::*;

    use core::ptr::null_mut;

    use crate::globals::IMMIX_BLOCK_SIZE;
    use winapi::um::{
        memoryapi::{VirtualAlloc, VirtualFree},
        winnt::{MEM_COMMIT, MEM_DECOMMIT, MEM_RELEASE, MEM_RESERVE, PAGE_READWRITE},
    };
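    /// A reserved range of virtual memory used as block-aligned storage
    /// (`IMMIX_BLOCK_SIZE`). Pages are committed and decommitted on demand
    /// through `commit` and `dontneed`.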
    pub struct Mmap {
        start: *mut u8,
        end: *mut u8,
        size: usize,
    }

    impl Mmap {
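        /// An empty placeholder mapping: null `start`/`end`, zero size.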
        pub const fn uninit() -> Self {
            Self {
                start: null_mut(),
                end: null_mut(),
                size: 0,
            }
        }
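
        /// Reserves `size` bytes of address space without committing any pages.
        /// Panics if the reservation fails.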
        pub fn new(size: usize) -> Self {
            unsafe {
                let mem = VirtualAlloc(null_mut(), size, MEM_RESERVE, PAGE_READWRITE);
                // VirtualAlloc returns null on failure.
                if mem.is_null() {
                    panic!("VirtualAlloc failed");
                }
                let mem = mem as *mut u8;

                let end = mem.add(size);

                Self {
                    start: mem,
                    end,
                    size,
                }
            }
        }
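
        /// First `IMMIX_BLOCK_SIZE`-aligned address within the mapping.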
        pub fn aligned(&self) -> *mut u8 {
            let offset =
                (IMMIX_BLOCK_SIZE - (self.start as usize) % IMMIX_BLOCK_SIZE) % IMMIX_BLOCK_SIZE;
            unsafe { self.start.add(offset) }
        }

        pub fn start(&self) -> *mut u8 {
            self.start
        }
        pub fn end(&self) -> *mut u8 {
            self.end
        }

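        /// Decommits `size` bytes starting at `page`, returning the backing
        /// physical memory to the OS while keeping the address range reserved.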
        pub fn dontneed(&self, page: *mut u8, size: usize) {
            unsafe {
                VirtualFree(page.cast(), size, MEM_DECOMMIT);
            }
        }

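        /// Commits `size` bytes starting at `page` so they are backed by
        /// readable and writable memory.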
        pub fn commit(&self, page: *mut u8, size: usize) {
            unsafe {
                VirtualAlloc(page.cast(), size, MEM_COMMIT, PAGE_READWRITE);
            }
        }
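
        /// Total size of the reservation in bytes.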
        pub const fn size(&self) -> usize {
            self.size
        }
    }

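    /// Releases the whole reservation when the `Mmap` is dropped.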
    impl Drop for Mmap {
        fn drop(&mut self) {
            // Nothing to release for the `uninit` placeholder.
            if self.start.is_null() {
                return;
            }
            unsafe {
                // With MEM_RELEASE the size argument must be zero; the entire
                // reservation made in `new` is released at once.
                VirtualFree(self.start.cast(), 0, MEM_RELEASE);
            }
        }
    }
}

#[cfg(unix)]
pub mod _unix {

    use std::ptr::null_mut;

    use crate::globals::IMMIX_BLOCK_SIZE;

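    /// A private anonymous mapping used as block-aligned storage
    /// (`IMMIX_BLOCK_SIZE`). Pages are released and re-requested with
    /// `madvise` through `dontneed` and `commit`.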
    pub struct Mmap {
        start: *mut u8,
        end: *mut u8,
        size: usize,
    }

    impl Mmap {
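        /// Total size of the mapping in bytes.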
        pub const fn size(&self) -> usize {
            self.size
        }
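
        /// An empty placeholder mapping: null `start`/`end`, zero size.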
        pub const fn uninit() -> Self {
            Self {
                start: null_mut(),
                end: null_mut(),
                size: 0,
            }
        }
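
        /// Maps `size` bytes of private anonymous memory. Panics if `mmap` fails.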
        pub fn new(size: usize) -> Self {
            unsafe {
                let map = libc::mmap(
                    null_mut(),
                    size as _,
                    libc::PROT_READ | libc::PROT_WRITE,
                    libc::MAP_PRIVATE | libc::MAP_ANON,
                    -1,
                    0,
                );
                // Check for failure before advising the kernel about the mapping.
                if map == libc::MAP_FAILED {
                    panic!("mmap failed");
                }
                libc::madvise(map, size as _, libc::MADV_SEQUENTIAL);
                Self {
                    start: map as *mut u8,
                    end: (map as usize + size) as *mut u8,
                    size,
                }
            }
        }
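
        /// First `IMMIX_BLOCK_SIZE`-aligned address within the mapping.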
        pub fn aligned(&self) -> *mut u8 {
            let offset =
                (IMMIX_BLOCK_SIZE - (self.start as usize) % IMMIX_BLOCK_SIZE) % IMMIX_BLOCK_SIZE;
            unsafe { self.start.add(offset) }
        }

        pub fn start(&self) -> *mut u8 {
            self.start
        }
        pub fn end(&self) -> *mut u8 {
            self.end
        }

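        /// Tells the kernel that `size` bytes starting at `page` are no longer
        /// needed, allowing the backing memory to be reclaimed.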
        pub fn dontneed(&self, page: *mut u8, size: usize) {
            unsafe {
                libc::madvise(page as *mut _, size as _, libc::MADV_DONTNEED);
            }
        }

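        /// Hints that `size` bytes starting at `page` will be needed soon and
        /// will be accessed sequentially.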
        pub fn commit(&self, page: *mut u8, size: usize) {
            unsafe {
                // madvise advice values are plain constants, not bit flags, so
                // WILLNEED and SEQUENTIAL are issued as separate calls.
                libc::madvise(page as *mut _, size as _, libc::MADV_WILLNEED);
                libc::madvise(page as *mut _, size as _, libc::MADV_SEQUENTIAL);
            }
        }
    }

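    /// Unmaps the whole region when the `Mmap` is dropped.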
    impl Drop for Mmap {
        fn drop(&mut self) {
            // Nothing to unmap for the `uninit` placeholder.
            if self.start.is_null() {
                return;
            }
            unsafe {
                libc::munmap(self.start() as *mut _, self.size as _);
            }
        }
    }
}

#[cfg(unix)]
pub use _unix::*;
#[cfg(windows)]
pub use _win::*;