// reloaded_memory_buffers/internal/buffer_allocator.rs
use crate::structs::errors::BufferAllocationError;
2use crate::structs::internal::LocatorItem;
3use crate::structs::params::BufferAllocatorSettings;
4use crate::utilities::address_range::AddressRange;
5use crate::utilities::mathematics::{
6 add_with_overflow_cap, round_down, round_up, subtract_with_underflow_cap,
7};
8
/// Allocates a buffer according to `settings`, dispatching to the
/// platform-specific allocator for the current compile target.
///
/// `settings` is sanitized in place before the allocation is attempted.
///
/// # Errors
///
/// Returns a [`BufferAllocationError`] when the platform allocator cannot
/// satisfy the request.
#[cfg_attr(feature = "size_opt", optimize(size))]
pub fn allocate(
    settings: &mut BufferAllocatorSettings,
) -> Result<LocatorItem, BufferAllocationError> {
    settings.sanitize();

    // Exactly one of the branches below is compiled in for any given target;
    // the `cfg`-gated `return`s keep the others out of the build entirely.
    #[cfg(target_os = "windows")]
    return crate::internal::buffer_allocator_windows::allocate_windows(settings);

    #[cfg(target_os = "linux")]
    return crate::internal::buffer_allocator_linux::allocate_linux(settings);

    #[cfg(target_os = "macos")]
    return crate::internal::buffer_allocator_osx::allocate_osx(settings);

    // Fallback for every other target: the generic mmap-rs based allocator.
    #[cfg(not(any(target_os = "macos", target_os = "windows", target_os = "linux")))]
    crate::internal::buffer_allocator_mmap_rs::allocate_mmap_rs(settings)
}
28
29pub unsafe fn get_possible_buffer_addresses(
30 minimum_ptr: usize,
31 maximum_ptr: usize,
32 page_start: usize,
33 page_end: usize,
34 buf_size: usize,
35 allocation_granularity: usize,
36 results: &mut [usize; 4],
37) -> &[usize] {
38 let min_max_range = AddressRange::new(minimum_ptr, maximum_ptr);
40 let page_range = AddressRange::new(page_start, page_end);
41
42 if !page_range.overlaps(&min_max_range) {
44 return &results[0..0];
45 }
46
47 if buf_size > page_range.size() {
50 return &results[0..0]; }
52
53 let mut num_items = 0;
62
63 let page_min_aligned = round_up(page_range.start_pointer, allocation_granularity);
64 let page_min_range = AddressRange::new(
65 page_min_aligned,
66 add_with_overflow_cap(page_min_aligned, buf_size),
67 );
68
69 if page_range.contains(&page_min_range) && min_max_range.contains(&page_min_range) {
70 results[num_items] = page_min_range.start_pointer;
71 num_items += 1;
72 }
73
74 let page_max_aligned = round_down(
76 subtract_with_underflow_cap(page_range.end_pointer, buf_size),
77 allocation_granularity,
78 );
79 let page_max_range = AddressRange::new(page_max_aligned, page_max_aligned + buf_size);
80
81 if page_range.contains(&page_max_range) && min_max_range.contains(&page_max_range) {
82 results[num_items] = page_max_range.start_pointer;
83 num_items += 1;
84 }
85
86 let ptr_min_aligned = round_up(minimum_ptr, allocation_granularity);
90 let ptr_min_range = AddressRange::new(
91 ptr_min_aligned,
92 add_with_overflow_cap(ptr_min_aligned, buf_size),
93 );
94
95 if page_range.contains(&ptr_min_range) && min_max_range.contains(&ptr_min_range) {
96 results[num_items] = ptr_min_range.start_pointer;
97 num_items += 1;
98 }
99
100 let ptr_max_aligned = round_down(
102 subtract_with_underflow_cap(maximum_ptr, buf_size),
103 allocation_granularity,
104 );
105 let ptr_max_range = AddressRange::new(ptr_max_aligned, ptr_max_aligned + buf_size);
106
107 if page_range.contains(&ptr_max_range) && min_max_range.contains(&ptr_max_range) {
108 results[num_items] = ptr_max_range.start_pointer;
109 num_items += 1;
110 }
111
112 &results[0..num_items]
113}
114
#[cfg(test)]
mod tests {
    use super::*;
    #[cfg(target_os = "windows")]
    use crate::internal::buffer_allocator_windows::{Kernel32, LocalKernel32};
    use crate::utilities::cached::get_sys_info;
    use std::ffi::c_void;

    const ALLOCATION_GRANULARITY: usize = 65536;

    /// Runs `get_possible_buffer_addresses` against a fresh 4-slot buffer
    /// using the shared granularity, and returns the candidates as a Vec.
    fn candidates(
        min_ptr: usize,
        max_ptr: usize,
        page_start: usize,
        page_end: usize,
        buf_size: usize,
    ) -> Vec<usize> {
        let mut slots = [0usize; 4];
        unsafe {
            get_possible_buffer_addresses(
                min_ptr,
                max_ptr,
                page_start,
                page_end,
                buf_size,
                ALLOCATION_GRANULARITY,
                &mut slots,
            )
            .to_vec()
        }
    }

    #[test]
    fn page_does_not_overlap_with_min_max() {
        // The page lies entirely above [min, max], so no candidate may exist.
        let min_ptr = 100000;
        let max_ptr = 200000;
        let page_start = max_ptr + 1;
        let page_end = page_start + 50000;

        assert!(candidates(min_ptr, max_ptr, page_start, page_end, 30000).is_empty());
    }

    #[test]
    fn buffer_size_greater_than_page() {
        // The requested buffer is bigger than the whole page.
        let min_ptr = 100000;
        let max_ptr = 200000;
        let page_start = min_ptr;
        let page_end = page_start + 30000;

        assert!(candidates(min_ptr, max_ptr, page_start, page_end, 50000).is_empty());
    }

    #[test]
    fn round_up_from_ptr_min() {
        // Page begins below `min_ptr`; a candidate rounded up from the
        // minimum pointer should still be produced.
        let min_ptr = 100000;
        let max_ptr = 200000;
        let page_start = min_ptr - 50000;
        let page_end = page_start + 200000;

        let first = candidates(min_ptr, max_ptr, page_start, page_end, 30000)[0];
        assert!(first > 0);
    }

    #[test]
    fn round_up_from_page_min() {
        // Page begins above `min_ptr`; the first candidate should be the
        // page start rounded up to the allocation granularity.
        let min_ptr = 1;
        let max_ptr = 200000;
        let page_start = min_ptr + 5000;
        let page_end = page_start + 100000;

        let first = candidates(min_ptr, max_ptr, page_start, page_end, 30000)[0];
        assert_eq!(first, round_up(page_start, ALLOCATION_GRANULARITY));
    }

    #[test]
    fn round_down_from_ptr_max() {
        // Page extends beyond `max_ptr`; the first candidate should be the
        // maximum pointer rounded down, leaving room for the buffer.
        let min_ptr = 10000;
        let max_ptr = 200000 - 5000;
        let buf_size = 30000;
        let page_start = 80000;
        let page_end = page_start + 1000000;

        let first = candidates(min_ptr, max_ptr, page_start, page_end, buf_size)[0];
        assert_eq!(first, round_down(max_ptr - buf_size, ALLOCATION_GRANULARITY));
    }

    #[test]
    fn round_down_from_page_max() {
        // Page ends below `max_ptr`; the first candidate should be the page
        // end rounded down, leaving room for the buffer.
        let min_ptr = 1;
        let max_ptr = 200000;
        let buf_size = 30000;
        let page_start = min_ptr;
        let page_end = page_start + 120000 - 5000;

        let first = candidates(min_ptr, max_ptr, page_start, page_end, buf_size)[0];
        assert_eq!(first, round_down(page_end - buf_size, ALLOCATION_GRANULARITY));
    }

    #[test]
    #[cfg(not(target_os = "macos"))]
    fn can_allocate_in_2gib() {
        let mut settings = BufferAllocatorSettings {
            min_address: 0,
            max_address: i32::MAX as usize,
            size: 4096,
            target_process_id: get_sys_info().this_process_id,
            retry_count: 8,
            brute_force: false,
        };

        let item = allocate(&mut settings).unwrap();
        assert_ne!(item.base_address.value, 0);
        assert!(item.size >= settings.size);
        free(item);
    }

    #[test]
    fn can_allocate_up_to_max_address() {
        let mut settings = BufferAllocatorSettings {
            min_address: get_sys_info().max_address / 2,
            max_address: get_sys_info().max_address,
            size: 4096,
            target_process_id: get_sys_info().this_process_id,
            retry_count: 8,
            brute_force: false,
        };

        let item = allocate(&mut settings).unwrap();
        assert_ne!(item.base_address.value, 0);
        assert!(item.size >= settings.size);
        free(item);
    }

    /// Releases a buffer allocated by the tests via the platform API.
    fn free(item: LocatorItem) {
        #[cfg(target_os = "windows")]
        free_windows(item);

        #[cfg(unix)]
        free_libc(item);
    }

    #[cfg(target_os = "windows")]
    fn free_windows(item: LocatorItem) {
        let k32 = LocalKernel32 {};
        assert!(k32.virtual_free(item.base_address.value as *mut c_void, 0));
    }

    #[cfg(unix)]
    fn free_libc(item: LocatorItem) {
        // SAFETY: the address/size pair came from a successful allocation
        // performed earlier in the same test.
        unsafe {
            libc::munmap(item.base_address.value as *mut c_void, item.size as usize);
        }
    }
}