1use crate::traits::{AsPages, Protectable};
4
5use std::cell::SyncUnsafeCell;
6use std::convert::TryInto;
7use std::intrinsics::likely;
8use std::io::Error;
9use std::marker::PhantomData;
10use std::mem::{MaybeUninit, ManuallyDrop};
11use std::ops::Range;
12use std::ptr::{self, NonNull};
13use std::sync::atomic::{AtomicUsize, Ordering};
14
15#[cfg(windows)]
16use winapi::um::winnt;
17
18#[cfg(unix)]
19use std::ffi::c_void;
20
21#[cfg(windows)]
22use winapi::ctypes::c_void;
23
#[cfg(doc)]
/// Memory-protection level for a range of pages.
///
/// This is the documentation-only stub; the real definitions below map each
/// variant onto the platform's native protection constant.
pub enum Protection {
    /// Pages may be neither read nor written.
    NoAccess,

    /// Pages may only be read.
    ReadOnly,

    /// Pages may be read and written.
    ReadWrite
}
36
#[cfg(all(unix, not(doc)))]
/// Memory-protection level, represented directly as `PROT_*` flags so the
/// value can be passed to `mmap`/`mprotect` with a plain cast.
#[repr(i32)]
pub enum Protection {
    NoAccess = libc::PROT_NONE,
    ReadOnly = libc::PROT_READ,
    ReadWrite = libc::PROT_READ | libc::PROT_WRITE,
}
44
#[cfg(all(windows, not(doc)))]
/// Memory-protection level, represented directly as `PAGE_*` constants so the
/// value can be passed to `VirtualAlloc`/`VirtualProtect` with a plain cast.
#[repr(u32)]
pub enum Protection {
    NoAccess = winnt::PAGE_NOACCESS,
    ReadOnly = winnt::PAGE_READONLY,
    ReadWrite = winnt::PAGE_READWRITE,
}
52
/// A borrowed view over whole, page-aligned memory pages.
///
/// Does not own the mapping; the lifetime `'t` (tracked via `PhantomData`)
/// ties the view to whatever owns the underlying memory.
#[must_use]
#[derive(Debug)]
pub struct Pages<'t>(NonNull<[u8]>, PhantomData<&'t ()>);
57
/// An owned memory mapping obtained from the OS (`mmap` / `VirtualAlloc`),
/// released on drop.
#[must_use]
#[derive(Debug)]
pub struct Allocation(NonNull<[u8]>);
62
/// An [`Allocation`] whose accessible interior is bracketed on both sides by
/// `N` inaccessible guard pages (default: one page each side).
#[must_use]
#[derive(Debug)]
pub struct GuardedAlloc<const N: usize = 1>(Allocation);
67
// Cached system page size; written exactly once by `init()` (a `#[ctor]`
// constructor that runs before `main`) and read everywhere else.
// NOTE(review): reading an uninitialized `MaybeUninit` static through
// `as_ref_unchecked` is sound only because the `#[ctor]` store always happens
// first; a plain `static AtomicUsize = AtomicUsize::new(0)` would carry no
// such ordering obligation — TODO consider simplifying.
static PAGE_SIZE: MaybeUninit<SyncUnsafeCell<AtomicUsize>> = MaybeUninit::uninit();

// Cached Windows allocation granularity (typically 64 KiB), also set by `init()`.
#[cfg(windows)]
static GRANULARITY: MaybeUninit<SyncUnsafeCell<AtomicUsize>> = MaybeUninit::uninit();
74
/// Module constructor: caches the system page size (and, on Windows, the
/// allocation granularity) into the statics above before `main` runs, so the
/// `granularity()` accessors never need a syscall.
#[ctor::ctor]
fn init() {
    #[cfg(unix)] {
        use libc::{sysconf, _SC_PAGESIZE};

        let pg = unsafe { sysconf(_SC_PAGESIZE) };
        // sysconf returns -1 on error; a real page size is always positive.
        assert!(pg > 0);

        // SAFETY: this constructor runs before any reader of PAGE_SIZE.
        unsafe { SyncUnsafeCell::raw_get(PAGE_SIZE.as_ptr()).as_ref_unchecked() }
            .store(pg.try_into().unwrap(), Ordering::SeqCst);
    };

    #[cfg(windows)] {
        use winapi::um::sysinfoapi::{SYSTEM_INFO, GetSystemInfo};

        let mut si = MaybeUninit::<SYSTEM_INFO>::uninit();
        // SAFETY: GetSystemInfo fully initializes the struct it is handed.
        unsafe { GetSystemInfo(si.as_mut_ptr()); }

        // SAFETY: this constructor runs before any reader of PAGE_SIZE.
        unsafe { SyncUnsafeCell::raw_get(PAGE_SIZE.as_ptr()).as_ref_unchecked() }
            .store(unsafe { si.assume_init() }.dwPageSize.try_into().unwrap(), Ordering::SeqCst);

        // SAFETY: likewise for GRANULARITY.
        unsafe { SyncUnsafeCell::raw_get(GRANULARITY.as_ptr()).as_ref_unchecked() }
            .store(unsafe { si.assume_init() }.dwAllocationGranularity.try_into().unwrap(), Ordering::SeqCst);
    };
}
100
101impl<'t> Pages<'t> {
102 #[must_use]
103 pub fn granularity() -> usize {
104 unsafe { SyncUnsafeCell::raw_get(PAGE_SIZE.as_ptr()).as_ref_unchecked() }
105 .load(Ordering::Relaxed)
106 }
107
108 #[must_use]
109 pub fn align(offset: usize) -> usize {
110 offset.next_multiple_of(Self::granularity())
111 }
112
113 pub unsafe fn from_slice(slice: NonNull<[u8]>) -> Pages<'t> {
119 debug_assert_eq!(slice.as_ptr().cast::<u8>().align_offset(Self::granularity()), 0);
121 debug_assert_eq!(Self::align(slice.len()), slice.len());
122
123 Self(slice, PhantomData)
124 }
125
126 pub unsafe fn from_raw_parts(ptr: NonNull<u8>, size: usize) -> Pages<'t> {
133 Self::from_slice(NonNull::slice_from_raw_parts(ptr, size))
134 }
135
136 pub unsafe fn from_ptr<T>(ptr: *mut T, size: usize) -> Pages<'t> {
147 Self::from_raw_parts(NonNull::new(ptr.cast::<u8>()).unwrap(), size)
148 }
149
150 #[must_use]
151 pub fn as_ptr<T>(&self) -> *mut T {
152 debug_assert!(std::mem::align_of::<T>() < Self::granularity());
153 self.0.as_ptr().cast::<T>()
154 }
155
156 #[must_use] #[inline]
157 pub const fn into_slice(self) -> NonNull<[u8]> {
158 self.0
159 }
160
161 #[must_use] #[inline]
162 pub fn size(&self) -> usize {
163 self.0.len()
164 }
165
166 #[must_use]
167 pub fn len(&self) -> usize {
168 self.size() / Self::granularity()
169 }
170
171 #[must_use] #[inline]
172 pub fn is_empty(&self) -> bool {
173 self.size() == 0
174 }
175
176 #[allow(clippy::missing_errors_doc)]
177 pub fn protect(&self, prot: Protection) -> Result<(), Error> {
178 #[cfg(unix)] {
179 use libc::mprotect;
180 use std::os::raw::c_int;
181
182 match unsafe { mprotect(self.as_ptr::<c_void>(), self.0.len(), prot as c_int) } {
183 0 => Ok(()),
184 _ => Err(Error::last_os_error()),
185 }
186 }
187
188 #[cfg(windows)] {
189 use winapi::shared::minwindef::DWORD;
190 use winapi::um::memoryapi::VirtualProtect;
191
192 let mut old = MaybeUninit::<DWORD>::uninit();
193 match unsafe { VirtualProtect(self.as_ptr::<c_void>(), self.0.len(), prot as DWORD, old.as_mut_ptr()) } {
194 0 => Err(Error::last_os_error()),
195 _ => Ok(()),
196 }
197 }
198 }
199
200 #[allow(clippy::missing_errors_doc)]
201 pub fn lock(&self) -> Result<(), Error> {
202 #[cfg(unix)] {
203 use libc::mlock;
204
205 match unsafe { mlock(self.as_ptr::<c_void>(), self.0.len()) } {
206 0 => Ok(()),
207 _ => Err(Error::last_os_error()),
208 }
209 }
210
211 #[cfg(windows)] {
212 use winapi::um::memoryapi::VirtualLock;
213
214 match unsafe { VirtualLock(self.as_ptr::<c_void>(), self.0.len()) } {
215 0 => Err(Error::last_os_error()),
216 _ => Ok(()),
217 }
218 }
219 }
220
221 #[allow(clippy::missing_errors_doc)]
222 pub fn unlock(&self) -> Result<(), Error> {
223 #[cfg(unix)] {
224 use libc::munlock;
225
226 match unsafe { munlock(self.as_ptr::<c_void>(), self.0.len()) } {
227 0 => Ok(()),
228 _ => Err(Error::last_os_error()),
229 }
230 }
231
232 #[cfg(windows)] {
233 use winapi::um::memoryapi::VirtualUnlock;
234
235 match unsafe { VirtualUnlock(self.as_ptr::<c_void>(), self.0.len()) } {
236 0 => Err(Error::last_os_error()),
237 _ => Ok(()),
238 }
239 }
240 }
241
242 #[must_use]
243 pub fn pages(&'t self, range: Range<usize>) -> Option<Pages<'t>> {
244 if likely(range.start < self.len() && range.end <= self.len()) {
245 Some(unsafe {
246 Self::from_ptr(self.as_ptr::<u8>().add(range.start * Self::granularity()),
247 (range.end - range.start - 1) * Self::granularity())
248 })
249 } else {
250 None
251 }
252 }
253}
254
impl Allocation {
    /// Allocation granularity in bytes: the page size on Unix, the system
    /// allocation granularity (typically 64 KiB) on Windows.
    #[must_use]
    pub fn granularity() -> usize {
        #[cfg(unix)] {
            Pages::granularity()
        }

        #[cfg(windows)] {
            // SAFETY: GRANULARITY is written by `init()` before `main`, so it
            // is initialized by the time any caller can reach this load.
            unsafe { SyncUnsafeCell::raw_get(GRANULARITY.as_ptr()).as_ref_unchecked() }
                .load(Ordering::Relaxed)
        }
    }

    /// Rounds `offset` up to the next multiple of the allocation granularity.
    #[must_use]
    pub fn align(offset: usize) -> usize {
        offset.next_multiple_of(Self::granularity())
    }

    /// Takes ownership of an existing mapping described by `slice`.
    ///
    /// # Safety
    /// `slice` must describe a whole OS allocation (address and length aligned
    /// to [`Self::granularity()`]) not owned by anything else — it will be
    /// released when the returned value drops.
    pub unsafe fn from_slice(slice: NonNull<[u8]>) -> Self {
        debug_assert_eq!(slice.as_ptr().cast::<u8>().align_offset(Self::granularity()), 0);
        debug_assert_eq!(Self::align(slice.len()), slice.len());

        Self(slice)
    }

    /// Takes ownership of `size` bytes at `ptr`.
    ///
    /// # Safety
    /// Same contract as [`Self::from_slice`].
    pub unsafe fn from_raw_parts(ptr: NonNull<u8>, size: usize) -> Self {
        Self::from_slice(NonNull::slice_from_raw_parts(ptr, size))
    }

    /// Takes ownership of `size` bytes at the raw pointer `ptr`.
    ///
    /// # Safety
    /// Same contract as [`Self::from_slice`]; additionally `ptr` must not be
    /// null (a null pointer panics via the `unwrap`).
    pub unsafe fn from_ptr<T>(ptr: *mut T, size: usize) -> Self {
        Self::from_raw_parts(NonNull::new(ptr.cast::<u8>()).unwrap(), size)
    }

    /// Base address of the allocation, cast to `*mut T`.
    #[must_use]
    pub fn as_ptr<T>(&self) -> *mut T {
        // The base is granularity-aligned, so any smaller alignment is satisfied.
        debug_assert!(std::mem::align_of::<T>() < Self::granularity());
        self.0.as_ptr().cast::<T>()
    }

    /// Releases ownership WITHOUT unmapping and returns the base pointer.
    #[must_use] #[inline]
    pub fn into_ptr<T>(self) -> *mut T {
        // ManuallyDrop suppresses the Drop impl so the mapping stays alive.
        ManuallyDrop::new(self).as_ptr()
    }

    /// Releases ownership WITHOUT unmapping and returns the whole slice.
    #[must_use] #[inline]
    pub fn into_slice(self) -> NonNull<[u8]> {
        ManuallyDrop::new(self).0
    }

    /// Size of the allocation in bytes.
    #[must_use] #[inline]
    pub fn size(&self) -> usize {
        self.0.len()
    }

    /// Number of PAGES in the allocation (page units, not allocation-granularity
    /// units — this matches the indices accepted by [`Self::pages`]).
    #[must_use]
    pub fn len(&self) -> usize {
        self.size() / Pages::granularity()
    }

    /// Returns `true` if the allocation is zero-sized.
    #[must_use] #[inline]
    pub fn is_empty(&self) -> bool {
        self.size() == 0
    }

    /// Maps `size` bytes (rounded up to the allocation granularity) of fresh
    /// anonymous memory with the given protection, via `mmap` / `VirtualAlloc`.
    #[allow(clippy::missing_errors_doc)]
    pub fn new(size: usize, prot: Protection) -> Result<Self, Error> {
        let size = Self::align(size);

        #[cfg(unix)] {
            use libc::{mmap, MAP_PRIVATE, MAP_ANON, MAP_FAILED};
            use std::os::raw::c_int;

            match unsafe { mmap(ptr::null_mut(), size, prot as c_int, MAP_PRIVATE | MAP_ANON, -1, 0) } {
                MAP_FAILED => Err(Error::last_os_error()),
                addr => Ok(unsafe { Self::from_ptr(addr, size) }),
            }
        }

        #[cfg(windows)] {
            use winapi::shared::minwindef::DWORD;
            use winapi::shared::ntdef::NULL;
            use winapi::um::memoryapi::VirtualAlloc;
            use winapi::um::winnt::{MEM_COMMIT, MEM_RESERVE};

            match unsafe { VirtualAlloc(ptr::null_mut(), size, MEM_COMMIT | MEM_RESERVE, prot as DWORD) } {
                NULL => Err(Error::last_os_error()),
                addr => Ok(unsafe { Self::from_ptr(addr, size) }),
            }
        }
    }

    /// Shrinks the allocation to `size` bytes (rounded up to a whole page),
    /// returning the trailing pages to the OS (`munmap` on Unix, `MEM_DECOMMIT`
    /// on Windows).
    ///
    /// Panics (via the `assert`) if `size` is not strictly smaller than the
    /// current size.
    ///
    /// NOTE(review): the new size is PAGE-aligned, not allocation-aligned; on
    /// Windows, where the two granularities differ, the shrunken size can trip
    /// the alignment `debug_assert` in `from_slice` — TODO confirm intended.
    #[allow(clippy::missing_errors_doc)]
    pub fn shrink(self, size: usize) -> Result<Self, Error> {
        assert!(size < self.0.len());

        let size = Pages::align(size);
        let diff = self.0.len() - size;

        // After alignment the requested size may equal the current size, in
        // which case there is nothing to release.
        if diff > 0 {
            #[cfg(unix)] {
                use libc::munmap;

                // Unmap only the tail; on success, re-wrap the head (into_ptr
                // suppresses Drop so the head is not unmapped twice).
                match unsafe { munmap(self.as_ptr::<u8>().add(size).cast::<c_void>(), diff) } {
                    0 => Ok(unsafe { Self::from_ptr(self.into_ptr::<c_void>(), size) }),
                    _ => Err(Error::last_os_error()),
                }
            }

            #[cfg(windows)] {
                use winapi::um::memoryapi::VirtualFree;
                use winapi::um::winnt::MEM_DECOMMIT;

                // Decommit (not release) the tail: the address space stays
                // reserved, so the final MEM_RELEASE in Drop remains valid.
                match unsafe { VirtualFree(self.as_ptr::<u8>().add(size).cast::<c_void>(), diff, MEM_DECOMMIT) } {
                    0 => Err(Error::last_os_error()),
                    _ => Ok(unsafe { Self::from_ptr(self.into_ptr::<c_void>(), size) }),
                }
            }
        } else {
            Ok(self)
        }
    }

    /// Returns the sub-range of pages selected by `range` (in pages), borrowed
    /// from this allocation, or `None` if the range falls outside it.
    #[must_use]
    pub fn pages(&self, range: Range<usize>) -> Option<Pages> {
        if likely(range.start < self.len() && range.end <= self.len()) {
            Some(unsafe {
                // SAFETY: the bounds check above keeps the sub-range inside
                // this mapping, and page multiples preserve page alignment.
                Pages::from_ptr(self.as_ptr::<u8>().add(range.start * Pages::granularity()),
                    (range.end - range.start) * Pages::granularity())
            })
        } else {
            None
        }
    }
}
415
impl Drop for Allocation {
    fn drop(&mut self) {
        // Unmapping fails only on programmer error (bad address/length), so a
        // failure here deliberately panics with the OS error rather than
        // silently leaking the mapping.
        #[cfg(unix)] {
            use libc::munmap;

            assert_eq!(unsafe { munmap(self.as_ptr::<c_void>(), self.0.len()) }, 0,
                "{}", Error::last_os_error());
        }

        #[cfg(windows)] {
            use winapi::um::memoryapi::VirtualFree;
            use winapi::um::winnt::MEM_RELEASE;

            // MEM_RELEASE requires a size of 0: the whole reservation is freed.
            assert_ne!(unsafe { VirtualFree(self.as_ptr::<c_void>(), 0, MEM_RELEASE) }, 0,
                "{}", Error::last_os_error());
        }
    }
}
434
impl<const N: usize> GuardedAlloc<N> {
    /// Number of guard pages placed on EACH side of the inner region.
    pub const GUARD_PAGES: usize = N;

    /// Size in bytes of one guard region (`N` pages).
    #[must_use]
    pub fn guard_size() -> usize {
        Self::GUARD_PAGES * Pages::granularity()
    }

    /// Total allocation size needed for `size` usable bytes plus both guard
    /// regions, rounded up to the allocation granularity.
    #[must_use]
    pub fn outer_size(size: usize) -> usize {
        Allocation::align(size + 2 * Self::guard_size())
    }

    /// Usable (inner) size actually provided by an allocation requested for
    /// `size` bytes (may exceed `size` due to alignment).
    #[must_use]
    pub fn inner_size(size: usize) -> usize {
        Self::outer_size(size) - 2 * Self::guard_size()
    }

    /// Allocates `size` usable bytes with protection `prot`, bracketed on both
    /// sides by `N` inaccessible guard pages.
    #[allow(clippy::missing_errors_doc)]
    pub fn new(size: usize, prot: Protection) -> Result<Self, Error> {
        // Map everything NoAccess first, then open up only the inner region;
        // the outer pages remain the guards.
        let alloc = Self(Allocation::new(Self::outer_size(size), Protection::NoAccess)?);

        if likely(!alloc.inner().is_empty()) {
            alloc.inner().protect(prot)?;
        }

        Ok(alloc)
    }

    /// The accessible pages between the two guard regions.
    #[allow(clippy::missing_panics_doc)]
    pub fn inner(&self) -> Pages {
        self.0.pages(Self::GUARD_PAGES .. self.0.len() - Self::GUARD_PAGES).unwrap()
    }

    /// Reconstructs a guarded allocation from the base pointer of its INNER
    /// region (e.g. one previously released by [`Self::into_slice`]).
    ///
    /// # Safety
    /// `base` must be the inner base of a live guarded allocation that was
    /// created with the same `N` and an inner size of `inner` bytes.
    pub unsafe fn from_raw_parts(base: NonNull<u8>, inner: usize) -> Self {
        debug_assert_eq!(base.as_ptr().align_offset(Pages::granularity()), 0);

        // Step back over the leading guard to recover the outer base.
        let ptr = base.as_ptr().sub(Self::guard_size());
        let outer = Self::outer_size(inner);

        debug_assert_eq!(ptr.align_offset(Allocation::granularity()), 0);
        debug_assert_eq!(Allocation::align(outer), outer);

        Self(Allocation::from_ptr(ptr, outer))
    }

    /// Like [`Self::from_raw_parts`] for a raw pointer.
    ///
    /// # Safety
    /// Same contract as [`Self::from_raw_parts`]; additionally `base` must not
    /// be null (a null pointer panics via the `unwrap`).
    pub unsafe fn from_ptr<T>(base: *mut T, inner: usize) -> Self {
        Self::from_raw_parts(NonNull::new(base.cast::<u8>()).unwrap(), inner)
    }

    /// Releases ownership and returns the inner region as a slice. The outer
    /// mapping (guards included) is deliberately NOT unmapped (`ManuallyDrop`);
    /// it can be reclaimed later via [`Self::from_raw_parts`].
    #[must_use] #[allow(clippy::missing_panics_doc)]
    pub fn into_slice(self) -> NonNull<[u8]> {
        let len = self.0.len();
        ManuallyDrop::new(self.0).pages(Self::GUARD_PAGES .. len - Self::GUARD_PAGES).unwrap().into_slice()
    }

    /// Like [`Self::into_slice`], but wraps the result in [`Pages`].
    pub fn into_pages(self) -> Pages<'static> {
        unsafe { Pages::from_slice(self.into_slice()) }
    }

    /// Shrinks the usable region to `size` bytes: the pages that become the new
    /// trailing guard are re-protected NoAccess, then everything beyond the new
    /// outer size is returned to the OS.
    #[allow(clippy::missing_errors_doc, clippy::missing_panics_doc)]
    pub fn shrink(self, size: usize) -> Result<Self, Error> {
        let outer = Self::outer_size(size);

        if outer < self.0.size() {
            // `pages` is the new outer size in pages; its last GUARD_PAGES
            // pages become the trailing guard of the shrunken allocation.
            let pages = outer / Pages::granularity();
            self.0.pages(pages - Self::GUARD_PAGES .. pages).unwrap().protect(Protection::NoAccess)?;
            Ok(Self(self.0.shrink(outer)?))
        } else {
            Ok(self)
        }
    }
}
524
525impl<T: AsPages> Protectable for T {
526 fn lock(&self) -> Result<(), Error> {
527 if let Some(pages) = self.as_pages() {
528 pages.protect(Protection::NoAccess)?;
529 }
530
531 Ok(())
532 }
533
534 fn unlock(&self) -> Result<(), Error> {
535 if let Some(pages) = self.as_pages() {
536 pages.protect(Protection::ReadOnly)?;
537 }
538
539 Ok(())
540 }
541
542 fn unlock_mut(&mut self) -> Result<(), Error> {
543 if let Some(pages) = self.as_pages() {
544 pages.protect(Protection::ReadWrite)?;
545 }
546
547 Ok(())
548 }
549}
550
#[cfg(test)]
mod tests {
    use super::*;

    /// Sanity-checks the cached page size against known per-arch values.
    #[test]
    fn page_size() {
        assert!(Pages::granularity().is_power_of_two());

        assert!(Pages::granularity() >= 4096);

        #[cfg(target_arch = "riscv64")]
        assert!(Pages::granularity() == 4096
            || Pages::granularity() == 65536);

        #[cfg(target_arch = "aarch64")]
        assert!(Pages::granularity() == 4096
            || Pages::granularity() == 16384
            || Pages::granularity() == 65536);

        #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
        assert_eq!(Pages::granularity(), 4096);
    }

    /// The allocation granularity must be a page-aligned power of two.
    #[test]
    fn alloc_size() {
        assert!(Allocation::granularity().is_power_of_two());
        assert!(Allocation::granularity() >= Pages::granularity());
        assert_eq!(Pages::align(Allocation::granularity()), Allocation::granularity());
    }

    /// Allocates `samples` random sizes from `range` and verifies the memory
    /// is zeroed, writable, and readable back.
    fn raw_range(range: std::ops::Range<usize>, samples: usize) {
        use rand::SeedableRng;
        use rand::distr::{Distribution, Uniform};

        let mut rng = rand_xoshiro::Xoshiro256PlusPlus::from_os_rng();
        let dist = Uniform::try_from(range).unwrap();

        for _ in 0..samples {
            let size = dist.sample(&mut rng);

            eprintln!("Allocating {} bytes", size);

            let alloc = Allocation::new(size, Protection::ReadWrite).unwrap();

            // The actual size is rounded up to the allocation granularity.
            assert!(alloc.size() >= size);

            let slice = unsafe { std::slice::from_raw_parts_mut(alloc.as_ptr::<u8>(), alloc.size()) };

            // Fresh anonymous pages must be zeroed.
            for elem in slice.iter() {
                assert_eq!(*elem, 0);
            }

            slice.fill(0x55);

            for elem in slice.iter() {
                assert_eq!(*elem, 0x55);
            }
        }
    }

    #[test]
    fn raw_tiny() {
        raw_range(1..4096, 4095);
    }

    #[test]
    fn raw_small() {
        raw_range(4096..65536, 256);
    }

    #[test]
    fn raw_medium() {
        raw_range(65536..4194304, 64);
    }

    #[test]
    fn raw_large() {
        raw_range(4194304..16777216, 16);
    }

    #[test]
    fn raw_huge() {
        raw_range(4194304..268435456, 4);
    }

    /// Verifies each Protection level by catching faults with `bulletproof`.
    #[cfg(target_os = "linux")]
    #[test]
    fn raw_protection() {
        use bulletproof::Bulletproof;

        let size = Allocation::granularity();
        let bp = unsafe { Bulletproof::new() };
        let alloc = Allocation::new(size, Protection::NoAccess).unwrap();
        let ptr = alloc.as_ptr::<u8>();

        // NoAccess: neither loads nor stores may succeed.
        for i in 0..size {
            assert_eq!(unsafe { bp.load(ptr.add(i)) }, Err(()));
            assert_eq!(unsafe { bp.store(ptr.add(i), &0xff) }, Err(()));
        }

        alloc.pages(0 .. size / Pages::granularity()).unwrap().protect(Protection::ReadOnly).unwrap();

        // ReadOnly: loads succeed (still zeroed), stores fault.
        for i in 0..size {
            assert_eq!(unsafe { bp.load(ptr.add(i)) }, Ok(0));
            assert_eq!(unsafe { bp.store(ptr.add(i), &0x55) }, Err(()));
        }

        alloc.pages(0 .. size / Pages::granularity()).unwrap().protect(Protection::ReadWrite).unwrap();

        // ReadWrite: stores succeed and are read back.
        for i in 0..size {
            assert_eq!(unsafe { bp.load(ptr.add(i)) }, Ok(0));
            assert_eq!(unsafe { bp.store(ptr.add(i), &0x55) }, Ok(()));
            assert_eq!(unsafe { bp.load(ptr.add(i)) }, Ok(0x55));
        }
    }

    /// Shrinking must keep the head intact and unmap the tail.
    #[cfg(target_os = "linux")]
    #[test]
    fn raw_shrink() {
        use bulletproof::Bulletproof;

        let size_0 = std::cmp::max(Allocation::granularity(), 2 * Pages::granularity());
        let bp = unsafe { Bulletproof::new() };
        let alloc_0 = Allocation::new(size_0, Protection::ReadWrite).unwrap();
        assert_eq!(alloc_0.size(), size_0);

        let ptr = alloc_0.as_ptr::<u8>();

        for i in 0..size_0 {
            assert_eq!(unsafe { bp.load(ptr.add(i)) }, Ok(0));
            assert_eq!(unsafe { bp.store(ptr.add(i), &0x55) }, Ok(()));
            assert_eq!(unsafe { bp.load(ptr.add(i)) }, Ok(0x55));
        }

        let size_1 = size_0 - Pages::granularity();

        let alloc_1 = alloc_0.shrink(size_1).unwrap();
        assert_eq!(alloc_1.size(), size_1);

        std::thread::yield_now();

        // Head survives the shrink; the unmapped tail must fault.
        for i in 0..size_1 {
            assert_eq!(unsafe { bp.load(ptr.add(i)) }, Ok(0x55));
        }

        for i in size_1 .. size_0 {
            assert_eq!(unsafe { bp.load(ptr.add(i)) }, Err(()));
        }
    }

    /// Same as `raw_range`, but through the guarded allocator's inner region.
    fn guarded_range(range: std::ops::Range<usize>, samples: usize) {
        use rand::SeedableRng;
        use rand::distr::{Distribution, Uniform};

        let mut rng = rand_xoshiro::Xoshiro256PlusPlus::from_os_rng();
        let dist = Uniform::try_from(range).unwrap();

        for _ in 0..samples {
            let size = dist.sample(&mut rng);

            eprintln!("Allocating {} bytes", size);

            let alloc = GuardedAlloc::<1>::new(size, Protection::ReadWrite).unwrap();

            assert!(alloc.inner().size() >= size);

            let slice = unsafe { std::slice::from_raw_parts_mut(alloc.inner().as_ptr::<u8>(), alloc.inner().size()) };

            for elem in slice.iter() {
                assert_eq!(*elem, 0);
            }

            slice.fill(0x55);

            for elem in slice.iter() {
                assert_eq!(*elem, 0x55);
            }
        }
    }

    #[test]
    fn guarded_tiny() {
        guarded_range(0..4096, 4096);
    }

    #[test]
    fn guarded_small() {
        guarded_range(4096..65536, 256);
    }

    #[test]
    fn guarded_medium() {
        guarded_range(65536..4194304, 64);
    }

    #[test]
    fn guarded_large() {
        guarded_range(4194304..16777216, 16);
    }

    #[test]
    fn guarded_huge() {
        guarded_range(4194304..268435456, 4);
    }

    /// Both guard regions must fault while the inner region is accessible.
    #[cfg(target_os = "linux")]
    #[test]
    fn guarded_guard() {
        use bulletproof::Bulletproof;

        let size = Allocation::granularity();
        let bp = unsafe { Bulletproof::new() };
        let alloc = GuardedAlloc::<1>::new(size, Protection::ReadWrite).unwrap();
        let ptr = alloc.inner().as_ptr::<u8>();

        // Leading guard: everything below the inner base must fault.
        for i in 1 ..= GuardedAlloc::<1>::guard_size() {
            assert_eq!(unsafe { bp.load(ptr.sub(i)) }, Err(()));
        }

        for i in 0 .. size {
            assert_eq!(unsafe { bp.load(ptr.add(i)) }, Ok(0));
            assert_eq!(unsafe { bp.store(ptr.add(i), &0x55) }, Ok(()));
            assert_eq!(unsafe { bp.load(ptr.add(i)) }, Ok(0x55));
        }

        // Trailing guard: starts at the end of the inner region. BUGFIX: this
        // previously iterated `size .. guard_size()`, an empty range whenever
        // `size >= guard_size()` (always true here), so it checked nothing.
        for i in size .. size + GuardedAlloc::<1>::guard_size() {
            assert_eq!(unsafe { bp.load(ptr.add(i)) }, Err(()));
        }
    }

    /// Shrinking a guarded allocation must keep the head readable and turn the
    /// pages past the new inner size into a faulting guard.
    #[cfg(target_os = "linux")]
    #[test]
    fn guarded_shrink() {
        use crate::pages::Allocation;
        use bulletproof::Bulletproof;

        let size_0 = std::cmp::max(Allocation::granularity(), 2 * GuardedAlloc::<1>::guard_size());

        let bp = unsafe { Bulletproof::new() };
        let alloc_0 = GuardedAlloc::<1>::new(size_0, Protection::ReadWrite).unwrap();
        let ptr = alloc_0.inner().as_ptr::<u8>();

        for i in 0..size_0 {
            assert_eq!(unsafe { bp.load(ptr.add(i)) }, Ok(0));
        }

        // Trailing guard before the shrink. BUGFIX: previously iterated
        // `size_0 .. guard_size()`, an empty range (size_0 >= guard_size()),
        // so the guard was never actually probed.
        for i in size_0 .. size_0 + GuardedAlloc::<1>::guard_size() {
            assert_eq!(unsafe { bp.load(ptr.add(i)) }, Err(()));
        }

        let size_1 = size_0 - GuardedAlloc::<1>::guard_size();
        let alloc_1 = alloc_0.shrink(size_1).unwrap();

        // The inner base must not move when shrinking.
        assert_eq!(alloc_1.inner().as_ptr::<u8>(), ptr);

        std::thread::yield_now();

        for i in 0 .. size_1 {
            assert_eq!(unsafe { bp.load(ptr.add(i)) }, Ok(0));
        }

        // New trailing guard after the shrink. BUGFIX: previously iterated
        // `size_1 .. guard_size()`, also an empty range.
        for i in size_1 .. size_1 + GuardedAlloc::<1>::guard_size() {
            assert_eq!(unsafe { bp.load(ptr.add(i)) }, Err(()));
        }
    }
}