rd_util/anon_area.rs

use super::PAGE_SIZE;
use num::Integer;
use rand::rngs::SmallRng;
use rand::SeedableRng;
use std::alloc::{alloc, dealloc, Layout};
use std::cell::RefCell;

// Per-thread RNG so that page contents can be generated without cross-thread
// locking.
std::thread_local!(static RNG: RefCell<SmallRng> = RefCell::new(SmallRng::from_entropy()));

struct AnonUnit {
    data: *mut u8,
    layout: Layout,
}

impl AnonUnit {
    fn new(size: usize) -> Self {
        let layout = Layout::from_size_align(size, *PAGE_SIZE).unwrap();
        Self {
            data: unsafe { alloc(layout) },
            layout,
        }
    }
}

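// SAFETY: `data` points to a heap allocation owned by this `AnonUnit` alone.
// The raw pointer makes the type `!Send`/`!Sync` by default; these impls
// assert that cross-thread access is coordinated externally by the users of
// `AnonArea` (see its doc comment).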
unsafe impl Send for AnonUnit {}
unsafe impl Sync for AnonUnit {}

impl Drop for AnonUnit {
    fn drop(&mut self) {
        unsafe {
            dealloc(self.data, self.layout);
        }
    }
}

/// Anonymous memory which can be shared by multiple threads with RwLock
/// protection. Accesses to memory positions only require read locking for both
/// reads and writes.
pub struct AnonArea {
    units: Vec<AnonUnit>,
    size: usize,
    comp: f64,
}

impl AnonArea {
    const UNIT_SIZE: usize = 32 << 20;

    pub fn new(size: usize, comp: f64) -> Self {
        let mut area = AnonArea {
            units: Vec::new(),
            size: 0,
            comp,
        };
        area.resize(size);
        area
    }

    pub fn size(&self) -> usize {
        self.size
    }

    pub fn resize(&mut self, mut size: usize) {
        size = size.max(Self::UNIT_SIZE);
        // Round up to whole 32 MiB units, e.g. a 48 MiB request takes two units.
        let nr = Integer::div_ceil(&size, &Self::UNIT_SIZE);

        self.units.truncate(nr);
        self.units.reserve(nr);
        for _ in self.units.len()..nr {
            self.units.push(AnonUnit::new(Self::UNIT_SIZE));
        }

        self.size = size;
    }

    /// Determine the page index given the relative position `rel` and `size`
    /// of the anon area. `rel` is in the range [-1.0, 1.0], with 0.0 mapping
    /// to the first page, positive positions to even page indices and
    /// negative ones to odd, so that modulating the amplitude of `rel`
    /// changes how much of the area is accessed without shifting the center.
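    ///
    /// For example (hypothetical numbers), with 4 KiB pages and `size` =
    /// 64 MiB, `rel` = 0.5 maps to byte offset 16 MiB and thus page index
    /// 8192, while `rel` = -0.5 maps to the adjacent odd index 8193.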
    pub fn rel_to_page_idx(rel: f64, size: usize) -> usize {
        let addr = ((size / 2) as f64 * rel.abs()) as usize;
        let mut page_idx = (addr / *PAGE_SIZE) * 2;
        if rel.is_sign_negative() {
            page_idx += 1;
        }
        page_idx.min(size / *PAGE_SIZE - 1)
    }

    /// Return a mutable slice of `T` covering the page at `page_idx`. The
    /// anon area is shared and there's no access control.
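    ///
    /// For example (hypothetical usage), `area.access_page::<u64>(idx)`
    /// yields a `&mut [u64]` with `PAGE_SIZE / 8` elements backed by one page.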
    pub fn access_page<'a, T>(&'a self, page_idx: usize) -> &'a mut [T] {
        assert!(page_idx < self.size / *PAGE_SIZE);
        let pages_per_unit = Self::UNIT_SIZE / *PAGE_SIZE;
        let pos = (
            page_idx / pages_per_unit,
            (page_idx % pages_per_unit) * *PAGE_SIZE,
        );
        unsafe {
            let ptr = self.units[pos.0].data.offset(pos.1 as isize);
            let ptr = ptr.cast::<T>();
            std::slice::from_raw_parts_mut(ptr, *PAGE_SIZE / std::mem::size_of::<T>())
        }
    }

    pub fn fill_page_with_random(&self, page_idx: usize) {
        RNG.with(|s| {
            super::fill_area_with_random(
                self.access_page::<u8>(page_idx),
                self.comp,
                &mut *s.borrow_mut(),
            )
        });
    }
}
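
// A minimal usage sketch (hypothetical test, not part of the original file).
// It assumes `PAGE_SIZE` resolves to the system page size and only exercises
// the mapping and access paths defined above.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn rel_mapping_and_page_access() {
        let area = AnonArea::new(AnonArea::UNIT_SIZE, 1.0);

        // 0.0 maps to the first page; positive rels land on even page
        // indices, negative rels on odd ones.
        assert_eq!(AnonArea::rel_to_page_idx(0.0, area.size()), 0);
        assert_eq!(AnonArea::rel_to_page_idx(0.5, area.size()) % 2, 0);
        assert_eq!(AnonArea::rel_to_page_idx(-0.5, area.size()) % 2, 1);

        // Slices returned by access_page alias the same underlying memory,
        // so a write through one view is visible through another.
        area.access_page::<u8>(0)[0] = 0xa5;
        assert_eq!(area.access_page::<u8>(0)[0], 0xa5);
    }
}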