buffet/bufpool/
privatepool.rs

1use std::{cell::UnsafeCell, collections::VecDeque, marker::PhantomData};
2
3use memmap2::MmapMut;
4
5use super::BufMut;
6
/// Convenience alias: this module's `Result`, defaulting the error type to [`Error`].
pub type Result<T, E = Error> = std::result::Result<T, E>;
8
/// Errors produced by the private buffer pool.
#[derive(thiserror::Error, Debug)]
#[non_exhaustive]
pub enum Error {
    /// The anonymous memory mapping backing the pool could not be created.
    #[error("could not mmap buffer")]
    Mmap(#[from] std::io::Error),

    /// No free buffers are left in the pool (or the requested size overflows).
    #[error("out of memory")]
    OutOfMemory,

    /// The provided slice does not fit into the target `RollMut`.
    #[error("slice does not fit into this RollMut")]
    DoesNotFit,
}
21
// NOTE(review): presumably makes `Error` interoperable with the `b_x` error
// crate — confirm what `make_bxable!` expands to.
b_x::make_bxable!(Error);
23
thread_local! {
    // One pool per thread. All pool state is thread-local, which is why the
    // unsafe accesses in `Pool::with` need no synchronization.
    static POOL: Pool = const { Pool::new() };
}
27
/// A buffer pool
///
/// Holds the lazily-initialized [`Inner`] state behind an `UnsafeCell`.
/// Only ever accessed through the thread-local `POOL`, so there is no
/// cross-thread aliasing.
struct Pool {
    // `None` until `initialize_allocator_with_num_bufs` is called.
    inner: UnsafeCell<Option<Inner>>,
}
32
/// The actual pool state: one contiguous allocation carved into
/// `BUF_SIZE`-sized blocks, plus free-list and per-block refcount metadata.
struct Inner {
    // mmapped memory (base pointer of the whole allocation)
    ptr: *mut u8,

    // The mmap object, if we're using an anonymous mapping.
    // This is only used for its `Drop` implementation.
    _mmap: Option<MmapMut>,

    // index of free blocks
    // there's several optimizations we could do here:
    //   - we could use bitmaps to track which slots are free
    //   - we could start by only pushing, say, 1024 free slots and then grow the free list as
    //     needed (when more than 1024 bufs are allocated, we'll grow the free list by another 1024
    //     slots, etc. until we run out of memory)
    free: VecDeque<u32>,

    // ref counts start as all zeroes, get incremented when a block is borrowed
    ref_counts: Vec<i16>,
}
52
53impl Pool {
54    const fn new() -> Self {
55        Self {
56            inner: UnsafeCell::new(None),
57        }
58    }
59
60    #[inline(always)]
61    fn with<T>(&self, f: impl FnOnce(&mut Inner) -> T) -> T {
62        let inner = self.inner.get();
63        let inner = unsafe { Option::as_mut(&mut *inner).unwrap() };
64        f(inner)
65    }
66}
67
// Convenience wrapper: runs `f` against this thread's pool.
// Panics if the pool has not been initialized for this thread.
#[inline(always)]
fn with<T>(f: impl FnOnce(&mut Inner) -> T) -> T {
    POOL.with(|pool| pool.with(f))
}
72
/// The size of a buffer, in bytes (4 KiB)
///
/// Every block handed out by [`alloc`] is exactly this large.
pub const BUF_SIZE: u16 = 4096;
75
/// Returns true if this thread's pool has already been initialized.
pub fn is_allocator_initialized() -> bool {
    // SAFETY: POOL is thread-local and the shared reference into the cell
    // does not escape the closure.
    POOL.with(|pool| unsafe { (*pool.inner.get()).is_some() })
}
79
80/// Initializes the allocator with the given number of buffers
81pub fn initialize_allocator_with_num_bufs(num_bufs: u32) -> Result<()> {
82    POOL.with(|pool| {
83        if unsafe { (*pool.inner.get()).is_some() } {
84            return Ok(());
85        }
86
87        let mut inner = Inner {
88            ptr: std::ptr::null_mut(),
89            _mmap: None,
90            free: VecDeque::from_iter(0..num_bufs),
91            ref_counts: vec![0; num_bufs as usize],
92        };
93
94        let alloc_len = num_bufs as usize * BUF_SIZE as usize;
95
96        #[cfg(feature = "miri")]
97        {
98            let mut map = vec![0; alloc_len];
99            inner.ptr = map.as_mut_ptr();
100            std::mem::forget(map);
101        }
102
103        #[cfg(not(feature = "miri"))]
104        {
105            let mut map = memmap2::MmapOptions::new().len(alloc_len).map_anon()?;
106            inner.ptr = map.as_mut_ptr();
107            inner._mmap = Some(map);
108        }
109
110        unsafe {
111            (*pool.inner.get()) = Some(inner);
112        }
113
114        Ok(())
115    })
116}
117
118/// Returns the number of free buffers in the pool
119pub fn num_free() -> usize {
120    with(|inner| inner.free.len())
121}
122
123/// Allocate a buffer
124pub fn alloc() -> Result<BufMut> {
125    with(|inner| {
126        if let Some(index) = inner.free.pop_front() {
127            inner.ref_counts[index as usize] += 1;
128            Ok(BufMut {
129                index,
130                off: 0,
131                len: BUF_SIZE as _,
132                _non_send: PhantomData,
133            })
134        } else {
135            Err(Error::OutOfMemory)
136        }
137    })
138}
139
140/// Increment the reference count of the given buffer
141pub fn inc(index: u32) {
142    with(|inner| {
143        inner.ref_counts[index as usize] += 1;
144    })
145}
146
147/// Decrement the reference count of the given buffer.
148/// If the reference count reaches zero, the buffer is freed.
149pub fn dec(index: u32) {
150    with(|inner| {
151        let slot = &mut inner.ref_counts[index as usize];
152        *slot -= 1;
153        if *slot == 0 {
154            inner.free.push_back(index);
155        }
156    })
157}
158
/// Returns a pointer `offset` bytes past the start of the buffer at `index`.
///
/// # Safety
///
/// The caller must ensure that `index` refers to a buffer within the pool's
/// mapping and that `index * BUF_SIZE + offset` stays in bounds of the
/// allocation; the returned pointer is only valid while the pool is alive.
/// Panics if the pool is not initialized.
#[inline(always)]
pub unsafe fn base_ptr_with_offset(index: u32, offset: isize) -> *mut u8 {
    with(|inner| {
        inner
            .ptr
            .byte_offset(offset + index as isize * BUF_SIZE as isize)
    })
}