mod bitmap;

use core::{
    alloc::Layout,
    mem::{self, MaybeUninit},
    num::NonZeroUsize,
    panic,
    ptr::{self, NonNull},
    sync::atomic::{AtomicPtr, AtomicUsize, Ordering::*},
};

use self::bitmap::Bitmap;
use crate::{
    base::{BaseAlloc, Chunk},
    slab::{Slab, SlabRef},
    track,
};

const BYTE_WIDTH: usize = u8::BITS as usize;

pub const SLAB_SHIFT: usize = 2 + 10 + 10;
pub const SLAB_SIZE: usize = 1 << SLAB_SHIFT;

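/// Returns the layout of `n` contiguous slabs: `n << SLAB_SHIFT` bytes aligned
/// to `SLAB_SIZE`. Panics if the resulting layout is invalid.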
pub const fn slab_layout(n: usize) -> Layout {
    match Layout::from_size_align(n << SLAB_SHIFT, SLAB_SIZE) {
        Ok(layout) => layout,
        Err(_) => panic!("invalid slab layout"),
    }
}

pub(crate) const SHARD_SHIFT: usize = 6 + 10;
pub(crate) const SHARD_SIZE: usize = 1 << SHARD_SHIFT;

pub(crate) const SHARD_COUNT: usize = SLAB_SIZE / SHARD_SIZE;

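/// A large, slab-aligned region of memory obtained from the base allocator.
///
/// Non-exclusive arenas hand out slabs of `SLAB_SIZE` bytes through a bitmap
/// stored behind the header; exclusive arenas back a single direct allocation
/// and carry no bitmap.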
struct Arena<B: BaseAlloc> {
    arena_id: usize,
    chunk: Chunk<B>,
    header: Chunk<B>,
    is_exclusive: bool,
    slab_count: usize,
    search_index: AtomicUsize,
}

impl<B: BaseAlloc> Arena<B> {
    const LAYOUT: Layout = Layout::new::<Self>();

    fn header_layout(slab_count: usize, is_exclusive: bool) -> Layout {
        if !is_exclusive {
            let bitmap_size = slab_count
                .div_ceil(BYTE_WIDTH)
                .next_multiple_of(mem::size_of::<AtomicUsize>());
            let n = bitmap_size / mem::size_of::<AtomicUsize>();

            let bitmap_layout = Layout::array::<AtomicUsize>(n).unwrap();
            let (header_layout, offset) = Self::LAYOUT.extend(bitmap_layout).unwrap();
            assert_eq!(offset, Self::LAYOUT.size());
            header_layout
        } else {
            Self::LAYOUT
        }
    }

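    /// Writes the arena header into `header` and, for non-exclusive arenas,
    /// zeroes the trailing bitmap and marks the bits beyond `slab_count` as
    /// permanently occupied.
    ///
    /// # Safety
    ///
    /// `header` must have been allocated with the layout returned by
    /// `Self::header_layout(slab_count, is_exclusive)`.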
    unsafe fn from_dynamic<'a>(
        header: Chunk<B>,
        chunk: Chunk<B>,
        is_exclusive: bool,
        slab_count: usize,
    ) -> &'a mut Arena<B> {
        let arena = unsafe {
            let pointer = header.pointer().cast::<Arena<B>>();
            pointer.as_uninit_mut().write(Arena {
                arena_id: 0,
                chunk,
                header,
                is_exclusive,
                slab_count,
                search_index: Default::default(),
            })
        };

        if !is_exclusive {
            unsafe {
                let maybe = arena.bitmap_ptr().as_uninit_slice_mut();
                maybe.fill(MaybeUninit::new(0));
            }
            let bitmap = arena.bitmap();
            bitmap.set::<true>(slab_count.try_into().unwrap()..bitmap.len());
        }

        arena
    }

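    /// Allocates a fresh arena of `slab_count` slabs from `base`, together
    /// with a separate header chunk, and initializes it in place.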
    fn new<'a>(
        base: &B,
        slab_count: usize,
        align: Option<usize>,
        is_exclusive: bool,
    ) -> Result<&'a mut Self, Error<B>> {
        let layout = match align {
            Some(align) => slab_layout(slab_count)
                .align_to(align)
                .expect("invalid align"),
            None => slab_layout(slab_count),
        };

        let header_layout = Self::header_layout(slab_count, is_exclusive);

        let chunk = base.allocate(layout, is_exclusive).map_err(Error::Alloc)?;
        let header = base.allocate(header_layout, true).map_err(Error::Alloc)?;

        Ok(unsafe { Self::from_dynamic(header, chunk, is_exclusive, slab_count) })
    }

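    /// Wraps a caller-provided, `SLAB_SIZE`-aligned chunk in a new arena,
    /// allocating only the header from `base`.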
    fn new_chunk<'a>(base: &B, chunk: Chunk<B>) -> Result<&'a mut Self, Error<B>> {
        let size = chunk.pointer().len();
        let slab_count = size / SLAB_SIZE;
        assert!(chunk.pointer().is_aligned_to(SLAB_SIZE));

        let header_layout = Self::header_layout(slab_count, false);
        let header = base.allocate(header_layout, true).map_err(Error::Alloc)?;

        Ok(unsafe { Self::from_dynamic(header, chunk, false, slab_count) })
    }

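    /// Drops the arena in place, releasing its header and data chunks.
    ///
    /// # Safety
    ///
    /// `arena` must point to a valid, initialized arena that is no longer
    /// referenced elsewhere.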
    unsafe fn drop(arena: NonNull<Self>) {
        drop(unsafe { arena.read() });
    }

    fn bitmap_ptr(&self) -> NonNull<[u8]> {
        let (ptr, _) = self.header.pointer().to_raw_parts();
        NonNull::from_raw_parts(
            ptr.map_addr(|addr| addr.checked_add(Self::LAYOUT.size()).unwrap()),
            self.slab_count
                .div_ceil(BYTE_WIDTH)
                .next_multiple_of(mem::size_of::<AtomicUsize>()),
        )
    }

    fn bitmap(&self) -> &Bitmap {
        let (ptr, len) = self.bitmap_ptr().to_raw_parts();
        let slice = NonNull::from_raw_parts(ptr, len / mem::size_of::<AtomicUsize>());
        Bitmap::new(unsafe { slice.as_ref() })
    }

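    /// Reserves `count` contiguous slabs in the bitmap, resuming the search
    /// from the cached index, and returns the corresponding memory range.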
    fn allocate_slices(&self, count: usize) -> Option<NonNull<[u8]>> {
        let start = self.search_index.load(Relaxed);
        let (idx, bit) = self.bitmap().allocate(start, count.try_into().ok()?)?;
        self.search_index.store(idx, Relaxed);

        let offset = (idx * BYTE_WIDTH + (bit as usize)) * SLAB_SIZE;
        let data = unsafe { self.chunk.pointer().byte_add(offset) };
        Some(NonNull::from_raw_parts(data.cast(), SLAB_SIZE * count))
    }

    #[cfg(debug_assertions)]
    fn check_ptr(&self, ptr: NonNull<u8>) -> bool {
        let addr = ptr.as_ptr().addr();
        let (origin, size) = self.chunk.pointer().to_raw_parts();
        let origin = origin.as_ptr().addr();
        (origin..origin + size).contains(&addr)
    }

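    /// Tries to carve `count` slabs out of this arena, committing the slab
    /// header through `base` and initializing it for `thread_id`. Returns
    /// `None` when no suitable free range exists.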
    fn allocate(
        &self,
        thread_id: u64,
        count: usize,
        align: usize,
        is_large_or_huge: bool,
        base: &B,
    ) -> Option<Result<SlabRef, Error<B>>> {
        debug_assert!(align <= SLAB_SIZE);
        let ptr = self.allocate_slices(count)?;

        let (addr, _) = ptr.to_raw_parts();
        let commit = unsafe { base.commit(NonNull::from_raw_parts(addr, mem::size_of::<Slab>())) };
        let res = commit.map(|_| unsafe {
            Slab::init(
                ptr,
                thread_id,
                self.arena_id,
                is_large_or_huge,
                B::IS_ZEROED,
            )
        });
        Some(res.map_err(Error::Commit))
    }

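    /// Decommits the slab's memory through `base` and clears its range in the
    /// bitmap, returning the number of slabs released.
    ///
    /// # Safety
    ///
    /// `slab` must have been allocated from this arena and must not be used
    /// afterwards.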
    unsafe fn deallocate(&self, slab: SlabRef, base: &B) -> usize {
        let raw = slab.into_raw();
        unsafe { base.decommit(raw) };
        let (ptr, len) = raw.to_raw_parts();
        let offset = unsafe { ptr.cast::<u8>().sub_ptr(self.chunk.pointer().cast()) };

        let (start, end) = (offset / SLAB_SIZE, (offset + len) / SLAB_SIZE);
        self.bitmap().set::<false>((start as u32)..(end as u32));
        end - start
    }
}

const MAX_ARENAS: usize = 112;
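/// A lock-free collection of up to `MAX_ARENAS` arenas sharing a single base
/// allocator, along with the lists of abandoned slabs awaiting reclamation.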
pub struct Arenas<B: BaseAlloc> {
    pub(crate) base: B,
    arenas: [AtomicPtr<Arena<B>>; MAX_ARENAS],
    arena_count: AtomicUsize,
    abandoned: AtomicPtr<()>,
    abandoned_visited: AtomicPtr<()>,
}

impl<B: BaseAlloc> Arenas<B> {
    #[allow(clippy::declare_interior_mutable_const)]
    const ARENA_INIT: AtomicPtr<Arena<B>> = AtomicPtr::new(ptr::null_mut());

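    /// Creates an empty arena collection backed by `base`.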
    pub const fn new(base: B) -> Self {
        Arenas {
            base,
            arenas: [Self::ARENA_INIT; MAX_ARENAS],
            arena_count: AtomicUsize::new(0),
            abandoned: AtomicPtr::new(ptr::null_mut()),
            abandoned_visited: AtomicPtr::new(ptr::null_mut()),
        }
    }

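    /// Publishes `arena` into the first free slot and assigns it a 1-based
    /// `arena_id`. When every slot is occupied, this either panics or, with
    /// the `err_on_exhaustion` cfg, drops the arena and returns
    /// [`Error::ArenaExhausted`].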
    fn push_arena<'a>(&'a self, arena: &'a mut Arena<B>) -> Result<&'a Arena<B>, Error<B>> {
        let mut index = self.arena_count.load(Relaxed);
        loop {
            break if index < MAX_ARENAS {
                if let Err(i) = self
                    .arena_count
                    .compare_exchange(index, index + 1, AcqRel, Acquire)
                {
                    index = i;
                    continue;
                }
                if self.arenas[index]
                    .compare_exchange(ptr::null_mut(), arena, AcqRel, Acquire)
                    .is_err()
                {
                    index = self.arena_count.load(Relaxed);
                    continue;
                }
                arena.arena_id = index + 1;
                Ok(arena)
            } else if let Some(index) = self.arenas.iter().position(|slot| {
                slot.compare_exchange(ptr::null_mut(), arena, AcqRel, Acquire)
                    .is_ok()
            }) {
                arena.arena_id = index + 1;
                Ok(arena)
            } else {
                #[cfg(not(err_on_exhaustion))]
                panic!("ARENA EXHAUSTED");
                #[cfg(err_on_exhaustion)]
                {
                    unsafe { Arena::drop(arena.into()) };
                    Err(Error::ArenaExhausted)
                }
            };
        }
    }

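    /// Pushes an abandoned slab onto the lock-free `abandoned` list.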
    fn push_abandoned(&self, slab: SlabRef) {
        debug_assert!(slab.is_abandoned());
        let mut next = self.abandoned.load(Relaxed);
        loop {
            slab.abandoned_next.store(next, Relaxed);
            match self.abandoned.compare_exchange_weak(
                next,
                slab.as_ptr().as_ptr(),
                AcqRel,
                Acquire,
            ) {
                Ok(_) => break,
                Err(e) => next = e,
            }
        }
    }

    fn push_abandoned_visited(&self, slab: SlabRef) {
        debug_assert!(slab.is_abandoned());
        let mut next = self.abandoned_visited.load(Relaxed);
        loop {
            slab.abandoned_next.store(next, Relaxed);
            match self.abandoned_visited.compare_exchange_weak(
                next,
                slab.as_ptr().as_ptr(),
                AcqRel,
                Acquire,
            ) {
                Ok(_) => break,
                Err(e) => next = e,
            }
        }
    }

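    /// Moves the entire `abandoned_visited` list back onto the `abandoned`
    /// list so it can be scanned again. Returns `false` if there was nothing
    /// to move.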
    fn reappend_abandoned_visited(&self) -> bool {
        if self.abandoned_visited.load(Relaxed).is_null() {
            return false;
        }
        let first = self.abandoned_visited.swap(ptr::null_mut(), AcqRel);
        if first.is_null() {
            return false;
        };

        if self.abandoned.load(Relaxed).is_null()
            && self
                .abandoned
                .compare_exchange(ptr::null_mut(), first, AcqRel, Acquire)
                .is_ok()
        {
            return true;
        }

        let mut last = first;
        let last = loop {
            let next = unsafe { (*last.cast::<Slab>()).abandoned_next.load(Relaxed) };
            last = match next.is_null() {
                true => break last,
                false => next,
            };
        };

        let mut next = self.abandoned.load(Relaxed);
        loop {
            unsafe { (*last.cast::<Slab>()).abandoned_next.store(next, Relaxed) };
            match self
                .abandoned
                .compare_exchange_weak(next, first, AcqRel, Acquire)
            {
                Ok(_) => break,
                Err(e) => next = e,
            }
        }

        true
    }

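    /// Pops one slab off the `abandoned` list, refilling it from the visited
    /// list when it is empty.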
    fn pop_abandoned(&self) -> Option<SlabRef> {
        let mut next = self.abandoned.load(Relaxed);
        if next.is_null() && !self.reappend_abandoned_visited() {
            return None;
        }
        next = self.abandoned.load(Relaxed);
        loop {
            let ptr = NonNull::new(next)?;
            let new_next = unsafe { (*next.cast::<Slab>()).abandoned_next.load(Relaxed) };
            match self
                .abandoned
                .compare_exchange_weak(next, new_next, AcqRel, Acquire)
            {
                Ok(_) => break Some(unsafe { SlabRef::from_ptr(ptr) }),
                Err(e) => next = e,
            }
        }
    }

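    /// Examines up to `MAX_TRIAL` abandoned slabs, returning the first one
    /// whose remaining allocations can be collected and whose size and
    /// alignment fit the request; unsuitable slabs are parked on the visited
    /// list.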
    fn try_reclaim(
        &self,
        thread_id: u64,
        count: usize,
        align: usize,
        is_large_or_huge: bool,
    ) -> Option<SlabRef> {
        const MAX_TRIAL: usize = 8;
        let mut trial = MAX_TRIAL;
        while trial > 0
            && let Some(slab) = self.pop_abandoned()
        {
            if slab.collect_abandoned()
                && slab.size >= count * SLAB_SIZE
                && slab.as_ptr().is_aligned_to(align)
            {
                let aid = slab.arena_id;
                let raw = slab.into_raw();
                let slab = unsafe { Slab::init(raw, thread_id, aid, is_large_or_huge, false) };
                return Some(slab);
            }
            self.push_abandoned_visited(slab);
            trial -= 1;
        }
        None
    }

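    /// Iterates over every registered arena together with its slot index,
    /// skipping empty slots.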
    fn all_arenas(&self) -> impl Iterator<Item = (usize, &Arena<B>)> {
        let iter = self.arenas[..self.arena_count.load(Acquire)].iter();
        iter.enumerate()
            .filter_map(|(index, arena)| Some((index, unsafe { arena.load(Acquire).as_ref() }?)))
    }

    fn arenas(&self, is_exclusive: bool) -> impl Iterator<Item = (usize, &Arena<B>)> {
        self.all_arenas()
            .filter(move |(_, arena)| arena.is_exclusive == is_exclusive)
    }

    pub fn base(&self) -> &B {
        &self.base
    }

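    /// Registers a caller-provided chunk so that its memory is used for slab
    /// allocations. The chunk's pointer must be aligned to `SLAB_SIZE`.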
    pub fn manage(&self, chunk: Chunk<B>) -> Result<(), Error<B>> {
        let arena = Arena::new_chunk(&self.base, chunk)?;
        self.push_arena(arena)?;
        Ok(())
    }

    #[cfg(debug_assertions)]
    pub(crate) fn check_ptr(&self, ptr: NonNull<u8>) -> bool {
        self.all_arenas().any(|(_, arena)| arena.check_ptr(ptr))
    }

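    /// Allocates a slab spanning `count` slab-sized units: first tries to
    /// reclaim an abandoned slab, then the existing non-exclusive arenas, and
    /// finally reserves a new arena.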
    pub(crate) fn allocate(
        &self,
        thread_id: u64,
        count: NonZeroUsize,
        align: usize,
        is_large_or_huge: bool,
    ) -> Result<SlabRef, Error<B>> {
        let count = count.get();

        let reclaimed = self.try_reclaim(thread_id, count, align, is_large_or_huge);
        let ret = match reclaimed.map(Ok).or_else(|| {
            self.arenas(false).find_map(|(_, arena)| {
                arena.allocate(thread_id, count, align, is_large_or_huge, &self.base)
            })
        }) {
            Some(slab) => slab?,
            None => {
                const MIN_RESERVE_COUNT: usize = 32;

                let reserve_count = count.max(MIN_RESERVE_COUNT);
                let arena = Arena::new(&self.base, reserve_count, Some(align), false)?;
                let arena = self.push_arena(arena)?;
                let res = arena.allocate(thread_id, count, align, is_large_or_huge, &self.base);
                res.unwrap()?
            }
        };
        Ok(ret)
    }

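    /// Returns a slab to its owning arena, or pushes it onto the abandoned
    /// list if it is abandoned.
    ///
    /// # Safety
    ///
    /// `slab` must have been allocated from this collection and must not be
    /// used afterwards.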
    pub(crate) unsafe fn deallocate(&self, slab: SlabRef) {
        if !slab.is_abandoned() {
            let arena = self.arenas[slab.arena_id - 1].load(Acquire);
            debug_assert!(!arena.is_null());
            let _slab_count = unsafe { (*arena).deallocate(slab, &self.base) };
        } else {
            self.push_abandoned(slab)
        }
    }

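    /// Serves an allocation that bypasses slabs: reserves a dedicated
    /// exclusive arena sized and aligned to `layout` and returns a pointer to
    /// its memory.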
    pub fn allocate_direct(&self, layout: Layout, zero: bool) -> Result<NonNull<[u8]>, Error<B>> {
        let arena = Arena::new(
            &self.base,
            layout.size().div_ceil(SLAB_SIZE),
            Some(layout.align()),
            true,
        )?;
        let arena = self.push_arena(arena)?;
        Ok(NonNull::from_raw_parts(
            arena.chunk.pointer().cast(),
            layout.size(),
        ))
        .inspect(|&ptr| crate::heap::post_alloc(ptr, B::IS_ZEROED, zero))
    }

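    /// Returns the layout of the chunk backing the direct allocation at
    /// `ptr`, if there is one.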
    pub fn layout_of_direct(&self, ptr: NonNull<u8>) -> Option<Layout> {
        self.arenas(true)
            .find(|(_, arena)| arena.chunk.pointer().cast() == ptr)
            .map(|(_, arena)| arena.chunk.layout())
    }

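    /// Frees a direct allocation by removing and dropping its exclusive
    /// arena.
    ///
    /// # Safety
    ///
    /// `ptr` must have been returned by [`Self::allocate_direct`] on this
    /// collection and must not be used afterwards.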
    pub unsafe fn deallocate_direct(&self, ptr: NonNull<u8>) {
        if let Some((index, _)) = self
            .arenas(true)
            .find(|(_, arena)| arena.chunk.pointer().cast() == ptr)
        {
            track::deallocate(ptr, 0);
            let arena = self.arenas[index].swap(ptr::null_mut(), AcqRel);
            debug_assert!(!arena.is_null());
            unsafe { Arena::drop(NonNull::new_unchecked(arena)) }
        } else {
            #[cfg(debug_assertions)]
            panic!("deallocating memory not from these arenas: {ptr:p}")
        }
    }
}

impl<B: BaseAlloc> Drop for Arenas<B> {
    fn drop(&mut self) {
        let iter = self.arenas[..*self.arena_count.get_mut()].iter_mut();
        iter.filter_map(|arena| NonNull::new(mem::replace(arena.get_mut(), ptr::null_mut())))
            .for_each(|arena| unsafe { Arena::drop(arena) })
    }
}

572#[derive(Debug)]
574pub enum Error<B: BaseAlloc> {
575 Alloc(B::Error),
577 Commit(B::Error),
579 ArenaExhausted,
581 Uninit,
583}
584
585impl<B: BaseAlloc> core::fmt::Display for Error<B>
586where
587 B::Error: core::fmt::Display,
588{
589 fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
590 match self {
591 Error::Alloc(err) => write!(f, "base allocation failed: {err}"),
592 Error::Commit(err) => write!(f, "base commission failed: {err}"),
593 Error::ArenaExhausted => write!(f, "the arena collection is full of arenas"),
594 Error::Uninit => write!(f, "uninitialized heap"),
595 }
596 }
597}