// rarena_allocator/unsync.rs

use core::{
  fmt,
  mem::{self, MaybeUninit},
  ptr::{self, NonNull},
  slice,
};

use super::{common::*, sealed::Sealed, *};

#[allow(unused_imports)]
use std::boxed::Box;

#[cfg(feature = "std")]
type Memory =
  super::memory::Memory<UnsafeCell<usize>, std::rc::Rc<std::path::PathBuf>, sealed::Header>;

#[cfg(not(feature = "std"))]
type Memory = super::memory::Memory<UnsafeCell<usize>, std::rc::Rc<()>, sealed::Header>;

const SEGMENT_NODE_SIZE: usize = mem::size_of::<SegmentNode>();

mod sealed {
  use super::*;

  #[derive(Debug)]
  #[repr(C, align(8))]
  pub struct Header {
    /// The sentinel node for the ordered free list.
    pub(super) sentinel: SegmentNode,
    pub(super) allocated: u32,
    pub(super) min_segment_size: u32,
    pub(super) discarded: u32,
  }

  impl super::super::sealed::Header for Header {
    #[inline]
    fn new(size: u32, min_segment_size: u32) -> Self {
      Self {
        allocated: size,
        sentinel: SegmentNode::sentinel(),
        min_segment_size,
        discarded: 0,
      }
    }

    #[cfg(all(feature = "memmap", not(target_family = "wasm")))]
    #[inline]
    fn load_allocated(&self) -> u32 {
      self.allocated
    }

    #[inline]
    fn load_min_segment_size(&self) -> u32 {
      self.min_segment_size
    }
  }
}

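// Layout sketch (an illustration only, assuming `encode_segment_node(size, next)`
// packs `size` into the high 32 bits and `next` into the low 32 bits, matching
// the field order documented below): a free segment holding 64 data bytes whose
// successor node lives at offset 256 would be stored as the single `u64`
// `(64u64 << 32) | 256`, and `decode_segment_node` would give back `(64, 256)`.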
#[repr(transparent)]
struct SegmentNode(
  /// The first 32 bits are the size of the memory,
  /// the last 32 bits are the offset of the next segment node.
  UnsafeCell<u64>,
);

impl core::fmt::Debug for SegmentNode {
  fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
    let (size, next) = decode_segment_node(*self.0.as_inner_ref());
    f.debug_struct("SegmentNode")
      .field("size", &size)
      .field("next", &next)
      .finish()
  }
}

impl core::ops::Deref for SegmentNode {
  type Target = UnsafeCell<u64>;

  #[inline]
  fn deref(&self) -> &Self::Target {
    &self.0
  }
}

impl SegmentNode {
  #[inline]
  fn sentinel() -> Self {
    Self(UnsafeCell::new(encode_segment_node(
      SENTINEL_SEGMENT_NODE_OFFSET,
      SENTINEL_SEGMENT_NODE_OFFSET,
    )))
  }

  #[allow(clippy::mut_from_ref)]
  #[inline]
  fn as_inner_mut(&self) -> &mut u64 {
    unsafe { &mut *self.0.as_inner_mut() }
  }
}

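// A detached view of one free segment inside the arena. `ptr_offset` is where
// the 8-byte `SegmentNode` header lives, `data_offset` is where the reusable
// bytes start (`ptr_offset + SEGMENT_NODE_SIZE`), and `data_size` is how many
// reusable bytes follow the node header.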
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
struct Segment {
  ptr: *mut u8,
  ptr_offset: u32,
  data_offset: u32,
  data_size: u32,
}

impl Segment {
  /// ## Safety
  /// - `offset` must be in bounds and well-aligned for a `u64` (i.e. a valid
  ///   location for a `SegmentNode`).
  #[inline]
  unsafe fn from_offset(arena: &Arena, offset: u32, data_size: u32) -> Self {
    Self {
      ptr: arena.ptr,
      ptr_offset: offset,
      data_offset: offset + SEGMENT_NODE_SIZE as u32,
      data_size,
    }
  }

  #[inline]
  fn as_mut(&mut self) -> &mut SegmentNode {
    // Safety: when constructing the Segment, we have checked the ptr_offset is in bounds and well-aligned.
    unsafe {
      let ptr = self.ptr.add(self.ptr_offset as usize);
      &mut *ptr.cast::<SegmentNode>()
    }
  }

  #[inline]
  fn update_next_node(&mut self, next: u32) {
    *self.as_mut().as_inner_mut() = encode_segment_node(self.data_size, next);
  }
}

/// An ARENA allocator for single-threaded (unsync) use.
pub struct Arena {
  ptr: *mut u8,
  cap: u32,
  inner: NonNull<Memory>,

  // Once constructed, the below fields are immutable
  reserved: usize,
  data_offset: u32,
  flag: MemoryFlags,
  max_retries: u8,
  unify: bool,
  magic_version: u16,
  version: u16,
  ro: bool,
  freelist: Freelist,
  page_size: u32,
}

impl fmt::Debug for Arena {
  fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
    let header = self.header();
    let allocated = header.allocated;

    f.debug_struct("Arena")
      .field("cap", &self.cap)
      .field("data_offset", &self.data_offset)
      .field("allocated", &allocated)
      .field("discarded", &header.discarded)
      .field("freelist", &self.freelist)
      .field("page_size", &self.page_size)
      .field("reserved", &self.reserved)
      .field("magic_version", &self.magic_version)
      .field("version", &self.version)
      .field("read_only", &self.ro)
      .field("unify", &self.unify)
      .finish()
  }
}

impl Clone for Arena {
  fn clone(&self) -> Self {
    unsafe {
      use super::sealed::RefCounter;

      let memory = self.inner.as_ref();

      let old_size = memory.refs().fetch_add(1, Ordering::Release);
      if old_size > usize::MAX >> 1 {
        dbutils::abort();
      }

      // Safety:
      // The ptr is always non-null, and the data is only deallocated when the
      // last Arena is dropped.
      Self {
        max_retries: self.max_retries,
        reserved: self.reserved,
        flag: self.flag,
        magic_version: self.magic_version,
        version: self.version,
        ptr: self.ptr,
        data_offset: self.data_offset,
        ro: self.ro,
        inner: self.inner,
        unify: self.unify,
        cap: self.cap,
        freelist: self.freelist,
        page_size: self.page_size,
      }
    }
  }
}

impl From<Memory> for Arena {
  fn from(memory: Memory) -> Self {
    let ptr = memory.as_mut_ptr();

    Self {
      reserved: memory.reserved(),
      freelist: memory.freelist(),
      cap: memory.cap(),
      flag: memory.flag(),
      unify: memory.unify(),
      magic_version: memory.magic_version(),
      version: memory.version(),
      ptr,
      ro: memory.read_only(),
      max_retries: memory.maximum_retries(),
      data_offset: memory.data_offset() as u32,
      inner: unsafe { NonNull::new_unchecked(Box::into_raw(Box::new(memory)) as _) },
      page_size: *PAGE_SIZE,
    }
  }
}

impl Sealed for Arena {
  #[cfg(feature = "std")]
  type PathRefCounter = std::rc::Rc<std::path::PathBuf>;

  #[cfg(not(feature = "std"))]
  type PathRefCounter = std::rc::Rc<()>;

  type RefCounter = UnsafeCell<usize>;

  type Header = sealed::Header;
}

impl AsRef<Memory> for Arena {
  #[inline]
  fn as_ref(&self) -> &Memory {
    // Safety: the inner is always non-null.
    unsafe { self.inner.as_ref() }
  }
}

impl Allocator for Arena {
  #[cfg(all(feature = "memmap", not(target_family = "wasm")))]
  #[cfg_attr(docsrs, doc(cfg(all(feature = "memmap", not(target_family = "wasm")))))]
  type Path = std::rc::Rc<std::path::PathBuf>;

  #[inline]
  fn reserved_bytes(&self) -> usize {
    self.reserved
  }

  #[inline]
  fn reserved_slice(&self) -> &[u8] {
    if self.reserved == 0 {
      return &[];
    }

    // Safety: the ptr is always non-null.
    unsafe { slice::from_raw_parts(self.ptr, self.reserved) }
  }

  #[inline]
  unsafe fn reserved_slice_mut(&self) -> &mut [u8] {
    unsafe {
      if self.reserved == 0 {
        return &mut [];
      }

      if self.ro {
        panic!("ARENA is read-only");
      }

      slice::from_raw_parts_mut(self.ptr, self.reserved)
    }
  }

  #[inline]
  unsafe fn alloc<T>(&self) -> Result<RefMut<'_, T, Self>, Error> {
    if mem::size_of::<T>() == 0 {
      return Ok(RefMut::new_zst(self));
    }

    let allocated = self
      .alloc_in::<T>()?
      .expect("allocated size is not zero, but got None");
    let ptr = unsafe { self.get_aligned_pointer_mut::<T>(allocated.memory_offset as usize) };
    if mem::needs_drop::<T>() {
      unsafe {
        let ptr: *mut MaybeUninit<T> = ptr.as_ptr().cast();
        ptr::write(ptr, MaybeUninit::uninit());

        Ok(RefMut::new(ptr::read(ptr), allocated, self))
      }
    } else {
      Ok(RefMut::new_inline(ptr, allocated, self))
    }
  }

  #[inline]
  fn alloc_aligned_bytes<T>(&self, size: u32) -> Result<BytesRefMut<'_, Self>, Error> {
    self.alloc_aligned_bytes_in::<T>(size).map(|a| match a {
      None => BytesRefMut::null(self),
      Some(allocated) => unsafe { BytesRefMut::new(self, allocated) },
    })
  }

  // #[cfg(all(feature = "memmap", not(target_family = "wasm")))]
  // #[cfg_attr(docsrs, doc(cfg(all(feature = "memmap", not(target_family = "wasm")))))]
  // #[inline]
  // fn alloc_aligned_bytes_within_page<T>(&self, size: u32) -> Result<BytesRefMut<'_, Self>, Error> {
  //   self
  //     .alloc_aligned_bytes_within_page_in::<T>(size)
  //     .map(|a| match a {
  //       None => BytesRefMut::null(self),
  //       Some(allocated) => unsafe { BytesRefMut::new(self, allocated) },
  //     })
  // }

  #[inline]
  fn alloc_bytes(&self, size: u32) -> Result<BytesRefMut<'_, Self>, Error> {
    self.alloc_bytes_in(size).map(|a| match a {
      None => BytesRefMut::null(self),
      Some(allocated) => unsafe { BytesRefMut::new(self, allocated) },
    })
  }

  // #[cfg(all(feature = "memmap", not(target_family = "wasm")))]
  // #[cfg_attr(docsrs, doc(cfg(all(feature = "memmap", not(target_family = "wasm")))))]
  // #[inline]
  // fn alloc_bytes_within_page(&self, size: u32) -> Result<BytesRefMut<'_, Self>, Error> {
  //   self.alloc_bytes_within_page_in(size).map(|a| match a {
  //     None => BytesRefMut::null(self),
  //     Some(allocated) => unsafe { BytesRefMut::new(self, allocated) },
  //   })
  // }

  // #[cfg(all(feature = "memmap", not(target_family = "wasm")))]
  // #[cfg_attr(docsrs, doc(cfg(all(feature = "memmap", not(target_family = "wasm")))))]
  // #[inline]
  // unsafe fn alloc_within_page<T>(&self) -> Result<RefMut<'_, T, Self>, Error> {
  //   if mem::size_of::<T>() == 0 {
  //     return Ok(RefMut::new_zst(self));
  //   }

  //   let allocated = self
  //     .alloc_within_page_in::<T>()?
  //     .expect("allocated size is not zero, but get None");
  //   let ptr = unsafe { self.get_aligned_pointer_mut::<T>(allocated.memory_offset as usize) };
  //   if mem::needs_drop::<T>() {
  //     unsafe {
  //       let ptr: *mut MaybeUninit<T> = ptr.as_ptr().cast();
  //       ptr::write(ptr, MaybeUninit::uninit());

  //       Ok(RefMut::new(ptr::read(ptr), allocated, self))
  //     }
  //   } else {
  //     Ok(RefMut::new_inline(ptr, allocated, self))
  //   }
  // }

  #[inline]
  fn allocated(&self) -> usize {
    self.header().allocated as usize
  }

  #[inline]
  fn raw_mut_ptr(&self) -> *mut u8 {
    self.ptr
  }

  #[inline]
  fn raw_ptr(&self) -> *const u8 {
    self.ptr
  }

  unsafe fn clear(&self) -> Result<(), Error> {
    unsafe {
      if self.ro {
        return Err(Error::ReadOnly);
      }

      let memory = &mut *self.inner.as_ptr();
      memory.clear();

      Ok(())
    }
  }

  #[inline]
  fn data_offset(&self) -> usize {
    self.data_offset as usize
  }

  #[inline]
  unsafe fn dealloc(&self, offset: u32, size: u32) -> bool {
    // first try to deallocate the memory back to the main memory.
    let header = self.header_mut();
    // if the offset + size is the current allocated size, then we can deallocate the memory back to the main memory.
    if header.allocated == offset + size {
      header.allocated = offset;
      return true;
    }

    match self.freelist {
      Freelist::None => {
        self.increase_discarded(size);
        true
      }
      Freelist::Optimistic => self.optimistic_dealloc(offset, size),
      Freelist::Pessimistic => self.pessimistic_dealloc(offset, size),
    }
  }

  #[inline]
  fn discard_freelist(&self) -> Result<u32, Error> {
    if self.ro {
      return Err(Error::ReadOnly);
    }

    Ok(match self.freelist {
      Freelist::None => 0,
      _ => self.discard_freelist_in(),
    })
  }

  #[inline]
  fn discarded(&self) -> u32 {
    self.header().discarded
  }

  #[inline]
  fn increase_discarded(&self, size: u32) {
    #[cfg(feature = "tracing")]
    tracing::debug!("discard {size} bytes");

    self.header_mut().discarded += size;
  }

  #[inline]
  fn magic_version(&self) -> u16 {
    self.magic_version
  }

  #[inline]
  fn minimum_segment_size(&self) -> u32 {
    self.header().min_segment_size
  }

  #[inline]
  fn set_minimum_segment_size(&self, size: u32) {
    self.header_mut().min_segment_size = size;
  }

  #[cfg(all(feature = "memmap", not(target_family = "wasm")))]
  #[cfg_attr(docsrs, doc(cfg(all(feature = "memmap", not(target_family = "wasm")))))]
  #[inline]
  fn path(&self) -> Option<&Self::Path> {
    // Safety: the inner is always non-null, we only deallocate it when the memory refs is 1.
    unsafe { self.inner.as_ref().path() }
  }

  #[inline]
  unsafe fn offset(&self, ptr: *const u8) -> usize {
    unsafe {
      let offset = ptr.offset_from(self.ptr);
      offset as usize
    }
  }

  #[inline]
  fn page_size(&self) -> usize {
    self.page_size as usize
  }

  #[inline]
  fn refs(&self) -> usize {
    unsafe { *self.inner.as_ref().refs().as_inner_ref() }
  }

  #[inline]
  fn remaining(&self) -> usize {
    (self.cap as usize).saturating_sub(self.allocated())
  }

  #[cfg(all(feature = "memmap", not(target_family = "wasm")))]
  #[cfg_attr(docsrs, doc(cfg(all(feature = "memmap", not(target_family = "wasm")))))]
  #[inline]
  fn remove_on_drop(&self, remove_on_drop: bool) {
    // Safety: the inner is always non-null, we only deallocate it when the memory refs is 1.
    unsafe { (*self.inner.as_ptr()).set_remove_on_drop(remove_on_drop) }
  }

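  // How the rewind target is clamped (a descriptive note, not new behavior):
  // whatever `ArenaPosition` is given, the final offset always ends up in
  // `[data_offset, cap]`, so rewinding can never expose the reserved prefix or
  // run past the capacity. For example, with `data_offset = 8` and `cap = 100`,
  // `ArenaPosition::End(150)` underflows `cap - offset` and is clamped to 8.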
  unsafe fn rewind(&self, pos: ArenaPosition) {
    let data_offset = self.data_offset;
    let cap = self.cap;
    let header = self.header_mut();
    let allocated = header.allocated;
    let final_offset = match pos {
      ArenaPosition::Start(offset) => offset.max(data_offset).min(cap),
      ArenaPosition::Current(offset) => {
        let offset = allocated as i64 + offset;
        #[allow(clippy::comparison_chain)]
        if offset > 0 {
          if offset >= (cap as i64) {
            cap
          } else {
            let offset = offset as u32;
            offset.max(data_offset).min(cap)
          }
        } else if offset < 0 {
          data_offset
        } else {
          return;
        }
      }
      ArenaPosition::End(offset) => match cap.checked_sub(offset) {
        Some(val) => val.max(data_offset),
        None => data_offset,
      },
    };

    header.allocated = final_offset;
  }

  #[inline]
  fn version(&self) -> u16 {
    self.version
  }
}

impl Arena {
  /// Truncate the ARENA to a new capacity.
  ///
  /// **Note:** If the new capacity is less than the current allocated size, then the ARENA will be truncated to the allocated size.
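  ///
  /// ## Example
  ///
  /// A minimal usage sketch, assuming `arena` is an existing mutable,
  /// map-backed `Arena` (construction is not shown in this file), hence the
  /// example is marked `ignore`:
  ///
  /// ```ignore
  /// // Grow the backing memory to 8 KiB. Requests smaller than the currently
  /// // allocated size are clamped up to the allocated size instead of losing data.
  /// arena.truncate(8 * 1024)?;
  /// ```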
  #[cfg(all(feature = "memmap", not(target_family = "wasm")))]
  pub fn truncate(&mut self, mut size: usize) -> std::io::Result<()> {
    if self.ro {
      return Err(std::io::Error::new(
        std::io::ErrorKind::PermissionDenied,
        "ARENA is read-only",
      ));
    }

    let allocated = self.allocated();
    if allocated >= size {
      size = allocated;
    }

    unsafe {
      let memory = self.inner.as_mut();
      memory.truncate(allocated, size)?;
      self.ptr = memory.as_mut_ptr();
      self.cap = memory.cap();
    }
    Ok(())
  }

  /// Truncate the ARENA to a new capacity.
  ///
  /// **Note:** If the new capacity is less than the current allocated size, then the ARENA will be truncated to the allocated size.
  #[cfg(not(all(feature = "memmap", not(target_family = "wasm"))))]
  pub fn truncate(&mut self, mut size: usize) {
    let allocated = self.allocated();
    if allocated >= size {
      size = allocated;
    }

    unsafe {
      let memory = self.inner.as_mut();
      memory.truncate(allocated, size);
      self.ptr = memory.as_mut_ptr();
      self.cap = memory.cap();
    }
  }

  #[inline]
  fn header(&self) -> &sealed::Header {
    // Safety:
    // The inner is always non-null, we only deallocate it when the memory refs is 1.
    unsafe { (*self.inner.as_ptr()).header() }
  }

  #[allow(clippy::mut_from_ref)]
  #[inline]
  fn header_mut(&self) -> &mut sealed::Header {
    // Safety:
    // The inner is always non-null, we only deallocate it when the memory refs is 1.
    unsafe { (*self.inner.as_ptr()).header_mut() }
  }

  /// Returns the node after which the value should be inserted, as the node's
  /// encoded value plus a reference to its cell (current -> new -> next).
  /// Returning the sentinel means the new node becomes the head of the list.
  fn find_position(&self, val: u32, check: impl Fn(u32, u32) -> bool) -> (u64, &UnsafeCell<u64>) {
    let header = self.header_mut();
    let mut current: &UnsafeCell<u64> = &header.sentinel;
    let mut current_node = current.as_inner_ref();
    let (mut current_node_size, mut next_offset) = decode_segment_node(*current_node);

    loop {
      // the list is empty
      if current_node_size == SENTINEL_SEGMENT_NODE_SIZE
        && next_offset == SENTINEL_SEGMENT_NODE_OFFSET
      {
        return (*current_node, current);
      }

      // the next node is the tail, so we should insert the value after the current node.
      if next_offset == SENTINEL_SEGMENT_NODE_OFFSET {
        return (*current_node, current);
      }

      let next = self.get_segment_node(next_offset);
      let next_node = next.as_inner_ref();
      let (next_node_size, next_next_offset) = decode_segment_node(*next_node);

      if check(val, next_node_size) {
        return (*current_node, current);
      }

      current = next;
      current_node = next_node;
      current_node_size = next_node_size;
      next_offset = next_next_offset;
    }
  }

  #[allow(clippy::type_complexity)]
  fn find_prev_and_next(
    &self,
    val: u32,
    check: impl Fn(u32, u32) -> bool,
  ) -> Option<((u64, &UnsafeCell<u64>), (u64, &UnsafeCell<u64>))> {
    let header = self.header();
    let mut current: &UnsafeCell<u64> = &header.sentinel;
    let mut current_node = current.as_inner_ref();
    let (mut current_node_size, mut next_offset) = decode_segment_node(*current_node);

    loop {
      // the list is empty
      if current_node_size == SENTINEL_SEGMENT_NODE_SIZE
        && next_offset == SENTINEL_SEGMENT_NODE_OFFSET
      {
        return None;
      }

      // the next node is the tail: no node satisfied the check.
      if next_offset == SENTINEL_SEGMENT_NODE_OFFSET {
        return None;
      }

      let next = self.get_segment_node(next_offset);
      let next_node = next.as_inner_ref();
      let (next_node_size, next_next_offset) = decode_segment_node(*next_node);

      if check(val, next_node_size) {
        return Some(((*current_node, current), (*next_node, next)));
      }

      current = next;
      current_node = next_node;
      current_node_size = next_node_size;
      next_offset = next_next_offset;
    }
  }

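  // Note on the two freelist flavors (describing the code below): an
  // "optimistic" dealloc keeps the list sorted by segment size in descending
  // order, so the optimistic slow path simply pops the head (the largest
  // segment); a "pessimistic" dealloc keeps the list in ascending order, so
  // the pessimistic slow path walks it and takes the first (smallest) segment
  // that is large enough.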
  fn optimistic_dealloc(&self, offset: u32, size: u32) -> bool {
    // check if we have enough space to allocate a new segment in this segment.
    let Some(mut segment_node) = self.try_new_segment(offset, size) else {
      return false;
    };

    loop {
      let (current_node_size_and_next_node_offset, current) = self
        .find_position(segment_node.data_size, |val, next_node_size| {
          val >= next_node_size
        });
      let (node_size, next_node_offset) =
        decode_segment_node(current_node_size_and_next_node_offset);

      if segment_node.ptr_offset == next_node_offset {
        continue;
      }

      segment_node.update_next_node(next_node_offset);

      *current.as_inner_ref_mut() = encode_segment_node(node_size, segment_node.ptr_offset);

      #[cfg(feature = "tracing")]
      tracing::debug!(
        "create segment node ({} bytes) at {}, next segment {next_node_offset}",
        segment_node.data_size,
        segment_node.ptr_offset
      );

      self.increase_discarded(segment_node.data_offset - segment_node.ptr_offset);
      return true;
    }
  }

  fn pessimistic_dealloc(&self, offset: u32, size: u32) -> bool {
    // check if we have enough space to allocate a new segment in this segment.
    let Some(mut segment_node) = self.try_new_segment(offset, size) else {
      return false;
    };

    let (current_node_size_and_next_node_offset, current) = self
      .find_position(segment_node.data_size, |val, next_node_size| {
        val <= next_node_size
      });
    let (node_size, next_node_offset) = decode_segment_node(current_node_size_and_next_node_offset);

    segment_node.update_next_node(next_node_offset);

    *current.as_inner_ref_mut() = encode_segment_node(node_size, segment_node.ptr_offset);

    #[cfg(feature = "tracing")]
    tracing::debug!(
      "create segment node ({} bytes) at {}, next segment {next_node_offset}",
      segment_node.data_size,
      segment_node.ptr_offset
    );

    self.increase_discarded(segment_node.data_offset - segment_node.ptr_offset);
    true
  }

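  // Bump-allocation fast path: if the tail of the arena still has room, just
  // advance `header.allocated`; otherwise fall back to the configured freelist.
  // For example, with `allocated = 100` and `cap = 1024`, `alloc_bytes_in(24)`
  // hands out the range starting at offset 100 and bumps `allocated` to 124.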
  fn alloc_bytes_in(&self, size: u32) -> Result<Option<Meta>, Error> {
    if self.ro {
      return Err(Error::ReadOnly);
    }

    if size == 0 {
      return Ok(None);
    }
    let header = self.header_mut();

    let want = header.allocated + size;
    if want <= self.cap {
      let offset = header.allocated;
      header.allocated = want;

      #[cfg(feature = "tracing")]
      tracing::debug!("allocate {} bytes at offset {} from memory", size, offset);

      let allocated = Meta::new(self.ptr as _, offset, size);
      unsafe { allocated.clear(self) };
      return Ok(Some(allocated));
    }

    // allocate through slow path
    match self.freelist {
      Freelist::None => Err(Error::InsufficientSpace {
        requested: size,
        available: self.remaining() as u32,
      }),
      Freelist::Optimistic => match self.alloc_slow_path_optimistic(size) {
        Ok(bytes) => Ok(Some(bytes)),
        Err(e) => Err(e),
      },
      Freelist::Pessimistic => match self.alloc_slow_path_pessimistic(size) {
        Ok(bytes) => Ok(Some(bytes)),
        Err(e) => Err(e),
      },
    }
  }

  // #[cfg(all(feature = "memmap", not(target_family = "wasm")))]
  // fn alloc_bytes_within_page_in(&self, size: u32) -> Result<Option<Meta>, Error> {
  //   if self.ro {
  //     return Err(Error::ReadOnly);
  //   }

  //   if size == 0 {
  //     return Ok(None);
  //   }

  //   if size > self.page_size {
  //     return Err(Error::larger_than_page_size(size, self.page_size));
  //   }

  //   let header = self.header_mut();
  //   let mut padding_to_next_page = 0;

  //   let page_boundary = self.nearest_page_boundary(header.allocated);
  //   let mut want = header.allocated + size;

  //   // Ensure that the allocation will fit within page
  //   if want > page_boundary {
  //     // Adjust the allocation to start at the next page boundary
  //     padding_to_next_page = page_boundary - header.allocated;
  //     want += padding_to_next_page;
  //   }

  //   if want <= self.cap {
  //     let offset = header.allocated;
  //     header.allocated = want;

  //     #[cfg(feature = "tracing")]
  //     tracing::debug!(
  //       "allocate {} bytes at offset {} from memory",
  //       size + padding_to_next_page,
  //       offset
  //     );

  //     let mut allocated = Meta::new(self.ptr as _, offset, size + padding_to_next_page);
  //     allocated.ptr_offset = allocated.memory_offset + padding_to_next_page;
  //     allocated.ptr_size = size;
  //     unsafe { allocated.clear(self) };

  //     #[cfg(all(test, feature = "memmap", not(target_family = "wasm")))]
  //     self.check_page_boundary(allocated.ptr_offset, allocated.ptr_size);

  //     return Ok(Some(allocated));
  //   }

  //   Err(Error::InsufficientSpace {
  //     requested: want - header.allocated,
  //     available: self.remaining() as u32,
  //   })
  // }

  // #[cfg(all(feature = "memmap", not(target_family = "wasm")))]
  // #[inline]
  // fn nearest_page_boundary(&self, offset: u32) -> u32 {
  //   // Calculate the nearest page boundary after the offset
  //   let remainder = offset % self.page_size;
  //   if remainder == 0 {
  //     offset // Already at a page boundary
  //   } else {
  //     offset + (self.page_size - remainder)
  //   }
  // }

  fn alloc_aligned_bytes_in<T>(&self, extra: u32) -> Result<Option<Meta>, Error> {
    if self.ro {
      return Err(Error::ReadOnly);
    }

    if mem::size_of::<T>() == 0 {
      return self.alloc_bytes_in(extra);
    }

    let header = self.header_mut();
    let allocated = header.allocated;
    let aligned_offset = align_offset::<T>(allocated);
    let size = mem::size_of::<T>() as u32;
    let want = aligned_offset + size + extra;

    if want <= self.cap {
      // break size + extra;
      let offset = header.allocated;
      header.allocated = want;
      let mut allocated = Meta::new(self.ptr as _, offset, want - offset);
      allocated.align_bytes_to::<T>();
      #[cfg(feature = "tracing")]
      tracing::debug!(
        "allocate {} bytes at offset {} from memory",
        want - offset,
        offset
      );
      return Ok(Some(allocated));
    }

    // allocate through slow path
    match self.freelist {
      Freelist::None => Err(Error::InsufficientSpace {
        requested: size + extra,
        available: self.remaining() as u32,
      }),
      Freelist::Optimistic => {
        match self.alloc_slow_path_optimistic(Self::pad::<T>() as u32 + extra) {
          Ok(mut bytes) => {
            bytes.align_bytes_to::<T>();
            Ok(Some(bytes))
          }
          Err(e) => Err(e),
        }
      }
      Freelist::Pessimistic => {
        match self.alloc_slow_path_pessimistic(Self::pad::<T>() as u32 + extra) {
          Ok(mut bytes) => {
            bytes.align_bytes_to::<T>();
            Ok(Some(bytes))
          }
          Err(e) => Err(e),
        }
      }
    }
  }

  // #[cfg(all(feature = "memmap", not(target_family = "wasm")))]
  // fn alloc_aligned_bytes_within_page_in<T>(&self, extra: u32) -> Result<Option<Meta>, Error> {
  //   if self.ro {
  //     return Err(Error::ReadOnly);
  //   }

  //   let t_size = mem::size_of::<T>();

  //   if t_size == 0 {
  //     return self.alloc_bytes_within_page_in(extra);
  //   }

  //   let header = self.header_mut();
  //   let allocated = header.allocated;

  //   let page_boundary = self.nearest_page_boundary(allocated);
  //   let mut aligned_offset = align_offset::<T>(allocated);
  //   let size = t_size as u32;
  //   let mut want = aligned_offset + size + extra;
  //   let mut estimated_size = want - allocated;

  //   // Ensure that the allocation will fit within page
  //   if want > page_boundary {
  //     aligned_offset = align_offset::<T>(page_boundary);
  //     want = aligned_offset + size + extra;
  //     estimated_size = (aligned_offset - page_boundary) + size + extra;
  //   }

  //   if estimated_size > self.page_size {
  //     return Err(Error::larger_than_page_size(estimated_size, self.page_size));
  //   }

  //   if want <= self.cap {
  //     let offset = header.allocated;
  //     header.allocated = want;

  //     let mut allocated = Meta::new(self.ptr as _, offset, want - offset);
  //     allocated.ptr_offset = aligned_offset;
  //     allocated.ptr_size = size + extra;
  //     unsafe { allocated.clear(self) };

  //     #[cfg(all(test, feature = "memmap", not(target_family = "wasm")))]
  //     self.check_page_boundary(allocated.ptr_offset, allocated.ptr_size);

  //     #[cfg(feature = "tracing")]
  //     tracing::debug!(
  //       "allocate {} bytes at offset {} from memory",
  //       want - offset,
  //       offset
  //     );
  //     return Ok(Some(allocated));
  //   }

  //   Err(Error::InsufficientSpace {
  //     requested: want,
  //     available: self.remaining() as u32,
  //   })
  // }

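  // Typed allocation: like `alloc_bytes_in`, but reserves enough room after the
  // current offset so that a properly aligned `T` fits (the slow path
  // over-reserves via `pad::<T>()` for the same reason).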
  fn alloc_in<T>(&self) -> Result<Option<Meta>, Error> {
    if self.ro {
      return Err(Error::ReadOnly);
    }

    let t_size = mem::size_of::<T>();
    if t_size == 0 {
      return Ok(None);
    }

    let header = self.header_mut();
    let allocated = header.allocated;
    let align_offset = align_offset::<T>(allocated);
    let size = t_size as u32;
    let want = align_offset + size;

    if want <= self.cap {
      let offset = header.allocated;
      header.allocated = want;
      let mut allocated = Meta::new(self.ptr as _, offset, want - offset);
      allocated.align_to::<T>();

      #[cfg(feature = "tracing")]
      tracing::debug!(
        "allocate {} bytes at offset {} from memory",
        want - offset,
        offset
      );

      unsafe { allocated.clear(self) };
      return Ok(Some(allocated));
    }

    // allocate through slow path
    match self.freelist {
      Freelist::None => Err(Error::InsufficientSpace {
        requested: want,
        available: self.remaining() as u32,
      }),
      Freelist::Optimistic => match self.alloc_slow_path_optimistic(Self::pad::<T>() as u32) {
        Ok(mut allocated) => {
          allocated.align_to::<T>();
          Ok(Some(allocated))
        }
        Err(e) => Err(e),
      },
      Freelist::Pessimistic => match self.alloc_slow_path_pessimistic(Self::pad::<T>() as u32) {
        Ok(mut allocated) => {
          allocated.align_to::<T>();
          Ok(Some(allocated))
        }
        Err(e) => Err(e),
      },
    }
  }

  // #[cfg(all(feature = "memmap", not(target_family = "wasm")))]
  // fn alloc_within_page_in<T>(&self) -> Result<Option<Meta>, Error> {
  //   if self.ro {
  //     return Err(Error::ReadOnly);
  //   }

  //   let t_size = mem::size_of::<T>();

  //   if t_size == 0 {
  //     return Ok(None);
  //   }

  //   if t_size as u32 > self.page_size {
  //     return Err(Error::larger_than_page_size(t_size as u32, self.page_size));
  //   }

  //   let header = self.header_mut();
  //   let allocated = header.allocated;
  //   let page_boundary = self.nearest_page_boundary(allocated);
  //   let mut aligned_offset = align_offset::<T>(allocated);
  //   let size = mem::size_of::<T>() as u32;
  //   let mut want = aligned_offset + size;
  //   let mut estimated_size = want - allocated;

  //   // Ensure that the allocation will fit within page
  //   if want > page_boundary {
  //     aligned_offset = align_offset::<T>(page_boundary);
  //     want = aligned_offset + size;
  //     estimated_size = (aligned_offset - page_boundary) + size;
  //   }

  //   if estimated_size > self.page_size {
  //     return Err(Error::larger_than_page_size(estimated_size, self.page_size));
  //   }

  //   if want <= self.cap {
  //     let offset = header.allocated;
  //     header.allocated = want;
  //     let mut allocated = Meta::new(self.ptr as _, offset, want - offset);
  //     allocated.ptr_offset = aligned_offset;
  //     allocated.ptr_size = size;
  //     unsafe { allocated.clear(self) };

  //     #[cfg(all(test, feature = "memmap", not(target_family = "wasm")))]
  //     self.check_page_boundary(allocated.ptr_offset, allocated.ptr_size);

  //     #[cfg(feature = "tracing")]
  //     tracing::debug!(
  //       "allocate {} bytes at offset {} from memory",
  //       want - offset,
  //       offset
  //     );

  //     unsafe { allocated.clear(self) };
  //     return Ok(Some(allocated));
  //   }

  //   Err(Error::InsufficientSpace {
  //     requested: want,
  //     available: self.remaining() as u32,
  //   })
  // }

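  // Pessimistic slow path (best fit): walk the size-ascending free list and
  // take the first segment that is large enough, splitting whatever is left
  // over back into the free list when it still qualifies as a segment.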
  fn alloc_slow_path_pessimistic(&self, size: u32) -> Result<Meta, Error> {
    if self.ro {
      return Err(Error::ReadOnly);
    }

    let Some(((prev_node_val, prev_node), (next_node_val, _))) =
      self.find_prev_and_next(size, |val, next_node_size| val <= next_node_size)
    else {
      return Err(Error::InsufficientSpace {
        requested: size,
        available: self.remaining() as u32,
      });
    };

    let (prev_node_size, next_node_offset) = decode_segment_node(prev_node_val);
    let (next_node_size, next_next_node_offset) = decode_segment_node(next_node_val);

    let remaining = next_node_size - size;

    let segment_node = unsafe { Segment::from_offset(self, next_node_offset, next_node_size) };

    // update the prev node to point to the next next node.
    let updated_prev = encode_segment_node(prev_node_size, next_next_node_offset);

    *prev_node.as_inner_ref_mut() = updated_prev;

    #[cfg(feature = "tracing")]
    tracing::debug!(
      "allocate {} bytes at offset {} from segment",
      size,
      next_node_offset
    );

    let mut memory_size = next_node_size;
    let data_end_offset = segment_node.data_offset + size;
    // check if the remaining is enough to allocate a new segment.
    if self.validate_segment(data_end_offset, remaining) {
      memory_size -= remaining;
      // We have successfully removed the node from the free list, so we can
      // allocate the memory and give back the remaining bytes to the free list.

      // Safety: the `next + size` is in bounds, and `node_size - size` is also in bounds.
      self.pessimistic_dealloc(data_end_offset, remaining);
    }

    let mut allocated = Meta::new(self.ptr as _, segment_node.ptr_offset, memory_size);
    allocated.ptr_offset = segment_node.data_offset;
    allocated.ptr_size = size;
    unsafe {
      allocated.clear(self);
    }
    Ok(allocated)
  }

  /// Like a pop operation: we always allocate from the largest segment (the head of the free list).
  fn alloc_slow_path_optimistic(&self, size: u32) -> Result<Meta, Error> {
    if self.ro {
      return Err(Error::ReadOnly);
    }

    let header = self.header();

    let sentinel = header.sentinel.as_inner_ref();
    let (sentinel_node_size, head_node_offset) = decode_segment_node(*sentinel);

    // free list is empty
    if sentinel_node_size == SENTINEL_SEGMENT_NODE_SIZE
      && head_node_offset == SENTINEL_SEGMENT_NODE_OFFSET
    {
      return Err(Error::InsufficientSpace {
        requested: size,
        available: self.remaining() as u32,
      });
    }

    let head = self.get_segment_node(head_node_offset);
    let head_node_size_and_next_node_offset = head.as_inner_ref();
    let (head_node_size, next_node_offset) =
      decode_segment_node(*head_node_size_and_next_node_offset);

    // The largest segment does not have enough space for this allocation, so just return an error.
    if size > head_node_size {
      return Err(Error::InsufficientSpace {
        requested: size,
        available: head_node_size,
      });
    }

    let remaining = head_node_size - size;

    // Safety: the `next` and `node_size` are valid, because they just come from the sentinel.
    let segment_node = unsafe { Segment::from_offset(self, head_node_offset, head_node_size) };

    *header.sentinel.as_inner_mut() = encode_segment_node(sentinel_node_size, next_node_offset);

    #[cfg(feature = "tracing")]
    tracing::debug!(
      "allocate {} bytes at offset {} from segment",
      size,
      segment_node.data_offset
    );

    let mut memory_size = head_node_size;
    let data_end_offset = segment_node.data_offset + size;
    // check if the remaining is enough to allocate a new segment.
    if self.validate_segment(data_end_offset, remaining) {
      memory_size -= remaining;
      // We have successfully removed the head node from the list, so we can
      // allocate the memory and give back the remaining bytes to the free list.

      // Safety: the `next + size` is in bounds, and `node_size - size` is also in bounds.
      self.optimistic_dealloc(data_end_offset, remaining);
    }

    let mut allocated = Meta::new(self.ptr as _, segment_node.ptr_offset, memory_size);
    allocated.ptr_offset = segment_node.data_offset;
    allocated.ptr_size = size;
    unsafe {
      allocated.clear(self);
    }
    Ok(allocated)
  }

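  // Drains the free list: repeatedly unlinks the head segment and counts its
  // data bytes as discarded, returning the total number of bytes discarded.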
  fn discard_freelist_in(&self) -> u32 {
    let header = self.header();
    let mut discarded = 0;

    loop {
      let sentinel = header.sentinel.as_inner_ref();
      let (sentinel_node_size, head_node_offset) = decode_segment_node(*sentinel);

      // free list is empty
      if sentinel_node_size == SENTINEL_SEGMENT_NODE_SIZE
        && head_node_offset == SENTINEL_SEGMENT_NODE_OFFSET
      {
        return discarded;
      }

      let head = self.get_segment_node(head_node_offset);
      let head_node_size_and_next_node_offset = head.as_inner_ref();
      let (head_node_size, next_node_offset) =
        decode_segment_node(*head_node_size_and_next_node_offset);

      // Safety: the `next` and `node_size` are valid, because they just come from the sentinel.
      let segment_node = unsafe { Segment::from_offset(self, head_node_offset, head_node_size) };

      *header.sentinel.as_inner_mut() = encode_segment_node(sentinel_node_size, next_node_offset);

      // increase the discarded memory.

      #[cfg(feature = "tracing")]
      tracing::debug!("discard {} bytes", segment_node.data_size);
      self.header_mut().discarded += segment_node.data_size;

      discarded += segment_node.data_size;
    }
  }

  /// Returns `true` if this offset and size are valid for a segment node.
  #[inline]
  fn validate_segment(&self, offset: u32, size: u32) -> bool {
    if offset == 0 || size == 0 {
      return false;
    }

    let aligned_offset = align_offset::<u64>(offset) as usize;
    let padding = aligned_offset - offset as usize;
    let segmented_node_size = padding + SEGMENT_NODE_SIZE;
    if segmented_node_size >= size as usize {
      return false;
    }

    let available_bytes = size - segmented_node_size as u32;
    if available_bytes < self.header().min_segment_size {
      return false;
    }

    true
  }

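  // How a freed range is carved into a segment (worked example): for
  // `try_new_segment(offset = 13, size = 40)` the offset is first aligned up
  // to 16 for the `u64` node, so 3 padding bytes plus the 8-byte node header
  // are consumed and the segment ends up with `data_size = 40 - 11 = 29`
  // usable bytes; ranges whose usable bytes fall below `min_segment_size` are
  // discarded instead.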
  #[inline]
  fn try_new_segment(&self, offset: u32, size: u32) -> Option<Segment> {
    if offset == 0 || size == 0 {
      return None;
    }

    let aligned_offset = align_offset::<u64>(offset) as usize;
    let padding = aligned_offset - offset as usize;
    let segmented_node_size = padding + SEGMENT_NODE_SIZE;
    if segmented_node_size >= size as usize {
      self.increase_discarded(size);
      return None;
    }

    let available_bytes = size - segmented_node_size as u32;
    if available_bytes < self.header().min_segment_size {
      self.increase_discarded(size);
      return None;
    }

    Some(Segment {
      ptr: self.ptr,
      ptr_offset: aligned_offset as u32,
      data_offset: (aligned_offset + SEGMENT_NODE_SIZE) as u32,
      data_size: available_bytes,
    })
  }

  #[inline]
  fn get_segment_node(&self, offset: u32) -> &UnsafeCell<u64> {
    // Safety: the offset is in bounds and well aligned.
    unsafe {
      let ptr = self.ptr.add(offset as usize);
      &*(ptr as *const _)
    }
  }

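  // Worst-case padding for a `T`: reserving `size_of::<T>() + align_of::<T>() - 1`
  // bytes guarantees that a properly aligned `T` fits regardless of where the
  // allocation happens to start (e.g. size 4, align 8 => reserve 11 bytes).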
  #[inline]
  fn pad<T>() -> usize {
    let size = mem::size_of::<T>();
    let align = mem::align_of::<T>();
    size + align - 1
  }

  // #[cfg(test)]
  // #[cfg(all(feature = "memmap", not(target_family = "wasm")))]
  // #[inline]
  // fn check_page_boundary(&self, offset: u32, len: u32) {
  //   if len == 0 {
  //     return; // A zero-length range is trivially within the same "page"
  //   }

  //   // Calculate the page boundary of the start and end of the range
  //   let start_page = offset / self.page_size;
  //   let end_page = (offset + len - 1) / self.page_size;

  //   assert_eq!(
  //     start_page, end_page,
  //     "start and end of range must be in the same page"
  //   );
  // }

  #[cfg(test)]
  #[cfg(feature = "std")]
  #[allow(dead_code)]
  pub(super) fn print_segment_list(&self) {
    let header = self.header();
    let mut current: &UnsafeCell<u64> = &header.sentinel;

    loop {
      let current_node = current.as_inner_ref();
      let (node_size, next_node_offset) = decode_segment_node(*current_node);

      if node_size == SENTINEL_SEGMENT_NODE_SIZE {
        if next_node_offset == SENTINEL_SEGMENT_NODE_OFFSET {
          break;
        }

        current = self.get_segment_node(next_node_offset);
        continue;
      }

      std::println!("node_size: {node_size}, next_node_offset: {next_node_offset}");

      if next_node_offset == SENTINEL_SEGMENT_NODE_OFFSET {
        break;
      }
      current = self.get_segment_node(next_node_offset);
    }
  }
}

impl Drop for Arena {
  fn drop(&mut self) {
    use super::sealed::RefCounter;

    unsafe {
      let memory_ptr = self.inner.as_ptr();
      let memory = &*memory_ptr;
      // `Memory` storage: follow the drop steps from `Arc`; only the last
      // reference releases the underlying memory.
      if memory.refs().fetch_sub(1, Ordering::Release) != 1 {
        return;
      }

      // In the sync arena a release/acquire pair is needed here so that every
      // use of the data happens before the data is deleted (see the Boost
      // atomic usage examples and `Arc`'s drop). This is the unsync arena:
      // the reference counter is a plain `UnsafeCell<usize>`, so the
      // `Ordering` argument above only satisfies the shared `RefCounter`
      // trait and no fence is required before dropping the data.
      let mut memory = Box::from_raw(memory_ptr);

      // No one else can access this memory anymore, so it is safe to unmount
      // and deallocate it.
      memory.unmount();
    }
  }
}

#[cfg(test)]
mod tests;