rarena_allocator/
allocator.rs

1use core::ptr::NonNull;
2
3use super::*;
4
5macro_rules! impl_bytes_utils_for_allocator {
6  ($this:ident::$from:ident($ty:ident, $offset:ident)) => {{
7    const SIZE: usize = core::mem::size_of::<$ty>();
8
9    let allocated = $this.allocated();
10    if $offset + SIZE > allocated {
11      return Err(Error::OutOfBounds { $offset, allocated });
12    }
13
14    let buf = unsafe {
15      let ptr = $this.raw_ptr().add($offset);
16      core::slice::from_raw_parts(ptr, SIZE)
17    };
18
19    Ok($ty::$from(buf.try_into().unwrap()))
20  }};
21  (unsafe $this:ident::$from:ident($ty:ident, $offset:ident)) => {{
22    const SIZE: usize = core::mem::size_of::<$ty>();
23
24    let buf = unsafe {
25      let ptr = $this.raw_ptr().add($offset);
26      core::slice::from_raw_parts(ptr, SIZE)
27    };
28
29    $ty::$from(buf.try_into().unwrap())
30  }};
31}
32
33macro_rules! define_bytes_utils {
34  ($($ty:ident:$endian:literal), +$(,)?) => {
35    $(
36      paste::paste! {
37        #[doc = "Returns a `" $ty "` from the allocator."]
38        fn [< get_ $ty _ $endian >](&self, offset: usize) -> Result<$ty, Error> {
39          impl_bytes_utils_for_allocator!(self::[< from_ $endian _bytes >]($ty, offset))
40        }
41
42        #[doc = "Returns a `" $ty "` from the allocator without bounds checking."]
43        ///
44        /// ## Safety
        /// - `offset` plus the size of the returned integer type must be within the allocated memory of the allocator.
46        unsafe fn [< get_ $ty _ $endian _unchecked>](&self, offset: usize) -> $ty {
47          impl_bytes_utils_for_allocator!(unsafe self::[< from_ $endian _bytes >]($ty, offset))
48        }
49      }
50    )*
51  };
52}
53
54macro_rules! impl_leb128_utils_for_allocator {
55  ($this:ident($ty:ident, $offset:ident, $size:literal)) => {{
56    let allocated = $this.allocated();
57    if $offset >= allocated {
58      return Err(Error::OutOfBounds { $offset, allocated });
59    }
60
61    let buf = unsafe {
62      let ptr = $this.get_pointer($offset);
63      let gap = (allocated - $offset).min($size);
64      core::slice::from_raw_parts(ptr, gap)
65    };
66
67    paste::paste! {
68      dbutils::leb128::[< decode_ $ty _varint >](buf).map_err(Into::into)
69    }
70  }};
71}
72
73macro_rules! define_leb128_utils {
74  ($($ty:ident:$size:literal), +$(,)?) => {
75    $(
76      paste::paste! {
        #[doc = "Returns a `" $ty "` in LEB128 format from the allocator at the given offset, along with the number of bytes read."]
        ///
        /// Returns an error if `offset` is beyond the allocated memory of the allocator, or if the bytes
        /// at `offset` do not hold a valid LEB128-encoded value.
81        fn [< get_ $ty _varint >](&self, offset: usize) -> Result<(usize, $ty), Error> {
82          impl_leb128_utils_for_allocator!(self($ty, offset, $size))
83        }
84      }
85    )*
86  };
87}
88
/// A trait for easily interacting with the sync and unsync allocators.
90pub trait Allocator: sealed::Sealed {
91  /// The path type of the allocator.
92  #[cfg(all(feature = "memmap", not(target_family = "wasm")))]
93  #[cfg_attr(docsrs, doc(cfg(all(feature = "memmap", not(target_family = "wasm")))))]
94  type Path;
95
96  /// Returns the number of bytes that are reserved by the allocator.
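  ///
  /// ## Example
  ///
  /// A minimal sketch, assuming an in-memory `Arena` configured through [`Options::with_reserved`]
  /// (the builder calls below are illustrative):
  ///
  /// ```ignore
  /// use rarena_allocator::{sync::Arena, Options, Allocator};
  ///
  /// // `with_reserved(8)` is illustrative; it reserves 8 bytes at the start of the allocator.
  /// let arena = Options::new().with_capacity(100).with_reserved(8).alloc::<Arena>().unwrap();
  /// assert_eq!(arena.reserved_bytes(), 8);
  /// ```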
97  fn reserved_bytes(&self) -> usize;
98
99  /// Returns the reserved bytes of the allocator specified in the [`Options::with_reserved`].
100  fn reserved_slice(&self) -> &[u8];
101
102  /// Returns the mutable reserved bytes of the allocator specified in the [`Options::with_reserved`].
103  ///
104  /// ## Safety
  /// - The caller needs to make sure there is no data race.
  ///
  /// # Panic
  /// - If in read-only mode and the number of reserved bytes is greater than 0, this method will panic.
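  ///
  /// ## Example
  ///
  /// A minimal sketch, assuming a writable in-memory `Arena` created with reserved bytes via
  /// [`Options::with_reserved`] (the builder calls are illustrative):
  ///
  /// ```ignore
  /// use rarena_allocator::{sync::Arena, Options, Allocator};
  ///
  /// let arena = Options::new().with_capacity(100).with_reserved(8).alloc::<Arena>().unwrap();
  ///
  /// unsafe {
  ///   // Single-threaded here, so there is no data race on the reserved bytes.
  ///   arena.reserved_slice_mut().copy_from_slice(&[0, 1, 2, 3, 4, 5, 6, 7]);
  /// }
  /// assert_eq!(arena.reserved_slice(), &[0, 1, 2, 3, 4, 5, 6, 7]);
  /// ```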
109  #[allow(clippy::mut_from_ref)]
110  unsafe fn reserved_slice_mut(&self) -> &mut [u8];
111
112  /// Allocates a `T` in the allocator.
113  ///
114  /// ## Safety
115  ///
116  /// - If `T` needs to be dropped and callers invoke [`RefMut::detach`](crate::RefMut::detach),
117  ///   then the caller must ensure that the `T` is dropped before the allocator is dropped.
118  ///   Otherwise, it will lead to memory leaks.
119  ///
  /// - If this is a file-backed allocator, then `T` must be recoverable from raw bytes.
  ///   1. Types that require heap allocation are not recoverable.
  ///   2. Pointers are not recoverable, e.g. `*const T`, `*mut T`, `NonNull` and any struct containing pointers:
  ///      although those values live on the stack, the addresses they point to cannot be recovered when the file is reopened.
124  ///
125  /// ## Examples
126  ///
127  /// ## Memory leak
128  ///
129  /// The following example demonstrates the memory leak when the `T` is a heap allocated type and detached.
130  ///
131  /// ```ignore
132  ///
133  /// let arena = Options::new().with_capacity(100).alloc::<Arena>().unwrap();
134  ///
135  /// {
136  ///   let mut data = arena.alloc::<Vec<u8>>().unwrap();
137  ///   data.detach();
138  ///   data.write(vec![1, 2, 3]);
139  /// }
140  ///
141  /// drop(arena); // memory leak, the `Vec<u8>` is not dropped.
142  /// ```
143  ///
144  /// ## Undefined behavior
145  ///
146  /// The following example demonstrates the undefined behavior when the `T` is not recoverable.
147  ///
148  /// ```ignore
149  ///
150  /// struct TypeOnHeap {
151  ///   data: Vec<u8>,
152  /// }
153  ///
154  /// let arena = Options::new().with_create_new(1000).with_read(true).with_write(true).map_mut::<Arena, _>("path/to/file").unwrap();
155  ///
156  /// let mut data = arena.alloc::<TypeOnHeap>().unwrap();
157  /// data.detach();
158  /// data.write(TypeOnHeap { data: vec![1, 2, 3] });
159  /// let offset = data.offset();
160  /// drop(arena);
161  ///
162  /// // reopen the file
163  /// let arena = Options::new().with_read(true).map::<Arena, _>("path/to/file").unwrap();
164  ///
165  /// let foo = &*arena.get_aligned_pointer::<TypeOnHeap>(offset as usize);
166  /// let b = foo.data[1]; // undefined behavior, the `data`'s pointer stored in the file is not valid anymore.
167  /// ```
168  ///
169  /// ## Good practice
170  ///
171  /// Some examples about how to use this method correctly.
172  ///
  /// ### Heap allocated type with careful memory management
174  ///
175  /// ```ignore
176  /// let arena = Options::new().with_capacity(100).alloc::<Arena>().unwrap();
177  ///
  /// // Do not invoke detach, so when the data is dropped, the drop logic will be handled
  /// // by the allocator automatically.
180  /// {
181  ///   let mut data = arena.alloc::<Vec<u8>>().unwrap();
182  ///   data.write(vec![1, 2, 3]);
183  /// }
184  ///
185  ///
186  /// let mut detached_data = arena.alloc::<Vec<u8>>().unwrap();
187  /// detached_data.detach();
188  /// detached_data.write(vec![4, 5, 6]);
189  ///
190  /// // some other logic
191  ///
192  /// core::ptr::drop_in_place(detached_data.as_mut()); // drop the `Vec` manually.
193  ///
194  /// drop(arena); // it is safe, the `Vec` is already dropped.
195  /// ```
196  ///
197  /// ### Recoverable type with file backed allocator
198  ///
199  /// ```ignore
200  ///
201  /// struct Recoverable {
202  ///   field1: u64,
203  ///   field2: AtomicU32,
204  /// }
205  ///
206  /// let arena = Options::new().with_create_new(1000).with_read(true).with_write(true).map_mut::<Arena, _>("path/to/file").unwrap();
207  ///
208  /// let mut data = arena.alloc::<Recoverable>().unwrap();
209  /// data.write(Recoverable { field1: 10, field2: AtomicU32::new(20) });
210  /// data.detach();
211  /// let offset = data.offset();
212  /// drop(arena);
213  ///
214  /// // reopen the file
215  /// let arena = Options::new().with_read(true).map::<Arena, _>("path/to/file").unwrap();
216  ///
217  /// let foo = &*arena.get_aligned_pointer::<Recoverable>(offset as usize);
218  ///
219  /// assert_eq!(foo.field1, 10);
220  /// assert_eq!(foo.field2.load(Ordering::Acquire), 20);
221  /// ```
222  unsafe fn alloc<T>(&self) -> Result<RefMut<'_, T, Self>, Error>;
223
224  /// Allocates a byte slice that can hold a well-aligned `T` and extra `size` bytes.
225  ///
226  /// The layout of the allocated memory is:
227  ///
228  /// ```text
229  /// | T | [u8; size] |
230  /// ```
231  ///
232  /// ## Example
233  ///
234  /// ```ignore
235  /// let mut bytes = arena.alloc_aligned_bytes::<T>(extra).unwrap();
236  /// bytes.put(val).unwrap(); // write `T` to the byte slice.
237  /// ```
238  fn alloc_aligned_bytes<T>(&self, size: u32) -> Result<BytesRefMut<'_, Self>, Error>;
239
240  /// Allocates an owned byte slice that can hold a well-aligned `T` and extra `size` bytes.
241  ///
242  /// The layout of the allocated memory is:
243  ///
244  /// ```text
245  /// | T | [u8; size] |
246  /// ```
247  ///
248  /// ## Example
249  ///
250  /// ```ignore
251  /// let mut bytes = arena.alloc_aligned_bytes_owned::<T>(extra).unwrap();
252  /// bytes.put(val).unwrap(); // write `T` to the byte slice.
253  /// ```
254  fn alloc_aligned_bytes_owned<T>(&self, size: u32) -> Result<BytesMut<Self>, Error> {
255    self
256      .alloc_aligned_bytes::<T>(size)
257      .map(|mut b| b.to_owned())
258  }
259
260  // /// Allocates an owned byte slice that can hold a well-aligned `T` and extra `size` bytes.
261  // ///
262  // /// The layout of the allocated memory is:
263  // ///
264  // /// ```text
265  // /// | T | [u8; size] |
266  // /// ```
267  // ///
268  // /// ## Example
269  // ///
270  // /// ```ignore
271  // /// let mut bytes = arena.alloc_aligned_bytes_owned_within_page::<T>(extra).unwrap();
272  // /// bytes.put(val).unwrap(); // write `T` to the byte slice.
273  // /// ```
274  // #[cfg(all(feature = "memmap", not(target_family = "wasm")))]
275  // #[cfg_attr(docsrs, doc(cfg(all(feature = "memmap", not(target_family = "wasm")))))]
276  // fn alloc_aligned_bytes_owned_within_page<T>(&self, size: u32) -> Result<BytesMut<Self>, Error> {
277  //   self
278  //     .alloc_aligned_bytes_within_page::<T>(size)
279  //     .map(|mut b| b.to_owned())
280  // }
281
282  // /// Allocates a byte slice that can hold a well-aligned `T` and extra `size` bytes within a page.
283  // ///
284  // /// The layout of the allocated memory is:
285  // ///
286  // /// ```text
287  // /// | T | [u8; size] |
288  // /// ```
289  // ///
290  // /// ## Example
291  // ///
292  // /// ```ignore
293  // /// let mut bytes = arena.alloc_aligned_bytes_within_page::<T>(extra).unwrap();
294  // /// bytes.put(val).unwrap(); // write `T` to the byte slice.
295  // /// ```
296  // #[cfg(all(feature = "memmap", not(target_family = "wasm")))]
297  // #[cfg_attr(docsrs, doc(cfg(all(feature = "memmap", not(target_family = "wasm")))))]
298  // fn alloc_aligned_bytes_within_page<T>(&self, size: u32) -> Result<BytesRefMut<'_, Self>, Error>;
299
300  /// Allocates a slice of memory in the allocator.
301  ///
302  /// The [`BytesRefMut`](crate::BytesRefMut) is zeroed out.
303  ///
304  /// If you want a [`BytesMut`](crate::BytesMut), see [`alloc_bytes_owned`](Allocator::alloc_bytes_owned).
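  ///
  /// ## Example
  ///
  /// A minimal sketch of allocating a zeroed byte buffer from an in-memory `Arena`:
  ///
  /// ```ignore
  /// use rarena_allocator::{sync::Arena, Options, Allocator};
  ///
  /// let arena = Options::new().with_capacity(100).alloc::<Arena>().unwrap();
  ///
  /// // Allocate a 16-byte, zeroed-out buffer from the allocator.
  /// let mut bytes = arena.alloc_bytes(16).unwrap();
  /// ```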
305  fn alloc_bytes(&self, size: u32) -> Result<BytesRefMut<'_, Self>, Error>;
306
307  /// Allocates an owned slice of memory in the allocator.
308  ///
309  /// The cost of this method is an extra atomic operation, compared to [`alloc_bytes`](Allocator::alloc_bytes).
310  fn alloc_bytes_owned(&self, size: u32) -> Result<BytesMut<Self>, Error> {
311    self.alloc_bytes(size).map(|mut b| b.to_owned())
312  }
313
314  // /// Allocates an owned slice of memory in the allocator in the same page.
315  // ///
  // /// Compared to [`alloc_bytes_owned`](Self::alloc_bytes_owned), this method only allocates from the main memory,
  // /// which means that if the main memory does not have enough space but the freelist has segments that can hold the size,
  // /// this method will still return an error.
319  // ///
320  // /// The cost of this method is an extra atomic operation, compared to [`alloc_bytes_within_page`](Allocator::alloc_bytes_within_page).
321  // #[cfg(all(feature = "memmap", not(target_family = "wasm")))]
322  // #[cfg_attr(docsrs, doc(cfg(all(feature = "memmap", not(target_family = "wasm")))))]
323  // fn alloc_bytes_owned_within_page(&self, size: u32) -> Result<BytesMut<Self>, Error> {
324  //   self.alloc_bytes_within_page(size).map(|mut b| b.to_owned())
325  // }
326
327  // /// Allocates a slice of memory in the allocator in the same page.
328  // ///
  // /// Compared to [`alloc_bytes`](Allocator::alloc_bytes), this method only allocates from the main memory,
  // /// which means that if the main memory does not have enough space but the freelist has segments that can hold the size,
  // /// this method will still return an error.
332  // ///
333  // /// The [`BytesRefMut`](crate::BytesRefMut) is zeroed out.
334  // ///
335  // /// If you want a [`BytesMut`](crate::BytesMut), see [`alloc_bytes_owned_within_page`](Allocator::alloc_bytes_owned_within_page).
336  // #[cfg(all(feature = "memmap", not(target_family = "wasm")))]
337  // #[cfg_attr(docsrs, doc(cfg(all(feature = "memmap", not(target_family = "wasm")))))]
338  // fn alloc_bytes_within_page(&self, size: u32) -> Result<BytesRefMut<'_, Self>, Error>;
339
340  /// Allocates a `T` in the allocator. Like [`alloc`](Allocator::alloc), but returns an `Owned`.
341  ///
342  /// The cost is one more atomic operation than [`alloc`](Allocator::alloc).
343  ///
344  /// ## Safety
345  ///
346  /// - See [`alloc`](Allocator::alloc) for safety.
347  ///
348  /// ## Example
349  ///
350  /// ```rust
351  /// use rarena_allocator::{sync::Arena, Options, Allocator};
352  ///
353  /// let arena = Options::new().with_capacity(100).alloc::<Arena>().unwrap();
354  ///
355  /// unsafe {
356  ///   let mut data = arena.alloc_owned::<u64>().unwrap();
357  ///   data.write(10);
358  ///
359  ///   assert_eq!(*data.as_ref(), 10);
360  /// }
361  /// ```
362  unsafe fn alloc_owned<T>(&self) -> Result<Owned<T, Self>, Error> {
363    unsafe { self.alloc::<T>().map(|mut r| r.to_owned()) }
364  }
365
366  // /// Allocates a `T` in the allocator in the same page. Like [`alloc_within_page`](Allocator::alloc_within_page), but returns an `Owned`.
367  // ///
368  // /// ## Safety
369  // /// - See [`alloc`](Allocator::alloc) for safety.
370  // #[cfg(all(feature = "memmap", not(target_family = "wasm")))]
371  // #[cfg_attr(docsrs, doc(cfg(all(feature = "memmap", not(target_family = "wasm")))))]
372  // unsafe fn alloc_owned_within_page<T>(&self) -> Result<Owned<T, Self>, Error> {
373  //   self.alloc_within_page::<T>().map(|mut r| r.to_owned())
374  // }
375
376  // /// Allocates a `T` in the allocator in the same page.
377  // ///
378  // /// ## Safety
379  // ///
380  // /// - See [`alloc`](Allocator::alloc) for safety.
381  // #[cfg(all(feature = "memmap", not(target_family = "wasm")))]
382  // #[cfg_attr(docsrs, doc(cfg(all(feature = "memmap", not(target_family = "wasm")))))]
383  // unsafe fn alloc_within_page<T>(&self) -> Result<RefMut<'_, T, Self>, Error>;
384
385  /// Returns the number of bytes allocated by the allocator.
386  ///
387  /// ## Example
388  ///
389  /// ```rust
390  /// use rarena_allocator::{sync::Arena, Options, Allocator};
391  ///
392  /// let arena = Options::new().with_capacity(100).alloc::<Arena>().unwrap();
393  /// let allocated = arena.allocated();
394  /// ```
395  fn allocated(&self) -> usize;
396
  /// Returns the allocated portion of the allocator's main memory as a byte slice.
398  ///
399  /// ## Example
400  ///
401  /// ```rust
402  /// use rarena_allocator::{sync::Arena, Options, Allocator};
403  ///
404  /// let arena = Options::new().with_capacity(100).alloc::<Arena>().unwrap();
405  /// let memory = arena.allocated_memory();
406  /// ```
407  #[inline]
408  fn allocated_memory(&self) -> &[u8] {
409    let allocated = self.allocated();
410    unsafe { core::slice::from_raw_parts(self.raw_ptr(), allocated) }
411  }
412
  /// Returns the mutable start pointer of the main memory of the allocator.
414  fn raw_mut_ptr(&self) -> *mut u8;
415
416  /// Returns the start pointer of the main memory of the allocator.
417  fn raw_ptr(&self) -> *const u8;
418
419  /// Returns the capacity of the allocator.
420  ///
421  /// ## Example
422  ///
423  /// ```rust
424  /// use rarena_allocator::{sync::Arena, Options, Allocator};
425  ///
426  /// let arena = Options::new().with_capacity(100).alloc::<Arena>().unwrap();
427  /// let capacity = arena.capacity();
428  /// ```
429  #[inline]
430  fn capacity(&self) -> usize {
431    self.as_ref().cap() as usize
432  }
433
  /// Clears the allocator.
  ///
  /// ## Safety
  /// - Any pointers previously obtained from the allocator cannot be used anymore after calling this method.
  /// - This method is not thread-safe.
439  ///
440  /// ## Examples
441  ///
  /// Undefined behavior:
443  ///
444  /// ```ignore
445  /// let mut data = arena.alloc::<Vec<u8>>().unwrap();
446  ///
447  /// arena.clear();
448  ///
449  /// data.write(vec![1, 2, 3]); // undefined behavior
450  /// ```
451  ///
452  /// Good practice:
453  ///
454  /// ```rust
455  /// use rarena_allocator::{sync::Arena, Options, Allocator};
456  ///
457  /// let arena = Options::new().with_capacity(100).alloc::<Arena>().unwrap();
458  ///
459  /// unsafe {
460  ///   let mut data = arena.alloc::<Vec<u8>>().unwrap();
461  ///   data.write(vec![1, 2, 3]);
462  ///
463  ///   arena.clear().unwrap();
464  /// }
465  ///
466  /// ```
467  unsafe fn clear(&self) -> Result<(), Error>;
468
469  /// Returns the data offset of the allocator. The offset is the end of the reserved bytes of the allocator.
470  ///
471  /// ## Example
472  ///
473  /// ```rust
474  /// use rarena_allocator::{sync::Arena, Options, Allocator};
475  ///
476  /// let arena = Options::new().with_capacity(100).alloc::<Arena>().unwrap();
477  /// let data_offset = arena.data_offset();
478  /// ```
479  #[inline]
480  fn data_offset(&self) -> usize {
481    self.as_ref().data_offset()
482  }
483
  /// Returns the data section of the allocator as a byte slice; the header is not included.
485  ///
486  /// ## Example
487  ///
488  /// ```rust
489  /// use rarena_allocator::{sync::Arena, Options, Allocator};
490  ///
491  /// let arena = Options::new().with_capacity(100).alloc::<Arena>().unwrap();
492  /// let data = arena.data();
493  /// ```
494  #[inline]
495  fn data(&self) -> &[u8] {
496    unsafe {
497      let offset = self.data_offset();
498      let ptr = self.raw_ptr().add(offset);
499      let allocated = self.allocated();
500      core::slice::from_raw_parts(ptr, allocated - offset)
501    }
502  }
503
  /// Deallocates the memory at the given offset and size; `offset..offset + size` will be turned into a segment.
  /// Returns `true` if the deallocation is successful.
  ///
  /// ## Safety
  /// - You must ensure the same `offset..offset + size` is not deallocated twice.
  /// - `offset` must be larger than the [`Allocator::data_offset`].
  /// - `offset + size` must be less than the [`Allocator::allocated`].
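  ///
  /// ## Example
  ///
  /// A minimal sketch. The `detach`, `offset`, and `capacity` accessors on
  /// [`BytesRefMut`](crate::BytesRefMut) are assumed here purely for illustration:
  ///
  /// ```ignore
  /// use rarena_allocator::{sync::Arena, Options, Allocator};
  ///
  /// let arena = Options::new().with_capacity(100).alloc::<Arena>().unwrap();
  ///
  /// let mut bytes = arena.alloc_bytes(16).unwrap();
  /// bytes.detach(); // assumed accessor: take over the buffer's lifecycle from the allocator
  /// let (offset, size) = (bytes.offset() as u32, bytes.capacity() as u32); // assumed accessors
  /// drop(bytes);
  ///
  /// unsafe {
  ///   // The same range is only deallocated once, so this is sound.
  ///   assert!(arena.dealloc(offset, size));
  /// }
  /// ```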
511  unsafe fn dealloc(&self, offset: u32, size: u32) -> bool;
512
513  /// Discards all freelist nodes in the allocator.
514  ///
515  /// Returns the number of bytes discarded.
516  ///
517  /// ## Example
518  ///
519  /// ```rust
520  /// use rarena_allocator::{sync::Arena, Options, Allocator};
521  ///
522  /// let arena = Options::new().with_capacity(100).alloc::<Arena>().unwrap();
523  /// arena.discard_freelist();
524  /// ```
525  fn discard_freelist(&self) -> Result<u32, Error>;
526
527  /// Returns the number of bytes discarded by the allocator.
528  ///
529  /// ## Example
530  ///
531  /// ```rust
532  /// use rarena_allocator::{sync::Arena, Options, Allocator};
533  ///
534  /// let arena = Options::new().with_capacity(100).alloc::<Arena>().unwrap();
535  /// let discarded = arena.discarded();
536  /// ```
537  fn discarded(&self) -> u32;
538
539  /// Flushes the memory-mapped file to disk.
540  ///
541  /// ## Example
542  ///
543  /// ```rust
544  /// use rarena_allocator::{sync::Arena, Options, Allocator};
545  /// # let path = tempfile::NamedTempFile::new().unwrap().into_temp_path();
546  /// # std::fs::remove_file(&path);
547  ///
548  ///
549  ///
550  /// let mut arena = unsafe { Options::new().with_create_new(true).with_read(true).with_write(true).with_capacity(100).map_mut::<Arena, _>(&path).unwrap() };
551  /// arena.flush().unwrap();
552  ///
553  /// # std::fs::remove_file(path);
554  /// ```
555  #[cfg(all(feature = "memmap", not(target_family = "wasm")))]
556  #[cfg_attr(docsrs, doc(cfg(all(feature = "memmap", not(target_family = "wasm")))))]
557  #[inline]
558  fn flush(&self) -> std::io::Result<()> {
559    self.as_ref().flush()
560  }
561
562  /// Flushes the memory-mapped file to disk asynchronously.
563  ///
564  /// ## Example
565  ///
566  /// ```rust
567  /// use rarena_allocator::{sync::Arena, Options, Allocator};
568  ///
569  /// # let path = tempfile::NamedTempFile::new().unwrap().into_temp_path();
570  /// # std::fs::remove_file(&path);
571  ///
572  ///
573  /// let mut arena = unsafe { Options::new().with_create_new(true).with_read(true).with_write(true).with_capacity(100).map_mut::<Arena, _>(&path).unwrap() };
574  ///
575  /// arena.flush_async().unwrap();
576  ///
577  /// # std::fs::remove_file(path);
578  /// ```
579  #[cfg(all(feature = "memmap", not(target_family = "wasm")))]
580  #[cfg_attr(docsrs, doc(cfg(all(feature = "memmap", not(target_family = "wasm")))))]
581  #[inline]
582  fn flush_async(&self) -> std::io::Result<()> {
583    self.as_ref().flush_async()
584  }
585
586  /// Flushes outstanding memory map modifications in the range to disk.
587  ///
588  /// ## Example
589  ///
590  /// ```rust
591  /// use rarena_allocator::{sync::Arena, Options, Allocator};
592  /// # let path = tempfile::NamedTempFile::new().unwrap().into_temp_path();
593  /// # std::fs::remove_file(&path);
594  ///
595  ///
596  ///
597  /// let mut arena = unsafe { Options::new().with_create_new(true).with_read(true).with_write(true).with_capacity(100).map_mut::<Arena, _>(&path).unwrap() };
598  /// arena.flush_range(0, 100).unwrap();
599  ///
600  /// # std::fs::remove_file(path);
601  /// ```
602  #[cfg(all(feature = "memmap", not(target_family = "wasm")))]
603  #[cfg_attr(docsrs, doc(cfg(all(feature = "memmap", not(target_family = "wasm")))))]
604  #[inline]
605  fn flush_range(&self, offset: usize, len: usize) -> std::io::Result<()> {
606    self.as_ref().flush_range(offset, len)
607  }
608
609  /// Asynchronously flushes outstanding memory map modifications in the range to disk.
610  ///
611  /// ## Example
612  ///
613  /// ```rust
614  /// use rarena_allocator::{sync::Arena, Options, Allocator};
615  ///
616  /// # let path = tempfile::NamedTempFile::new().unwrap().into_temp_path();
617  /// # std::fs::remove_file(&path);
618  ///
619  ///
620  /// let mut arena = unsafe { Options::new().with_create_new(true).with_read(true).with_write(true).with_capacity(100).map_mut::<Arena, _>(&path).unwrap() };
621  ///
622  /// arena.flush_async_range(0, 100).unwrap();
623  ///
624  /// # std::fs::remove_file(path);
625  /// ```
626  #[cfg(all(feature = "memmap", not(target_family = "wasm")))]
627  #[cfg_attr(docsrs, doc(cfg(all(feature = "memmap", not(target_family = "wasm")))))]
628  #[inline]
629  fn flush_async_range(&self, offset: usize, len: usize) -> std::io::Result<()> {
630    self.as_ref().flush_async_range(offset, len)
631  }
632
633  /// Flushes outstanding memory map modifications in `Allocator`'s header to disk.
634  ///
635  /// ## Example
636  ///
637  /// ```rust
638  /// use rarena_allocator::{sync::Arena, Options, Allocator};
639  /// # let path = tempfile::NamedTempFile::new().unwrap().into_temp_path();
640  /// # std::fs::remove_file(&path);
641  ///
642  ///
643  ///
644  /// let mut arena = unsafe { Options::new().with_create_new(true).with_read(true).with_write(true).with_capacity(100).map_mut::<Arena, _>(&path).unwrap() };
645  /// arena.flush_header().unwrap();
646  ///
647  /// # std::fs::remove_file(path);
648  /// ```
649  #[cfg(all(feature = "memmap", not(target_family = "wasm")))]
650  #[cfg_attr(docsrs, doc(cfg(all(feature = "memmap", not(target_family = "wasm")))))]
651  #[inline]
652  fn flush_header(&self) -> std::io::Result<()> {
653    self.flush_header_and_range(0, 0)
654  }
655
  /// Asynchronously flushes outstanding memory map modifications in `Allocator`'s header to disk.
657  ///
658  /// ## Example
659  ///
660  /// ```rust
661  /// use rarena_allocator::{sync::Arena, Options, Allocator};
662  ///
663  /// # let path = tempfile::NamedTempFile::new().unwrap().into_temp_path();
664  /// # std::fs::remove_file(&path);
665  ///
666  ///
667  /// let mut arena = unsafe { Options::new().with_create_new(true).with_read(true).with_write(true).with_capacity(100).map_mut::<Arena, _>(&path).unwrap() };
668  ///
669  /// arena.flush_async_header().unwrap();
670  ///
671  /// # std::fs::remove_file(path);
672  /// ```
673  #[cfg(all(feature = "memmap", not(target_family = "wasm")))]
674  #[cfg_attr(docsrs, doc(cfg(all(feature = "memmap", not(target_family = "wasm")))))]
675  #[inline]
676  fn flush_async_header(&self) -> std::io::Result<()> {
677    self.flush_async_header_and_range(0, 0)
678  }
679
680  /// Flushes outstanding memory map modifications in the range and `Allocator`'s header to disk.
681  ///
682  /// ## Example
683  ///
684  /// ```rust
685  /// use rarena_allocator::{sync::Arena, Options, Allocator};
686  /// # let path = tempfile::NamedTempFile::new().unwrap().into_temp_path();
687  /// # std::fs::remove_file(&path);
688  ///
689  ///
690  ///
691  /// let mut arena = unsafe { Options::new().with_create_new(true).with_read(true).with_write(true).with_capacity(100).map_mut::<Arena, _>(&path).unwrap() };
692  /// arena.flush_header_and_range(0, 100).unwrap();
693  ///
694  /// # std::fs::remove_file(path);
695  /// ```
696  #[cfg(all(feature = "memmap", not(target_family = "wasm")))]
697  #[cfg_attr(docsrs, doc(cfg(all(feature = "memmap", not(target_family = "wasm")))))]
698  #[inline]
699  fn flush_header_and_range(&self, offset: usize, len: usize) -> std::io::Result<()> {
700    self.as_ref().flush_header_and_range(offset, len)
701  }
702
703  /// Asynchronously flushes outstanding memory map modifications in the range and `Allocator`'s header to disk.
704  ///
705  /// ## Example
706  ///
707  /// ```rust
708  /// use rarena_allocator::{sync::Arena, Options, Allocator};
709  ///
710  /// # let path = tempfile::NamedTempFile::new().unwrap().into_temp_path();
711  /// # std::fs::remove_file(&path);
712  ///
713  ///
714  /// let mut arena = unsafe { Options::new().with_create_new(true).with_read(true).with_write(true).with_capacity(100).map_mut::<Arena, _>(&path).unwrap() };
715  ///
716  /// arena.flush_async_header_and_range(0, 100).unwrap();
717  ///
718  /// # std::fs::remove_file(path);
719  /// ```
720  #[cfg(all(feature = "memmap", not(target_family = "wasm")))]
721  #[cfg_attr(docsrs, doc(cfg(all(feature = "memmap", not(target_family = "wasm")))))]
722  #[inline]
723  fn flush_async_header_and_range(&self, offset: usize, len: usize) -> std::io::Result<()> {
724    self.as_ref().flush_async_header_and_range(offset, len)
725  }
726
727  /// Returns a pointer to the memory at the given offset.
728  ///
729  /// ## Safety
730  /// - `offset` must be less than the capacity of the allocator.
731  #[inline]
732  unsafe fn get_pointer(&self, offset: usize) -> *const u8 {
733    unsafe {
734      if offset == 0 {
735        return self.raw_ptr();
736      }
737
738      self.raw_ptr().add(offset)
739    }
740  }
741
  /// Returns a mutable pointer to the memory at the given offset.
744  ///
745  /// ## Safety
746  /// - `offset` must be less than the capacity of the allocator.
747  ///
748  /// # Panic
749  /// - If the allocator is read-only, then this method will panic.
750  #[inline]
751  unsafe fn get_pointer_mut(&self, offset: usize) -> *mut u8 {
752    unsafe {
753      assert!(!self.read_only(), "ARENA is read-only");
754
755      if offset == 0 {
756        return self.raw_mut_ptr();
757      }
758
759      self.raw_mut_ptr().add(offset)
760    }
761  }
762
763  /// Returns an aligned pointer to the memory at the given offset.
764  ///
765  /// ## Safety
766  /// - `offset..offset + mem::size_of::<T>() + padding` must be allocated memory.
767  /// - `offset` must be less than the capacity of the allocator.
768  #[inline]
769  unsafe fn get_aligned_pointer<T>(&self, offset: usize) -> *const T {
770    unsafe {
771      if offset == 0 {
772        return core::ptr::null();
773      }
774
775      let align_offset = align_offset::<T>(offset as u32) as usize;
776      self.raw_ptr().add(align_offset).cast()
777    }
778  }
779
  /// Returns an aligned mutable pointer to the memory at the given offset.
782  ///
783  /// ## Safety
784  /// - `offset..offset + mem::size_of::<T>() + padding` must be allocated memory.
785  /// - `offset` must be less than the capacity of the allocator.
786  ///
787  /// # Panic
788  /// - If the allocator is read-only, then this method will panic.
789  unsafe fn get_aligned_pointer_mut<T>(&self, offset: usize) -> core::ptr::NonNull<T> {
790    unsafe {
791      assert!(!self.read_only(), "ARENA is read-only");
792
793      if offset == 0 {
794        return NonNull::dangling();
795      }
796
797      let align_offset = align_offset::<T>(offset as u32) as usize;
798      let ptr = self.raw_mut_ptr().add(align_offset).cast();
799      NonNull::new_unchecked(ptr)
800    }
801  }
802
803  /// Returns a bytes slice from the allocator.
804  ///
805  /// ## Safety
806  /// - `offset..offset + size` must be allocated memory.
807  /// - `offset` must be less than the capacity of the allocator.
808  /// - `size` must be less than the capacity of the allocator.
809  /// - `offset + size` must be less than the capacity of the allocator.
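  ///
  /// ## Example
  ///
  /// A minimal sketch; the `offset` accessor on [`BytesRefMut`](crate::BytesRefMut) is assumed here
  /// for illustration:
  ///
  /// ```ignore
  /// use rarena_allocator::{sync::Arena, Options, Allocator};
  ///
  /// let arena = Options::new().with_capacity(100).alloc::<Arena>().unwrap();
  ///
  /// let bytes = arena.alloc_bytes(4).unwrap();
  /// let offset = bytes.offset(); // assumed accessor
  ///
  /// unsafe {
  ///   // `alloc_bytes` zeroes the buffer, so reading it back yields zeros.
  ///   assert_eq!(arena.get_bytes(offset, 4), &[0, 0, 0, 0]);
  /// }
  /// ```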
810  unsafe fn get_bytes(&self, offset: usize, size: usize) -> &[u8] {
811    unsafe {
812      if size == 0 {
813        return &[];
814      }
815
816      let ptr = self.get_pointer(offset);
817      core::slice::from_raw_parts(ptr, size)
818    }
819  }
820
821  /// Returns a `u8` from the allocator.
822  fn get_u8(&self, offset: usize) -> Result<u8, Error> {
823    let allocated = self.allocated();
824    if offset >= allocated {
825      return Err(Error::OutOfBounds { offset, allocated });
826    }
827
828    let buf = unsafe {
829      let ptr = self.raw_ptr().add(offset);
830      core::slice::from_raw_parts(ptr, 1)
831    };
832
833    Ok(buf[0])
834  }
835
  /// Returns an `i8` from the allocator.
837  fn get_i8(&self, offset: usize) -> Result<i8, Error> {
838    let allocated = self.allocated();
839    if offset >= allocated {
840      return Err(Error::OutOfBounds { offset, allocated });
841    }
842
843    let buf = unsafe {
844      let ptr = self.raw_ptr().add(offset);
845      core::slice::from_raw_parts(ptr, 1)
846    };
847
848    Ok(buf[0] as i8)
849  }
850
851  /// Returns a `u8` from the allocator without bounds checking.
852  ///
853  /// ## Safety
  /// - `offset + 1` must be within the allocated memory of the allocator.
855  unsafe fn get_u8_unchecked(&self, offset: usize) -> u8 {
856    let buf = unsafe {
857      let ptr = self.raw_ptr().add(offset);
858      core::slice::from_raw_parts(ptr, 1)
859    };
860
861    buf[0]
862  }
863
  /// Returns an `i8` from the allocator without bounds checking.
  ///
  /// ## Safety
  /// - `offset + 1` must be within the allocated memory of the allocator.
868  unsafe fn get_i8_unchecked(&self, offset: usize) -> i8 {
869    let buf = unsafe {
870      let ptr = self.raw_ptr().add(offset);
871      core::slice::from_raw_parts(ptr, 1)
872    };
873
874    buf[0] as i8
875  }
876
877  define_bytes_utils!(
878    u16:"be",
879    u16:"le",
880    u32:"be",
881    u32:"le",
882    u64:"be",
883    u64:"le",
884    u128:"be",
885    u128:"le",
886    i16:"be",
887    i16:"le",
888    i32:"be",
889    i32:"le",
890    i64:"be",
891    i64:"le",
892    i128:"be",
893    i128:"le",
894  );
895
896  define_leb128_utils!(
897    i16:3,
898    i32:5,
899    i64:10,
900    i128:19,
901    u16:3,
902    u32:5,
903    u64:10,
904    u128:19,
905  );
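
  // The two macro invocations above expand into a family of getters such as `get_u16_le`,
  // `get_u64_be`, and `get_u32_varint` (bounds-checked), plus `_unchecked` variants for the
  // fixed-width integers. A minimal usage sketch (the `offset` accessor on `BytesRefMut` is
  // assumed here for illustration):
  //
  //   let arena = Options::new().with_capacity(100).alloc::<Arena>().unwrap();
  //   let bytes = arena.alloc_bytes(8).unwrap();
  //   let offset = bytes.offset(); // assumed accessor
  //
  //   // The buffer is zeroed, so decoding a little-endian u64 from it yields 0.
  //   assert_eq!(arena.get_u64_le(offset).unwrap(), 0);
  //
  //   // LEB128 getters return the number of bytes read alongside the decoded value.
  //   let (read, value) = arena.get_u64_varint(offset).unwrap();
  //   assert_eq!((read, value), (1, 0));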
906
  /// Returns a mutable bytes slice from the allocator.
909  ///
910  /// ## Safety
911  /// - `offset..offset + size` must be allocated memory.
912  /// - `offset` must be less than the capacity of the allocator.
913  /// - `size` must be less than the capacity of the allocator.
914  /// - `offset + size` must be less than the capacity of the allocator.
915  ///
916  /// # Panic
917  /// - If the allocator is read-only, then this method will panic.
918  #[allow(clippy::mut_from_ref)]
919  unsafe fn get_bytes_mut(&self, offset: usize, size: usize) -> &mut [u8] {
920    unsafe {
921      if size == 0 {
922        return &mut [];
923      }
924
925      let ptr = self.get_pointer_mut(offset);
926      core::slice::from_raw_parts_mut(ptr, size)
927    }
928  }
929
  /// Forcibly increases the discarded bytes.
931  ///
932  /// ## Example
933  ///
934  /// ```rust
935  /// use rarena_allocator::{sync::Arena, Options, Allocator};
936  ///
937  /// let arena = Options::new().with_capacity(100).alloc::<Arena>().unwrap();
938  /// arena.increase_discarded(100);
939  /// ```
940  fn increase_discarded(&self, size: u32);
941
942  /// Returns `true` if the allocator is created through memory map.
943  ///
944  /// ## Example
945  ///
946  /// ```rust
947  /// use rarena_allocator::{sync::Arena, Allocator, Options};
948  ///
949  /// let arena = Options::new().with_capacity(100).alloc::<Arena>().unwrap();
950  /// let is_map = arena.is_map();
951  /// assert_eq!(is_map, false);
952  /// ```
953  #[cfg(all(feature = "memmap", not(target_family = "wasm")))]
954  #[cfg_attr(docsrs, doc(cfg(all(feature = "memmap", not(target_family = "wasm")))))]
955  #[inline]
956  fn is_map(&self) -> bool {
957    self.as_ref().flag.contains(MemoryFlags::MMAP)
958  }
959
960  /// Returns `true` if the allocator is on disk.
961  ///
962  /// ## Example
963  ///
964  /// ```rust
965  /// use rarena_allocator::{sync::Arena, Options, Allocator};
966  ///
967  /// let arena = Options::new().with_capacity(100).alloc::<Arena>().unwrap();
968  /// let is_ondisk = arena.is_ondisk();
969  /// assert_eq!(is_ondisk, false);
970  /// ```
971  #[inline]
972  fn is_ondisk(&self) -> bool {
973    self.as_ref().flag.contains(MemoryFlags::ON_DISK)
974  }
975
976  /// Returns `true` if the allocator is in memory.
977  ///
978  /// ## Example
979  ///
980  /// ```rust
981  /// use rarena_allocator::{sync::Arena, Options, Allocator};
982  ///
983  /// let arena = Options::new().with_capacity(100).alloc::<Arena>().unwrap();
984  /// let is_inmemory = arena.is_inmemory();
985  /// assert_eq!(is_inmemory, true);
986  /// ```
987  #[inline]
988  fn is_inmemory(&self) -> bool {
989    !self.is_ondisk()
990  }
991
  /// Returns `true` if the allocator is backed by an anonymous memory map.
993  ///
994  /// ## Example
995  ///
996  /// ```rust
997  /// use rarena_allocator::{sync::Arena, Options, Allocator};
998  ///
999  /// let arena = Options::new().with_capacity(100).map_anon::<Arena>().unwrap();
1000  /// let is_map_anon = arena.is_map_anon();
1001  /// assert_eq!(is_map_anon, true);
1002  /// ```
1003  #[cfg(all(feature = "memmap", not(target_family = "wasm")))]
1004  #[cfg_attr(docsrs, doc(cfg(all(feature = "memmap", not(target_family = "wasm")))))]
1005  fn is_map_anon(&self) -> bool {
1006    self.is_map() && !self.is_ondisk()
1007  }
1008
1009  /// Returns `true` if the allocator is on-disk and created through memory map.
1010  ///
1011  /// ## Example
1012  ///
1013  /// ```rust
1014  /// use rarena_allocator::{sync::Arena, Options, Allocator};
1015  ///
1016  /// let arena = Options::new().with_capacity(100).alloc::<Arena>().unwrap();
1017  /// let is_map_file = arena.is_map_file();
1018  /// assert_eq!(is_map_file, false);
1019  /// ```
1020  #[cfg(all(feature = "memmap", not(target_family = "wasm")))]
1021  #[cfg_attr(docsrs, doc(cfg(all(feature = "memmap", not(target_family = "wasm")))))]
1022  fn is_map_file(&self) -> bool {
1023    self.is_map() && self.is_ondisk()
1024  }
1025
1026  /// Locks the underlying file for exclusive access, only works on mmap with a file backend.
1027  ///
1028  /// ## Example
1029  ///
1030  /// ```rust
1031  /// use rarena_allocator::{sync::Arena, Options, Allocator};
1032  /// # let path = tempfile::NamedTempFile::new().unwrap().into_temp_path();
1033  /// # std::fs::remove_file(&path);
1034  ///
1035  ///
1036  ///
1037  /// let mut arena = unsafe { Options::new().with_create_new(true).with_read(true).with_write(true).with_capacity(100).map_mut::<Arena, _>(&path).unwrap() };
1038  /// arena.lock_exclusive().unwrap();
1039  ///
1040  /// # std::fs::remove_file(path);
1041  /// ```
1042  #[cfg(all(feature = "memmap", not(target_family = "wasm")))]
1043  #[cfg_attr(docsrs, doc(cfg(all(feature = "memmap", not(target_family = "wasm")))))]
1044  #[inline]
1045  fn lock_exclusive(&self) -> std::io::Result<()> {
1046    self.as_ref().lock_exclusive()
1047  }
1048
1049  /// Locks the underlying file for shared access, only works on mmap with a file backend.
1050  ///
1051  /// ## Example
1052  ///
1053  /// ```rust
1054  /// use rarena_allocator::{sync::Arena, Options, Allocator};
1055  /// # let path = tempfile::NamedTempFile::new().unwrap().into_temp_path();
1056  /// # std::fs::remove_file(&path);
1057  ///
1058  ///
1059  ///
1060  /// let mut arena = unsafe { Options::new().with_create_new(true).with_read(true).with_write(true).with_capacity(100).map_mut::<Arena, _>(&path).unwrap() };
1061  /// arena.lock_shared().unwrap();
1062  ///
1063  /// # std::fs::remove_file(path);
1064  /// ```
1065  #[cfg(all(feature = "memmap", not(target_family = "wasm")))]
1066  #[cfg_attr(docsrs, doc(cfg(all(feature = "memmap", not(target_family = "wasm")))))]
1067  #[inline]
1068  fn lock_shared(&self) -> std::io::Result<()> {
1069    self.as_ref().lock_shared()
1070  }
1071
  /// Returns the magic version of the allocator. This value can be used by applications built on
  /// [`Allocator`] to check compatibility.
1074  ///
1075  /// ## Example
1076  ///
1077  /// ```rust
1078  /// use rarena_allocator::{sync::Arena, Options, Allocator};
1079  ///
1080  /// let arena = Options::new().with_capacity(100).alloc::<Arena>().unwrap();
1081  /// let magic_version = arena.magic_version();
1082  /// ```
1083  fn magic_version(&self) -> u16;
1084
1085  /// Returns the whole main memory of the allocator as a byte slice.
1086  ///
1087  /// ## Example
1088  ///
1089  /// ```rust
1090  /// use rarena_allocator::{sync::Arena, Options, Allocator};
1091  ///
1092  /// let arena = Options::new().with_capacity(100).alloc::<Arena>().unwrap();
1093  /// let memory = arena.memory();
1094  /// ```
1095  #[inline]
1096  fn memory(&self) -> &[u8] {
1097    unsafe { core::slice::from_raw_parts(self.raw_ptr(), self.capacity()) }
1098  }
1099
1100  /// Calculates the checksum of the allocated memory (excluding the reserved memory specified by users through [`Options::with_reserved`]) of the allocator.
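  ///
  /// ## Example
  ///
  /// A minimal sketch, assuming a [`BuildChecksumer`] implementation such as `Crc32` from the
  /// `dbutils` crate is available (the exact checksumer type and constructor are illustrative):
  ///
  /// ```ignore
  /// use rarena_allocator::{sync::Arena, Options, Allocator};
  /// use dbutils::checksum::Crc32;
  ///
  /// let arena = Options::new().with_capacity(100).alloc::<Arena>().unwrap();
  ///
  /// // `Crc32` is illustrative; any `BuildChecksumer` implementation works here.
  /// let checksum = arena.checksum(&Crc32::new());
  /// ```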
1101  fn checksum<S: BuildChecksumer>(&self, cks: &S) -> u64 {
1102    let allocated_memory = self.allocated_memory(); // Get the memory to be checksummed
1103    let reserved = self.reserved_slice().len();
1104    let data = &allocated_memory[reserved..];
1105
1106    let page_size = self.page_size(); // Get the size of each page
1107
1108    let total_len = data.len(); // Total length of the allocated memory
1109    let full_pages = total_len / page_size; // Calculate how many full pages there are
1110    let remaining_bytes = total_len % page_size; // Calculate the number of remaining bytes
1111
1112    let mut hasher = cks.build_checksumer(); // Create the hasher
1113
1114    // Iterate over each full page
1115    for page_id in 0..full_pages {
1116      let start = page_id * page_size;
1117      let end = start + page_size;
1118
1119      // Feed each page's slice into the hasher
1120      hasher.update(&data[start..end]);
1121    }
1122
1123    // Handle any remaining bytes that don’t fill a full page
1124    if remaining_bytes > 0 {
1125      let start = full_pages * page_size;
1126      hasher.update(&data[start..total_len]); // Process the remaining bytes
1127    }
1128
1129    // Finalize and return the checksum
1130    hasher.digest()
1131  }
1132
1133  /// Returns the minimum segment size of the allocator.
1134  ///
1135  /// ## Example
1136  ///
1137  /// ```rust
1138  /// use rarena_allocator::{sync::Arena, Options, Allocator};
1139  ///
1140  /// let arena = Options::new().with_capacity(100).alloc::<Arena>().unwrap();
1141  /// let min_segment_size = arena.minimum_segment_size();
1142  /// ```
1143  fn minimum_segment_size(&self) -> u32;
1144
1145  /// Sets the minimum segment size of the allocator.
1146  ///
1147  /// ## Example
1148  ///
1149  /// ```rust
1150  /// use rarena_allocator::{sync::Arena, Options, Allocator};
1151  ///
1152  /// let arena = Options::new().with_capacity(100).alloc::<Arena>().unwrap();
1153  /// arena.set_minimum_segment_size(100);
1154  /// ```
1155  fn set_minimum_segment_size(&self, size: u32);
1156
  /// Returns `true` if the allocator uses the unified memory layout.
1158  ///
1159  /// ## Example
1160  ///
1161  /// ```rust
1162  /// use rarena_allocator::{sync::Arena, Options, Allocator};
1163  ///
1164  /// let arena = Options::new().with_capacity(100).alloc::<Arena>().unwrap();
1165  /// assert_eq!(arena.unify(), false);
1166  ///
1167  /// let arena = Options::new().with_capacity(100).with_unify(true).alloc::<Arena>().unwrap();
1168  /// assert_eq!(arena.unify(), true);
1169  /// ```
1170  #[inline]
1171  fn unify(&self) -> bool {
1172    self.as_ref().unify()
1173  }
1174
1175  /// Returns the path of the mmap file, only returns `Some` when the ARENA is backed by a mmap file.
1176  ///
1177  /// ## Example
1178  ///
1179  /// ```rust
1180  /// # use rarena_allocator::{unsync::Arena, Allocator, Options};
1181  ///
1182  /// # let arena = Options::new().with_capacity(100).alloc::<Arena>().unwrap();
1183  /// let path = arena.path();
1184  /// ```
1185  #[cfg(all(feature = "memmap", not(target_family = "wasm")))]
1186  #[cfg_attr(docsrs, doc(cfg(all(feature = "memmap", not(target_family = "wasm")))))]
1187  fn path(&self) -> Option<&Self::Path>;
1188
1189  /// `mlock(ptr, len)`—Lock memory into RAM.
1190  ///
1191  /// ## Safety
1192  ///
1193  /// This function operates on raw pointers, but it should only be used on
1194  /// memory which the caller owns. Technically, locking memory shouldn't violate
1195  /// any invariants, but since unlocking it can violate invariants, this
1196  /// function is also unsafe for symmetry.
1197  ///
1198  /// Some implementations implicitly round the memory region out to the nearest
1199  /// page boundaries, so this function may lock more memory than explicitly
1200  /// requested if the memory isn't page-aligned. Other implementations fail if
1201  /// the memory isn't page-aligned.
1202  ///
1203  /// # References
1204  ///  - [POSIX]
1205  ///  - [Linux]
1206  ///  - [Apple]
1207  ///  - [FreeBSD]
1208  ///  - [NetBSD]
1209  ///  - [OpenBSD]
1210  ///  - [DragonFly BSD]
1211  ///  - [illumos]
1212  ///  - [glibc]
1213  ///
1214  /// [POSIX]: https://pubs.opengroup.org/onlinepubs/9699919799/functions/mlock.html
1215  /// [Linux]: https://man7.org/linux/man-pages/man2/mlock.2.html
1216  /// [Apple]: https://developer.apple.com/library/archive/documentation/System/Conceptual/ManPages_iPhoneOS/man2/mlock.2.html
1217  /// [FreeBSD]: https://man.freebsd.org/cgi/man.cgi?query=mlock&sektion=2
1218  /// [NetBSD]: https://man.netbsd.org/mlock.2
1219  /// [OpenBSD]: https://man.openbsd.org/mlock.2
1220  /// [DragonFly BSD]: https://man.dragonflybsd.org/?command=mlock&section=2
1221  /// [illumos]: https://illumos.org/man/3C/mlock
1222  /// [glibc]: https://www.gnu.org/software/libc/manual/html_node/Page-Lock-Functions.html#index-mlock
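  ///
  /// ## Example
  ///
  /// A minimal sketch, assuming an anonymous memory map backend; whether locking succeeds is
  /// platform dependent, so the result is not unwrapped here:
  ///
  /// ```ignore
  /// use rarena_allocator::{sync::Arena, Options, Allocator};
  ///
  /// let arena = Options::new().with_capacity(100).map_anon::<Arena>().unwrap();
  ///
  /// unsafe {
  ///   // Try to lock the beginning of the mapping into RAM.
  ///   let _ = arena.mlock(0, arena.page_size().min(arena.capacity()));
  /// }
  /// ```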
1223  #[cfg(all(feature = "memmap", not(target_family = "wasm")))]
1224  #[cfg_attr(docsrs, doc(cfg(all(feature = "memmap", not(target_family = "wasm")))))]
1225  unsafe fn mlock(&self, offset: usize, len: usize) -> std::io::Result<()> {
1226    unsafe { self.as_ref().mlock(offset, len) }
1227  }
1228
1229  /// `munlock(ptr, len)`—Unlock memory.
1230  ///
1231  /// ## Safety
1232  ///
1233  /// This function operates on raw pointers, but it should only be used on
1234  /// memory which the caller owns, to avoid compromising the `mlock` invariants
1235  /// of other unrelated code in the process.
1236  ///
1237  /// Some implementations implicitly round the memory region out to the nearest
1238  /// page boundaries, so this function may unlock more memory than explicitly
1239  /// requested if the memory isn't page-aligned.
1240  ///
1241  /// # References
1242  ///  - [POSIX]
1243  ///  - [Linux]
1244  ///  - [Apple]
1245  ///  - [FreeBSD]
1246  ///  - [NetBSD]
1247  ///  - [OpenBSD]
1248  ///  - [DragonFly BSD]
1249  ///  - [illumos]
1250  ///  - [glibc]
1251  ///
1252  /// [POSIX]: https://pubs.opengroup.org/onlinepubs/9699919799/functions/munlock.html
1253  /// [Linux]: https://man7.org/linux/man-pages/man2/munlock.2.html
1254  /// [Apple]: https://developer.apple.com/library/archive/documentation/System/Conceptual/ManPages_iPhoneOS/man2/munlock.2.html
1255  /// [FreeBSD]: https://man.freebsd.org/cgi/man.cgi?query=munlock&sektion=2
1256  /// [NetBSD]: https://man.netbsd.org/munlock.2
1257  /// [OpenBSD]: https://man.openbsd.org/munlock.2
1258  /// [DragonFly BSD]: https://man.dragonflybsd.org/?command=munlock&section=2
1259  /// [illumos]: https://illumos.org/man/3C/munlock
1260  /// [glibc]: https://www.gnu.org/software/libc/manual/html_node/Page-Lock-Functions.html#index-munlock
1261  #[cfg(all(feature = "memmap", not(target_family = "wasm")))]
1262  #[cfg_attr(docsrs, doc(cfg(all(feature = "memmap", not(target_family = "wasm")))))]
1263  unsafe fn munlock(&self, offset: usize, len: usize) -> std::io::Result<()> {
1264    unsafe { self.as_ref().munlock(offset, len) }
1265  }
1266
  /// Returns the offset of the given pointer from the start of the allocator's main memory.
1268  ///
1269  /// ## Safety
1270  /// - `ptr` must be allocated by this allocator.
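  ///
  /// ## Example
  ///
  /// A minimal sketch of round-tripping an offset through a pointer:
  ///
  /// ```ignore
  /// use rarena_allocator::{sync::Arena, Options, Allocator};
  ///
  /// let arena = Options::new().with_capacity(100).alloc::<Arena>().unwrap();
  /// let data_offset = arena.data_offset();
  ///
  /// unsafe {
  ///   // The pointer was obtained from this allocator, so it is valid for `offset`.
  ///   let ptr = arena.get_pointer(data_offset);
  ///   assert_eq!(arena.offset(ptr), data_offset);
  /// }
  /// ```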
1271  unsafe fn offset(&self, ptr: *const u8) -> usize;
1272
1273  /// Returns the page size.
1274  ///
  /// In a no-std environment, this method returns `4096`.
  /// Otherwise, it returns the system's page size.
1277  ///
1278  /// ## Example
1279  ///
1280  /// ```rust
1281  /// use rarena_allocator::{sync::Arena, Options, Allocator};
1282  ///
1283  /// let arena = Options::new().with_capacity(100).alloc::<Arena>().unwrap();
1284  /// let page_size = arena.page_size();
1285  /// ```
1286  fn page_size(&self) -> usize;
1287
1288  /// Returns `true` if the arena is read-only.
1289  ///
1290  /// ## Example
1291  ///
1292  /// ```rust
1293  /// use rarena_allocator::{sync::Arena, Options, Allocator};
1294  ///
1295  /// let arena = Options::new().with_capacity(100).alloc::<Arena>().unwrap();
1296  /// let read_only = arena.read_only();
1297  /// ```
1298  #[inline]
1299  fn read_only(&self) -> bool {
1300    self.as_ref().read_only()
1301  }
1302
1303  /// Returns the number of references to the allocator.
1304  ///
1305  /// ## Example
1306  ///
1307  /// ```rust
1308  /// use rarena_allocator::{sync::Arena, Options, Allocator};
1309  ///
1310  /// let arena = Options::new().with_capacity(100).alloc::<Arena>().unwrap();
1311  /// let refs = arena.refs();
1312  /// ```
1313  fn refs(&self) -> usize;
1314
  /// Returns the number of remaining bytes that can be allocated by the allocator.
1316  ///
1317  /// ## Example
1318  ///
1319  /// ```rust
1320  /// use rarena_allocator::{sync::Arena, Options, Allocator};
1321  ///
1322  /// let arena = Options::new().with_capacity(100).alloc::<Arena>().unwrap();
1323  /// let remaining = arena.remaining();
1324  /// ```
1325  fn remaining(&self) -> usize;
1326
1327  /// Sets remove on drop, only works on mmap with a file backend.
1328  ///
1329  /// Default is `false`.
1330  ///
  /// > **WARNING:** Once set to `true`, the backing file will be removed when the allocator is dropped, even if the file is opened in
  /// > read-only mode.
1333  ///
1334  /// ## Example
1335  ///
1336  /// ```rust
1337  /// # use rarena_allocator::{sync::Arena, Options, Allocator};
1338  ///
1339  /// # let arena = Options::new().with_capacity(100).alloc::<Arena>().unwrap();
1340  /// arena.remove_on_drop(true);
1341  /// ```
1342  #[cfg(all(feature = "memmap", not(target_family = "wasm")))]
1343  #[cfg_attr(docsrs, doc(cfg(all(feature = "memmap", not(target_family = "wasm")))))]
1344  fn remove_on_drop(&self, remove_on_drop: bool) {
1345    self.as_ref().set_remove_on_drop(remove_on_drop);
1346  }
1347
  /// Sets the allocator's main memory cursor back to the given position.
  ///
  /// ## Safety
  /// - If the current position is larger than the given position,
  ///   then the memory between the current position and the given position will be reclaimed,
  ///   so you must ensure the memory chunk between the current position and the given position is not
  ///   accessed anymore.
  /// - This method is not thread-safe.
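  ///
  /// ## Example
  ///
  /// A minimal sketch, assuming [`ArenaPosition`] has a `Start`-style variant addressing an absolute
  /// offset (the import path and variant name are illustrative; consult `ArenaPosition` for the real API):
  ///
  /// ```ignore
  /// use rarena_allocator::{sync::Arena, Options, Allocator, ArenaPosition};
  ///
  /// let arena = Options::new().with_capacity(100).alloc::<Arena>().unwrap();
  ///
  /// let before = arena.allocated();
  /// let bytes = arena.alloc_bytes(16).unwrap();
  /// drop(bytes);
  ///
  /// unsafe {
  ///   // Reclaim everything allocated after `before`; the reclaimed chunk must not be touched again.
  ///   // `ArenaPosition::Start` is an assumed variant name used only for illustration.
  ///   arena.rewind(ArenaPosition::Start(before as u32));
  /// }
  /// ```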
1356  unsafe fn rewind(&self, pos: ArenaPosition);
1357
1358  /// Try to lock the underlying file for exclusive access, only works on mmap with a file backend.
1359  ///
1360  /// ## Example
1361  ///
1362  /// ```rust
1363  /// use rarena_allocator::{sync::Arena, Options, Allocator};
1364  /// # let path = tempfile::NamedTempFile::new().unwrap().into_temp_path();
1365  /// # std::fs::remove_file(&path);
1366  ///
1367  ///
1368  ///
1369  /// let mut arena = unsafe { Options::new().with_create_new(true).with_read(true).with_write(true).with_capacity(100).map_mut::<Arena, _>(&path).unwrap() };
1370  /// arena.try_lock_exclusive().unwrap();
1371  ///
1372  /// # std::fs::remove_file(path);
1373  /// ```
1374  #[cfg(all(feature = "memmap", not(target_family = "wasm")))]
1375  #[cfg_attr(docsrs, doc(cfg(all(feature = "memmap", not(target_family = "wasm")))))]
1376  #[inline]
1377  fn try_lock_exclusive(&self) -> std::io::Result<()> {
1378    self.as_ref().try_lock_exclusive()
1379  }
1380
1381  /// Try to lock the underlying file for shared access, only works on mmap with a file backend.
1382  ///
1383  /// ## Example
1384  ///
1385  /// ```rust
1386  /// use rarena_allocator::{sync::Arena, Options, Allocator};
1387  /// # let path = tempfile::NamedTempFile::new().unwrap().into_temp_path();
1388  /// # std::fs::remove_file(&path);
1389  ///
1390  ///
1391  ///
1392  /// let mut arena = unsafe { Options::new().with_create_new(true).with_read(true).with_write(true).with_capacity(100).map_mut::<Arena, _>(&path).unwrap() };
1393  /// arena.try_lock_shared().unwrap();
1394  ///
1395  /// # std::fs::remove_file(path);
1396  /// ```
1397  #[cfg(all(feature = "memmap", not(target_family = "wasm")))]
1398  #[cfg_attr(docsrs, doc(cfg(all(feature = "memmap", not(target_family = "wasm")))))]
1399  #[inline]
1400  fn try_lock_shared(&self) -> std::io::Result<()> {
1401    self.as_ref().try_lock_shared()
1402  }
1403
1404  /// Unlocks the underlying file, only works on mmap with a file backend.
1405  ///
1406  /// ## Example
1407  ///
1408  /// ```rust
1409  /// use rarena_allocator::{sync::Arena, Options, Allocator};
1410  /// # let path = tempfile::NamedTempFile::new().unwrap().into_temp_path();
1411  /// # std::fs::remove_file(&path);
1412  ///
1413  ///
1414  ///
1415  /// let mut arena = unsafe { Options::new().with_create_new(true).with_read(true).with_write(true).with_capacity(100).map_mut::<Arena, _>(&path).unwrap() };
1416  /// arena.lock_exclusive().unwrap();
1417  ///
  /// // do something
1419  /// arena.unlock().unwrap();
1420  ///
1421  /// # std::fs::remove_file(path);
1422  /// ```
1423  #[cfg(all(feature = "memmap", not(target_family = "wasm")))]
1424  #[cfg_attr(docsrs, doc(cfg(all(feature = "memmap", not(target_family = "wasm")))))]
1425  #[inline]
1426  fn unlock(&self) -> std::io::Result<()> {
1427    self.as_ref().unlock()
1428  }
1429
1430  /// Returns the version of the allocator.
1431  ///
1432  /// ## Example
1433  ///
1434  /// ```rust
1435  /// use rarena_allocator::{sync::Arena, Options, Allocator};
1436  ///
1437  /// let arena = Options::new().with_capacity(100).alloc::<Arena>().unwrap();
1438  /// let version = arena.version();
1439  /// ```
1440  fn version(&self) -> u16;
1441}