Struct rlsf::Tlsf [−][src]
pub struct Tlsf<'pool, FLBitmap, SLBitmap, const FLLEN: usize, const SLLEN: usize> { /* fields omitted */ }
Expand description
The TLSF header (top-level) data structure.
Data Structure Overview
Properties
The allocation granularity (`GRANULARITY`) is `size_of::<usize>() * 4` bytes, which is the minimum size of a free block.
The maximum block size is `(GRANULARITY << FLLEN) - GRANULARITY`.
Implementations
impl<'pool, FLBitmap: BinInteger, SLBitmap: BinInteger, const FLLEN: usize, const SLLEN: usize> Tlsf<'pool, FLBitmap, SLBitmap, FLLEN, SLLEN>
[src]
impl<'pool, FLBitmap: BinInteger, SLBitmap: BinInteger, const FLLEN: usize, const SLLEN: usize> Tlsf<'pool, FLBitmap, SLBitmap, FLLEN, SLLEN>
[src]pub unsafe fn insert_free_block_ptr(
&mut self,
block: NonNull<[u8]>
) -> Option<NonZeroUsize>
[src]
pub unsafe fn insert_free_block_ptr(
&mut self,
block: NonNull<[u8]>
) -> Option<NonZeroUsize>
[src]Create a new memory pool at the location specified by a slice pointer.
Returns the actual number of bytes (counted from the beginning of `block`) used to create the memory pool. This value is necessary to calculate the start address to pass to `Self::append_free_block_ptr`.
This method does nothing and returns `None` if the given memory block is too small.
Time Complexity
This method will complete in linear time (`O(block.len())`) because it might need to divide the memory block to meet the maximum block size requirement (`(GRANULARITY << FLLEN) - GRANULARITY`).
Examples
use rlsf::Tlsf; use std::{mem::MaybeUninit, ptr::NonNull}; static mut POOL: MaybeUninit<[u8; 1024]> = MaybeUninit::uninit(); let mut tlsf: Tlsf<u8, u8, 8, 8> = Tlsf::INIT; unsafe { tlsf.insert_free_block_ptr(NonNull::new(POOL.as_mut_ptr()).unwrap()); }
Safety
The memory block will be considered owned by `self`. The memory block must outlive `self`.
Panics
This method never panics.
pub unsafe fn append_free_block_ptr(&mut self, block: NonNull<[u8]>) -> usize
[src]
pub unsafe fn append_free_block_ptr(&mut self, block: NonNull<[u8]>) -> usize
[src]Extend an existing memory pool by incorporating the specified memory block.
Returns the number of incorporated bytes, counted from the beginning of `block`.
In the current implementation, this method can coalesce memory pools only if the maximum pool size is outside the range of `usize`, i.e., `log2(GRANULARITY) + FLLEN >= usize::BITS`. This is because it does not track each pool's size and cannot check whether the resulting pool will have a valid size.
Time Complexity
This method will complete in linear time (`O(block.len())`) because it might need to divide the memory block to meet the maximum block size requirement (`(GRANULARITY << FLLEN) - GRANULARITY`).
Examples
use rlsf::Tlsf; use std::{mem::MaybeUninit, ptr::NonNull}; static mut POOL: MaybeUninit<[u8; 1024]> = MaybeUninit::uninit(); let mut cursor = unsafe { POOL.as_mut_ptr() } as *mut u8; let mut remaining_len = 1024; let mut tlsf: Tlsf<u8, u8, 8, 8> = Tlsf::INIT; let pool0_len = unsafe { tlsf.insert_free_block_ptr(nonnull_slice_from_raw_parts( NonNull::new(cursor).unwrap(), remaining_len / 2)) }.unwrap().get(); cursor = cursor.wrapping_add(pool0_len); remaining_len -= pool0_len; let pool1_len = unsafe { tlsf.append_free_block_ptr(nonnull_slice_from_raw_parts( NonNull::new(cursor).unwrap(), remaining_len / 2)) }; cursor = cursor.wrapping_add(pool1_len); remaining_len -= pool1_len; let pool2_len = unsafe { tlsf.append_free_block_ptr(nonnull_slice_from_raw_parts( NonNull::new(cursor).unwrap(), remaining_len)) }; cursor = cursor.wrapping_add(pool2_len); remaining_len -= pool2_len; // polyfill for <https://github.com/rust-lang/rust/issues/71941> fn nonnull_slice_from_raw_parts<T>(ptr: NonNull<T>, len: usize) -> NonNull<[T]> { NonNull::new(std::ptr::slice_from_raw_parts_mut(ptr.as_ptr(), len)).unwrap() }
Safety
The memory block will be considered owned by `self`. The memory block must outlive `self`.
`block`'s starting address must match an existing memory pool's ending address. See the above example for how to obtain one.
Panics
This method never panics.
pub fn insert_free_block(
&mut self,
block: &'pool mut [MaybeUninit<u8>]
) -> impl Send + Sync
[src]
pub fn insert_free_block(
&mut self,
block: &'pool mut [MaybeUninit<u8>]
) -> impl Send + Sync
[src]Create a new memory pool at the location specified by a slice.
This method does nothing if the given memory block is too small.
(The return type is yet to be determined.)
Time Complexity
See Self::insert_free_block_ptr
.
Examples
use rlsf::Tlsf; use std::mem::MaybeUninit; let mut pool = [MaybeUninit::uninit(); 1024]; let mut tlsf: Tlsf<u8, u8, 8, 8> = Tlsf::INIT; tlsf.insert_free_block(&mut pool);
The inserted memory block must outlive `self`:
use rlsf::Tlsf; use std::mem::MaybeUninit; let mut tlsf: Tlsf<u8, u8, 8, 8> = Tlsf::INIT; let mut pool = [MaybeUninit::uninit(); 1024]; tlsf.insert_free_block(&mut pool); drop(pool); // dropping the memory block first is not allowed drop(tlsf);
Panics
This method never panics.
pub fn allocate(&mut self, layout: Layout) -> Option<NonNull<u8>>
[src]
pub fn allocate(&mut self, layout: Layout) -> Option<NonNull<u8>>
[src]Attempt to allocate a block of memory.
Returns the starting address of the allocated memory block on success; `None` otherwise.
Time Complexity
This method will complete in constant time.
pub unsafe fn deallocate(&mut self, ptr: NonNull<u8>, align: usize)
[src]
pub unsafe fn deallocate(&mut self, ptr: NonNull<u8>, align: usize)
[src]Deallocate a previously allocated memory block.
Time Complexity
This method will complete in constant time.
Safety
- `ptr` must denote a memory block previously allocated via `self`.
- The memory block must have been allocated with the same alignment (`Layout::align`) as `align`.
pub unsafe fn reallocate(
&mut self,
ptr: NonNull<u8>,
new_layout: Layout
) -> Option<NonNull<u8>>
[src]
pub unsafe fn reallocate(
&mut self,
ptr: NonNull<u8>,
new_layout: Layout
) -> Option<NonNull<u8>>
[src]Shrink or grow a previously allocated memory block.
Returns the new starting address of the memory block on success; `None` otherwise.
Time Complexity
Unlike other methods, this method will complete in linear time (`O(old_size)`).
Safety
- `ptr` must denote a memory block previously allocated via `self`.
- The memory block must have been allocated with the same alignment (`Layout::align`) as `new_layout`.
Trait Implementations
impl<'pool, FLBitmap: Debug, SLBitmap: Debug, const FLLEN: usize, const SLLEN: usize> Debug for Tlsf<'pool, FLBitmap, SLBitmap, FLLEN, SLLEN>
[src]
impl<'pool, FLBitmap: Debug, SLBitmap: Debug, const FLLEN: usize, const SLLEN: usize> Debug for Tlsf<'pool, FLBitmap, SLBitmap, FLLEN, SLLEN>
[src]impl<FLBitmap: BinInteger, SLBitmap: BinInteger, const FLLEN: usize, const SLLEN: usize> Default for Tlsf<'_, FLBitmap, SLBitmap, FLLEN, SLLEN>
[src]
impl<FLBitmap: BinInteger, SLBitmap: BinInteger, const FLLEN: usize, const SLLEN: usize> Default for Tlsf<'_, FLBitmap, SLBitmap, FLLEN, SLLEN>
[src]impl<FLBitmap, SLBitmap, const FLLEN: usize, const SLLEN: usize> Send for Tlsf<'_, FLBitmap, SLBitmap, FLLEN, SLLEN>
[src]
impl<FLBitmap, SLBitmap, const FLLEN: usize, const SLLEN: usize> Sync for Tlsf<'_, FLBitmap, SLBitmap, FLLEN, SLLEN>
[src]
Auto Trait Implementations
impl<'pool, FLBitmap, SLBitmap, const FLLEN: usize, const SLLEN: usize> RefUnwindSafe for Tlsf<'pool, FLBitmap, SLBitmap, FLLEN, SLLEN> where
FLBitmap: RefUnwindSafe,
SLBitmap: RefUnwindSafe,
impl<'pool, FLBitmap, SLBitmap, const FLLEN: usize, const SLLEN: usize> Unpin for Tlsf<'pool, FLBitmap, SLBitmap, FLLEN, SLLEN> where
FLBitmap: Unpin,
SLBitmap: Unpin,