use std::cmp::max;
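// TLSF bin geometry: the first level buckets sizes by power of two starting
// at 2^FL_INDEX_SHIFT = 256 bytes, and each power-of-two range is split into
// SL_INDEX_COUNT = 32 linear sub-bins. Anything smaller than
// SMALL_BLOCK_SIZE shares bin (0, 0).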
const FL_INDEX_SHIFT: u32 = 8;
const FL_INDEX_MAX: u32 = 32;
const FL_INDEX_COUNT: u32 = FL_INDEX_MAX - FL_INDEX_SHIFT + 1;
const SL_INDEX_BITS: u32 = 5;
const SL_INDEX_COUNT: u32 = 1 << SL_INDEX_BITS;
const SMALL_BLOCK_SIZE: u64 = 1 << FL_INDEX_SHIFT;
const NIL: u32 = u32::MAX;
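/// One physical region of the managed range. Nodes are addressed by index
/// into `Tlsf::blocks` instead of by pointer, which keeps them `Copy` and
/// lets freed node slots be recycled via a simple index stack.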
#[derive(Clone, Copy)]
struct BlockNode {
offset: u64,
size: u64,
free: bool,
prev_phys: u32,
next_phys: u32,
prev_free: u32,
next_free: u32,
}
impl BlockNode {
const fn empty() -> Self {
Self {
offset: 0,
size: 0,
free: false,
prev_phys: NIL,
next_phys: NIL,
prev_free: NIL,
next_free: NIL,
}
}
}
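/// A Two-Level Segregated Fit (TLSF) offset allocator over a fixed-size
/// address range. Blocks live in `blocks` and reference each other by index
/// (`NIL` meaning "no link"): `prev_phys`/`next_phys` order blocks by
/// address, while `prev_free`/`next_free` chain the free list of each size
/// bin.
///
/// A minimal usage sketch (sizes here are illustrative):
///
/// ```ignore
/// let mut tlsf = Tlsf::new(64 * 1024 * 1024);
/// let a = tlsf.allocate(4096, 256).expect("should fit");
/// // ... suballocate the range [a.offset, a.offset + a.size) ...
/// tlsf.free(a);
/// ```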
#[allow(dead_code)]
pub(crate) struct Tlsf {
capacity: u64,
blocks: Vec<BlockNode>,
free_indices: Vec<u32>,
fl_bitmap: u32,
sl_bitmap: [u32; FL_INDEX_COUNT as usize],
    free_heads: Vec<u32>,
    used_bytes: u64,
free_bytes: u64,
allocation_count: u32,
free_count: u32,
}
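/// Handle returned by `allocate`; pass it back to `free` unchanged, since
/// `block_id` identifies the node that backs this allocation.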
#[derive(Debug, Clone, Copy)]
pub(crate) struct TlsfAllocation {
pub offset: u64,
pub size: u64,
pub block_id: u32,
}
#[allow(dead_code)]
impl Tlsf {
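    /// Creates an allocator managing `capacity` bytes of abstract address
    /// space, seeded with a single free block spanning the whole range.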
pub(crate) fn new(capacity: u64) -> Self {
let mut s = Self {
capacity,
blocks: Vec::with_capacity(64),
free_indices: Vec::new(),
fl_bitmap: 0,
sl_bitmap: [0; FL_INDEX_COUNT as usize],
free_heads: vec![NIL; (FL_INDEX_COUNT * SL_INDEX_COUNT) as usize],
used_bytes: 0,
free_bytes: 0,
allocation_count: 0,
free_count: 0,
};
let id = s.alloc_node(BlockNode {
offset: 0,
size: capacity,
free: false,
prev_phys: NIL,
next_phys: NIL,
prev_free: NIL,
next_free: NIL,
});
s.free_count = 1;
s.free_bytes = capacity;
s.insert_free_block(id);
s
}
pub(crate) fn capacity(&self) -> u64 {
self.capacity
}
pub(crate) fn used_bytes(&self) -> u64 {
self.used_bytes
}
pub(crate) fn free_bytes(&self) -> u64 {
self.free_bytes
}
pub(crate) fn allocation_count(&self) -> u32 {
self.allocation_count
}
pub(crate) fn free_region_count(&self) -> u32 {
self.free_count
}
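    /// Finds a free block that can hold `size` bytes at `alignment` (a power
    /// of two), splitting off any head or tail remainder. The size is
    /// rounded up to a multiple of the alignment and to at least
    /// `SMALL_BLOCK_SIZE`, so the returned allocation may be larger than
    /// requested. Each candidate bin's list is scanned with an explicit fit
    /// check rather than rounding the request up to the next bin, trading
    /// textbook TLSF's O(1) lookup for tighter fits under alignment padding.
    /// Returns `None` when no block fits.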
    pub(crate) fn allocate(&mut self, size: u64, alignment: u64) -> Option<TlsfAllocation> {
        let alignment = max(1, alignment);
        let want = max(SMALL_BLOCK_SIZE, round_up(size, alignment));
        let (start_fl, start_sl) = Self::mapping(want);
        // Use the first-level bitmap to skip power-of-two ranges that have
        // no free blocks at all.
        let mut fl_candidates = self.fl_bitmap & (!0u32 << start_fl);
        while fl_candidates != 0 {
            let fl = fl_candidates.trailing_zeros();
            fl_candidates &= fl_candidates - 1;
            // Sub-bins below `start_sl` in the starting range hold only
            // blocks smaller than `want`; higher ranges qualify in full.
            let sl_start = if fl == start_fl { start_sl } else { 0 };
            let mut remaining = self.sl_bitmap[fl as usize] & (!0u32 << sl_start);
            while remaining != 0 {
                let sl = remaining.trailing_zeros();
                remaining &= remaining - 1;
                let bin = Self::bin_index(fl, sl);
                let mut id = self.free_heads[bin];
                while id != NIL {
                    let block = self.blocks[id as usize];
                    let aligned = round_up(block.offset, alignment);
                    let head_padding = aligned - block.offset;
                    // The block must absorb the alignment padding and still
                    // hold the rounded request.
                    if head_padding + want <= block.size {
                        return Some(self.commit_alloc(id, aligned, want));
                    }
                    id = block.next_free;
                }
            }
        }
        None
    }
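    /// Carves the free block `id` into up to three pieces: an optional
    /// head-padding block in front of `aligned`, the allocation itself, and
    /// an optional tail remainder. Both padding pieces go straight back onto
    /// the free lists so they stay allocatable.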
fn commit_alloc(&mut self, id: u32, aligned: u64, used_size: u64) -> TlsfAllocation {
let block = self.blocks[id as usize];
let head_padding = aligned - block.offset;
let tail_offset = aligned + used_size;
let tail_padding = block.offset + block.size - tail_offset;
self.remove_free_block(id);
self.free_count -= 1;
self.free_bytes -= block.size;
let mut prev_phys = block.prev_phys;
if head_padding > 0 {
let head_id = self.alloc_node(BlockNode {
offset: block.offset,
size: head_padding,
free: false,
prev_phys,
            next_phys: NIL,
            prev_free: NIL,
next_free: NIL,
});
if prev_phys != NIL {
self.blocks[prev_phys as usize].next_phys = head_id;
}
self.insert_free_block(head_id);
self.free_count += 1;
self.free_bytes += head_padding;
prev_phys = head_id;
}
let used_id = self.alloc_node(BlockNode {
offset: aligned,
size: used_size,
free: false,
prev_phys,
            next_phys: NIL,
            prev_free: NIL,
next_free: NIL,
});
if prev_phys != NIL {
self.blocks[prev_phys as usize].next_phys = used_id;
}
let next_phys = block.next_phys;
if tail_padding > 0 {
let tail_id = self.alloc_node(BlockNode {
offset: tail_offset,
size: tail_padding,
free: false,
prev_phys: used_id,
next_phys,
prev_free: NIL,
next_free: NIL,
});
self.blocks[used_id as usize].next_phys = tail_id;
if next_phys != NIL {
self.blocks[next_phys as usize].prev_phys = tail_id;
}
self.insert_free_block(tail_id);
self.free_count += 1;
self.free_bytes += tail_padding;
} else {
self.blocks[used_id as usize].next_phys = next_phys;
if next_phys != NIL {
self.blocks[next_phys as usize].prev_phys = used_id;
}
}
self.allocation_count += 1;
self.used_bytes += used_size;
self.recycle_node(id);
TlsfAllocation {
offset: aligned,
size: used_size,
block_id: used_id,
}
}
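    /// Returns an allocation to the free lists, eagerly coalescing it with
    /// free physical neighbours so repeated allocate/free cycles cannot
    /// fragment the range.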
pub(crate) fn free(&mut self, allocation: TlsfAllocation) {
let mut id = allocation.block_id;
debug_assert!(!self.blocks[id as usize].free);
debug_assert_eq!(self.blocks[id as usize].offset, allocation.offset);
let size = self.blocks[id as usize].size;
self.allocation_count -= 1;
self.used_bytes -= size;
self.free_bytes += size;
self.free_count += 1;
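        // If the previous physical neighbour is free, fold this block into it.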
let prev = self.blocks[id as usize].prev_phys;
if prev != NIL && self.blocks[prev as usize].free {
self.remove_free_block(prev);
let prev_size = self.blocks[prev as usize].size;
let id_size = self.blocks[id as usize].size;
let id_next = self.blocks[id as usize].next_phys;
self.blocks[prev as usize].size = prev_size + id_size;
self.blocks[prev as usize].next_phys = id_next;
if id_next != NIL {
self.blocks[id_next as usize].prev_phys = prev;
}
self.recycle_node(id);
self.free_count -= 1;
id = prev;
}
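        // Likewise fold a free next neighbour into the (possibly merged) block.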
let next = self.blocks[id as usize].next_phys;
if next != NIL && self.blocks[next as usize].free {
self.remove_free_block(next);
let id_size = self.blocks[id as usize].size;
let next_size = self.blocks[next as usize].size;
let next_next = self.blocks[next as usize].next_phys;
self.blocks[id as usize].size = id_size + next_size;
self.blocks[id as usize].next_phys = next_next;
if next_next != NIL {
self.blocks[next_next as usize].prev_phys = id;
}
self.recycle_node(next);
self.free_count -= 1;
}
self.insert_free_block(id);
}
fn alloc_node(&mut self, node: BlockNode) -> u32 {
if let Some(idx) = self.free_indices.pop() {
self.blocks[idx as usize] = node;
idx
} else {
let idx = self.blocks.len() as u32;
self.blocks.push(node);
idx
}
}
fn recycle_node(&mut self, id: u32) {
self.blocks[id as usize] = BlockNode::empty();
self.free_indices.push(id);
}
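    /// Maps a size to its (first, second) level indices. The first level is
    /// the position of the size's top bit relative to `FL_INDEX_SHIFT`; the
    /// second level is the next `SL_INDEX_BITS` bits below the top bit. For
    /// example, 1536 (top bit 10) maps to fl = 10 - 8 = 2 and
    /// sl = (1536 >> 5) ^ 32 = 16.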
    fn mapping(size: u64) -> (u32, u32) {
        if size < SMALL_BLOCK_SIZE {
            return (0, 0);
        }
        let fl_raw = 63u32 - size.leading_zeros();
        if fl_raw > FL_INDEX_MAX {
            // Sizes of 2^(FL_INDEX_MAX + 1) bytes and up overflow the table;
            // send them all to the topmost bin. Clamping both indices keeps
            // the bin ordering monotone, and the fit check in `allocate`
            // still verifies actual sizes.
            return (FL_INDEX_COUNT - 1, SL_INDEX_COUNT - 1);
        }
        let fl = fl_raw - FL_INDEX_SHIFT;
        // Second level: the SL_INDEX_BITS bits directly below the top bit.
        let sl = ((size >> (fl_raw - SL_INDEX_BITS)) ^ (1 << SL_INDEX_BITS)) as u32;
        (fl, sl)
    }
fn bin_index(fl: u32, sl: u32) -> usize {
(fl * SL_INDEX_COUNT + sl) as usize
}
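    /// Pushes a block onto the head of its size bin and marks the bin as
    /// occupied in both bitmap levels.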
fn insert_free_block(&mut self, id: u32) {
let size = self.blocks[id as usize].size;
let (fl, sl) = Self::mapping(size);
let bin = Self::bin_index(fl, sl);
let head = self.free_heads[bin];
self.blocks[id as usize].free = true;
self.blocks[id as usize].prev_free = NIL;
self.blocks[id as usize].next_free = head;
if head != NIL {
self.blocks[head as usize].prev_free = id;
}
self.free_heads[bin] = id;
self.fl_bitmap |= 1 << fl;
self.sl_bitmap[fl as usize] |= 1 << sl;
}
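    /// Unlinks a block from its size bin, clearing the second-level bitmap
    /// bit when the bin empties and the first-level bit when the whole
    /// power-of-two range empties.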
fn remove_free_block(&mut self, id: u32) {
let size = self.blocks[id as usize].size;
let (fl, sl) = Self::mapping(size);
let bin = Self::bin_index(fl, sl);
let prev = self.blocks[id as usize].prev_free;
let next = self.blocks[id as usize].next_free;
if prev != NIL {
self.blocks[prev as usize].next_free = next;
}
if next != NIL {
self.blocks[next as usize].prev_free = prev;
}
if self.free_heads[bin] == id {
self.free_heads[bin] = next;
if next == NIL {
self.sl_bitmap[fl as usize] &= !(1 << sl);
if self.sl_bitmap[fl as usize] == 0 {
self.fl_bitmap &= !(1 << fl);
}
}
}
self.blocks[id as usize].free = false;
self.blocks[id as usize].prev_free = NIL;
self.blocks[id as usize].next_free = NIL;
}
}
fn round_up(value: u64, alignment: u64) -> u64 {
    // Bit trick valid only for power-of-two alignments, which is all
    // `allocate` supports.
    debug_assert!(alignment.is_power_of_two());
    let mask = alignment - 1;
    (value + mask) & !mask
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn allocate_one_then_free() {
let mut t = Tlsf::new(1 << 20);
let a = t.allocate(1024, 256).unwrap();
assert_eq!(a.size, 1024);
assert_eq!(a.offset % 256, 0);
assert_eq!(t.allocation_count(), 1);
t.free(a);
assert_eq!(t.allocation_count(), 0);
assert_eq!(t.free_region_count(), 1);
assert_eq!(t.free_bytes(), 1 << 20);
}
#[test]
fn allocate_many_distinct_sizes() {
let mut t = Tlsf::new(1 << 24);
let mut allocations = Vec::new();
for i in 0..50 {
let size = 256 * (i + 1);
allocations.push(t.allocate(size, 256).unwrap());
}
assert_eq!(t.allocation_count(), 50);
for a in allocations.drain(..) {
t.free(a);
}
assert_eq!(t.allocation_count(), 0);
assert_eq!(t.free_region_count(), 1);
}
#[test]
fn allocations_dont_overlap_each_other_or_extend_capacity() {
let mut t = Tlsf::new(1 << 16);
let a = t.allocate(4096, 256).unwrap();
let b = t.allocate(8192, 256).unwrap();
let c = t.allocate(2048, 256).unwrap();
assert!(disjoint(a.offset, a.size, b.offset, b.size));
assert!(disjoint(a.offset, a.size, c.offset, c.size));
assert!(disjoint(b.offset, b.size, c.offset, c.size));
for x in [a, b, c] {
assert!(x.offset + x.size <= t.capacity());
}
}
#[test]
fn alignment_is_respected() {
let mut t = Tlsf::new(1 << 16);
for align in [256u64, 512, 1024, 2048, 4096] {
let a = t.allocate(1024, align).unwrap();
assert_eq!(a.offset % align, 0, "offset not aligned to {align}");
t.free(a);
}
}
#[test]
fn returns_none_when_full() {
        let mut t = Tlsf::new(1 << 12);
        let a = t.allocate(4096, 256).unwrap();
assert!(t.allocate(256, 256).is_none());
t.free(a);
assert!(t.allocate(256, 256).is_some());
}
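    // A sketch of an extra property check, assuming `allocate` keeps rounding
    // the size up to the alignment: the 4096-aligned request cannot start at
    // offset 256, so a head-padding block must be carved off, returned to the
    // free lists, and coalesced away once everything is freed.
    #[test]
    fn head_padding_is_recycled_and_coalesced() {
        let mut t = Tlsf::new(1 << 16);
        let a = t.allocate(256, 256).unwrap();
        let b = t.allocate(256, 4096).unwrap();
        assert_eq!(b.offset % 4096, 0);
        // Expected free regions: the padding in [256, 4096) and the tail after `b`.
        assert_eq!(t.free_region_count(), 2);
        t.free(b);
        t.free(a);
        assert_eq!(t.free_region_count(), 1);
        assert_eq!(t.free_bytes(), 1 << 16);
    }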
fn disjoint(a_off: u64, a_sz: u64, b_off: u64, b_sz: u64) -> bool {
a_off + a_sz <= b_off || b_off + b_sz <= a_off
}
}