use std::alloc::{alloc, dealloc, Layout};
use std::ptr::NonNull;
use std::sync::atomic::{fence, AtomicU32, AtomicUsize, Ordering};
use std::sync::Mutex;
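// Reference-counted heap objects with a trial-deletion cycle collector in
// the style of Bacon & Rajan (2001), plus a bump-allocating `Arena` and a
// free-list `MemoryPool`. Each object is laid out as an `RcHeader` followed
// by the value itself, described at runtime by a `TypeInfo`.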
pub type RefCount = AtomicU32;
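/// Header stored immediately before every reference-counted allocation.
///
/// `weak` starts at 1: all strong references together hold one implicit
/// weak reference, released when the strong count reaches zero. The memory
/// itself is freed only when the weak count reaches zero.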
#[repr(C)]
pub struct RcHeader {
strong: RefCount,
weak: RefCount,
color: AtomicU32,
buffered: AtomicU32,
type_info: *const TypeInfo,
}
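/// Trial-deletion colors: `Black` = in use, `Gray` = visited by the trial
/// deletion, `White` = provisionally garbage, `Purple` = possible root of a
/// garbage cycle.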
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[repr(u32)]
pub enum GcColor {
White = 0,
Gray = 1,
Black = 2,
Purple = 3,
}
impl RcHeader {
pub fn new(type_info: *const TypeInfo) -> Self {
Self {
strong: AtomicU32::new(1),
            weak: AtomicU32::new(1),
            color: AtomicU32::new(GcColor::Black as u32),
buffered: AtomicU32::new(0),
type_info,
}
}
#[inline]
pub fn strong_count(&self) -> u32 {
self.strong.load(Ordering::Relaxed)
}
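    /// Number of weak references, excluding the implicit one held
    /// collectively by the strong references.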
#[inline]
pub fn weak_count(&self) -> u32 {
self.weak.load(Ordering::Relaxed).saturating_sub(1)
}
#[inline]
pub fn inc_strong(&self) {
self.strong.fetch_add(1, Ordering::Relaxed);
}
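    /// Decrements the strong count; returns `true` if it reached zero.
    /// Callers must issue an `Acquire` fence before dropping the value.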
#[inline]
pub fn dec_strong(&self) -> bool {
self.strong.fetch_sub(1, Ordering::Release) == 1
}
#[inline]
pub fn inc_weak(&self) {
self.weak.fetch_add(1, Ordering::Relaxed);
}
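    /// Decrements the weak count; returns `true` if it reached zero.
    /// Callers must issue an `Acquire` fence before freeing the memory.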
#[inline]
pub fn dec_weak(&self) -> bool {
self.weak.fetch_sub(1, Ordering::Release) == 1
}
#[inline]
pub fn color(&self) -> GcColor {
match self.color.load(Ordering::Relaxed) {
0 => GcColor::White,
1 => GcColor::Gray,
2 => GcColor::Black,
_ => GcColor::Purple,
}
}
#[inline]
pub fn set_color(&self, color: GcColor) {
self.color.store(color as u32, Ordering::Relaxed);
}
#[inline]
pub fn is_buffered(&self) -> bool {
self.buffered.load(Ordering::Relaxed) != 0
}
#[inline]
pub fn set_buffered(&self, buffered: bool) {
self.buffered.store(buffered as u32, Ordering::Relaxed);
}
}
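/// Runtime type descriptor for reference-counted objects. `trace_fn`
/// reports every child `RcHeader` the value strongly references; the
/// `ref_field_*` members describe the same edges declaratively, so a
/// generic trace could be derived from them.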
#[repr(C)]
pub struct TypeInfo {
pub name: &'static str,
pub size: usize,
pub align: usize,
pub drop_fn: Option<unsafe fn(*mut u8)>,
pub trace_fn: Option<unsafe fn(*mut u8, &mut dyn FnMut(*mut RcHeader))>,
pub ref_field_count: usize,
pub ref_field_offsets: &'static [usize],
}
impl TypeInfo {
pub const fn leaf(name: &'static str, size: usize, align: usize) -> Self {
Self {
name,
size,
align,
drop_fn: None,
trace_fn: None,
ref_field_count: 0,
ref_field_offsets: &[],
}
}
pub const fn with_drop(
name: &'static str,
size: usize,
align: usize,
drop_fn: unsafe fn(*mut u8),
) -> Self {
Self {
name,
size,
align,
drop_fn: Some(drop_fn),
trace_fn: None,
ref_field_count: 0,
ref_field_offsets: &[],
}
}
pub fn can_cycle(&self) -> bool {
self.ref_field_count > 0 || self.trace_fn.is_some()
}
}
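/// Allocates an object described by `type_info`: an `RcHeader` at offset 0,
/// followed by the zero-initialized value at the header size rounded up to
/// the type's alignment. Strong and weak counts both start at 1.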
pub fn rc_alloc(type_info: &'static TypeInfo) -> NonNull<RcHeader> {
let header_size = std::mem::size_of::<RcHeader>();
let header_align = std::mem::align_of::<RcHeader>();
let data_offset = (header_size + type_info.align - 1) & !(type_info.align - 1);
let total_size = data_offset + type_info.size;
let total_align = header_align.max(type_info.align);
let layout = Layout::from_size_align(total_size, total_align).expect("Invalid layout");
let ptr = unsafe { alloc(layout) };
if ptr.is_null() {
std::alloc::handle_alloc_error(layout);
}
let header = ptr as *mut RcHeader;
unsafe {
header.write(RcHeader::new(type_info));
}
let data_ptr = unsafe { ptr.add(data_offset) };
    unsafe {
        std::ptr::write_bytes(data_ptr, 0, type_info.size);
    }
    // Keep the global statistics in step with every live allocation.
    let stats = CYCLE_COLLECTOR.stats();
    stats.allocations.fetch_add(1, Ordering::Relaxed);
    stats.live_objects.fetch_add(1, Ordering::Relaxed);
    NonNull::new(header).expect("Allocation returned null")
}
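/// Returns a pointer to the value stored after `header`, using the same
/// offset computation as `rc_alloc`.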
pub fn rc_data<T>(header: NonNull<RcHeader>) -> NonNull<T> {
let type_info = unsafe { &*(*header.as_ptr()).type_info };
let header_size = std::mem::size_of::<RcHeader>();
let data_offset = (header_size + type_info.align - 1) & !(type_info.align - 1);
let data_ptr = unsafe { (header.as_ptr() as *mut u8).add(data_offset) as *mut T };
NonNull::new(data_ptr).expect("Data pointer is null")
}
pub fn rc_inc(header: NonNull<RcHeader>) {
unsafe { (*header.as_ptr()).inc_strong() };
}
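/// Releases one strong reference. On the last release the value is dropped;
/// on a non-final release of a potentially cyclic type, the object is
/// recorded as a possible root of a garbage cycle.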
pub fn rc_dec(header: NonNull<RcHeader>) {
    let hdr = unsafe { &*header.as_ptr() };
    if hdr.dec_strong() {
        // Synchronize with releases on other threads before dropping.
        fence(Ordering::Acquire);
        let type_info = unsafe { &*hdr.type_info };
        if let Some(drop_fn) = type_info.drop_fn {
            // The drop function is responsible for releasing any strong
            // references the value holds.
            let data_ptr = rc_data::<u8>(header);
            unsafe { drop_fn(data_ptr.as_ptr()) };
        }
        // Dead objects are black; a buffered candidate that dies here is
        // discarded (and its memory freed) during the next `mark_roots`.
        hdr.set_color(GcColor::Black);
        rc_dec_weak(header);
    } else {
        let type_info = unsafe { &*hdr.type_info };
        if type_info.can_cycle() {
            // The count is still non-zero: this object may be the root of a
            // garbage cycle, so buffer it for the cycle collector.
            CYCLE_COLLECTOR.add_candidate(header);
        }
    }
}
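/// Releases one weak reference, freeing the allocation when the count
/// reaches zero.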
pub fn rc_dec_weak(header: NonNull<RcHeader>) {
    let hdr = unsafe { &*header.as_ptr() };
    if hdr.dec_weak() {
        // Synchronize with releases on other threads before freeing.
        fence(Ordering::Acquire);
        let type_info = unsafe { &*hdr.type_info };
        let header_size = std::mem::size_of::<RcHeader>();
        let header_align = std::mem::align_of::<RcHeader>();
        let data_offset = (header_size + type_info.align - 1) & !(type_info.align - 1);
        let total_size = data_offset + type_info.size;
        let total_align = header_align.max(type_info.align);
        let layout = Layout::from_size_align(total_size, total_align).expect("Invalid layout");
        // Keep the global statistics in step with every free.
        let stats = CYCLE_COLLECTOR.stats();
        stats.deallocations.fetch_add(1, Ordering::Relaxed);
        stats.live_objects.fetch_sub(1, Ordering::Relaxed);
        unsafe {
            dealloc(header.as_ptr() as *mut u8, layout);
        }
    }
}
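/// Synchronous trial-deletion cycle collector. A collection runs in three
/// phases: `mark_roots` trial-deletes internal references reachable from
/// candidate roots, `scan_roots` separates externally reachable objects
/// (re-blackened, counts restored) from garbage (whitened), and
/// `collect_white` reclaims the white nodes.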
pub struct CycleCollector {
candidates: Mutex<Vec<NonNull<RcHeader>>>,
roots: Mutex<Vec<NonNull<RcHeader>>>,
stats: GcStats,
}
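// Safety: the collector's state is guarded by mutexes and atomics; the raw
// header pointers it stores are kept alive by the weak reference taken in
// `add_candidate`.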
unsafe impl Send for CycleCollector {}
unsafe impl Sync for CycleCollector {}
#[derive(Default)]
pub struct GcStats {
pub allocations: AtomicUsize,
pub deallocations: AtomicUsize,
pub cycle_collections: AtomicUsize,
pub cycles_broken: AtomicUsize,
pub live_objects: AtomicUsize,
}
impl CycleCollector {
pub const fn new() -> Self {
Self {
candidates: Mutex::new(Vec::new()),
roots: Mutex::new(Vec::new()),
stats: GcStats {
allocations: AtomicUsize::new(0),
deallocations: AtomicUsize::new(0),
cycle_collections: AtomicUsize::new(0),
cycles_broken: AtomicUsize::new(0),
live_objects: AtomicUsize::new(0),
},
}
}
    pub fn add_candidate(&self, header: NonNull<RcHeader>) {
        let hdr = unsafe { &*header.as_ptr() };
        hdr.set_color(GcColor::Purple);
        if hdr.is_buffered() {
            return;
        }
        hdr.set_buffered(true);
        // The buffer holds a weak reference so the header stays valid even
        // if the last strong reference is dropped before the next collection.
        hdr.inc_weak();
        let mut candidates = self.candidates.lock().unwrap();
        candidates.push(header);
        // Collect eagerly once the candidate buffer grows past a threshold.
        if candidates.len() > 1000 {
            drop(candidates);
            self.collect();
        }
    }
pub fn collect(&self) {
self.stats.cycle_collections.fetch_add(1, Ordering::Relaxed);
self.mark_roots();
self.scan_roots();
self.collect_white();
}
    fn mark_roots(&self) {
        let mut candidates = self.candidates.lock().unwrap();
        let mut roots = self.roots.lock().unwrap();
        for &header in candidates.iter() {
            let hdr = unsafe { &*header.as_ptr() };
            if hdr.color() == GcColor::Purple {
                self.mark_gray(header);
                roots.push(header);
            } else {
                // Re-colored or died since it was buffered: drop it from the
                // buffer and release the buffer's weak reference, which
                // frees objects that are already dead.
                hdr.set_buffered(false);
                rc_dec_weak(header);
            }
        }
        candidates.clear();
    }
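    /// Trial deletion: subtract every internal reference reachable from
    /// `header`. Any object whose strong count stays positive afterwards is
    /// held from outside the candidate subgraph.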
fn mark_gray(&self, header: NonNull<RcHeader>) {
let hdr = unsafe { &*header.as_ptr() };
if hdr.color() != GcColor::Gray {
hdr.set_color(GcColor::Gray);
let type_info = unsafe { &*hdr.type_info };
if let Some(trace_fn) = type_info.trace_fn {
let data_ptr = rc_data::<u8>(header);
unsafe {
trace_fn(data_ptr.as_ptr(), &mut |child| {
let child_hdr = &*child;
child_hdr.strong.fetch_sub(1, Ordering::Relaxed);
self.mark_gray(NonNull::new_unchecked(child));
});
}
}
}
}
fn scan_roots(&self) {
let roots = self.roots.lock().unwrap();
for &header in roots.iter() {
self.scan(header);
}
}
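    /// A gray node whose count survived the trial deletion is externally
    /// reachable; everything else is provisionally garbage (white).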
fn scan(&self, header: NonNull<RcHeader>) {
let hdr = unsafe { &*header.as_ptr() };
if hdr.color() == GcColor::Gray {
if hdr.strong_count() > 0 {
self.scan_black(header);
} else {
hdr.set_color(GcColor::White);
let type_info = unsafe { &*hdr.type_info };
if let Some(trace_fn) = type_info.trace_fn {
let data_ptr = rc_data::<u8>(header);
unsafe {
trace_fn(data_ptr.as_ptr(), &mut |child| {
self.scan(NonNull::new_unchecked(child));
});
}
}
}
}
}
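    /// Re-blacken an externally reachable subgraph, restoring the strong
    /// counts removed by `mark_gray`.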
fn scan_black(&self, header: NonNull<RcHeader>) {
let hdr = unsafe { &*header.as_ptr() };
hdr.set_color(GcColor::Black);
let type_info = unsafe { &*hdr.type_info };
if let Some(trace_fn) = type_info.trace_fn {
let data_ptr = rc_data::<u8>(header);
unsafe {
trace_fn(data_ptr.as_ptr(), &mut |child| {
let child_hdr = &*child;
child_hdr.strong.fetch_add(1, Ordering::Relaxed);
if child_hdr.color() != GcColor::Black {
self.scan_black(NonNull::new_unchecked(child));
}
});
}
}
}
    fn collect_white(&self) {
        let mut roots = self.roots.lock().unwrap();
        let mut garbage = Vec::new();
        for &header in roots.iter() {
            let hdr = unsafe { &*header.as_ptr() };
            // Un-buffer the root first; `collect_white_node` skips buffered
            // nodes, and roots kept their buffered flag until now.
            hdr.set_buffered(false);
            self.collect_white_node(header, &mut garbage);
        }
        // Reclaim in a second pass: freeing during the traversal could leave
        // a dangling header still reachable from a root processed later.
        for &header in garbage.iter() {
            self.stats.cycles_broken.fetch_add(1, Ordering::Relaxed);
            // `drop_fn` is deliberately not run on cyclic garbage: a
            // destructor would re-release child references that the trial
            // deletion already removed. Only the memory is reclaimed, by
            // releasing the node's implicit weak reference.
            rc_dec_weak(header);
        }
        // Release the weak reference the candidate buffer held on each root.
        for &header in roots.iter() {
            rc_dec_weak(header);
        }
        roots.clear();
    }
    fn collect_white_node(&self, header: NonNull<RcHeader>, garbage: &mut Vec<NonNull<RcHeader>>) {
        let hdr = unsafe { &*header.as_ptr() };
        if hdr.color() == GcColor::White && !hdr.is_buffered() {
            // Re-color before recursing so the traversal terminates on cycles.
            hdr.set_color(GcColor::Black);
            let type_info = unsafe { &*hdr.type_info };
            if let Some(trace_fn) = type_info.trace_fn {
                let data_ptr = rc_data::<u8>(header);
                unsafe {
                    trace_fn(data_ptr.as_ptr(), &mut |child| {
                        self.collect_white_node(NonNull::new_unchecked(child), garbage);
                    });
                }
            }
            garbage.push(header);
        }
    }
pub fn stats(&self) -> &GcStats {
&self.stats
}
}
static CYCLE_COLLECTOR: CycleCollector = CycleCollector::new();
pub fn gc_collect() {
CYCLE_COLLECTOR.collect();
}
pub fn gc_stats() -> &'static GcStats {
CYCLE_COLLECTOR.stats()
}
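/// Bump allocator backed by a list of fixed-size chunks. Allocation is a
/// pointer bump; individual values are never freed, and their destructors
/// are never run.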
pub struct Arena {
chunks: Vec<Vec<u8>>,
pos: usize,
chunk_size: usize,
}
impl Arena {
pub fn new(chunk_size: usize) -> Self {
Self {
chunks: vec![vec![0u8; chunk_size]],
pos: 0,
chunk_size,
}
}
    pub fn alloc(&mut self, size: usize, align: usize) -> *mut u8 {
        let aligned_pos = (self.pos + align - 1) & !(align - 1);
        // Compare against the actual length of the current chunk: oversize
        // requests get a dedicated chunk larger than `chunk_size`, and
        // checking `self.chunk_size` here would recurse forever on them.
        if aligned_pos + size > self.chunks.last().map_or(0, |c| c.len()) {
            let chunk_size = self.chunk_size.max(size);
            self.chunks.push(vec![0u8; chunk_size]);
            self.pos = 0;
            return self.alloc(size, align);
        }
        let ptr = self.chunks.last_mut().unwrap().as_mut_ptr();
        let result = unsafe { ptr.add(aligned_pos) };
        self.pos = aligned_pos + size;
        result
    }
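    /// Moves `value` into the arena. The value's destructor will never run,
    /// so use this only for types where leaking is acceptable.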
pub fn alloc_value<T>(&mut self, value: T) -> &mut T {
let size = std::mem::size_of::<T>();
let align = std::mem::align_of::<T>();
let ptr = self.alloc(size, align) as *mut T;
unsafe {
ptr.write(value);
&mut *ptr
}
}
pub fn reset(&mut self) {
self.pos = 0;
self.chunks.truncate(1);
}
pub fn clear(&mut self) {
self.chunks.clear();
self.chunks.push(vec![0u8; self.chunk_size]);
self.pos = 0;
}
    pub fn allocated_bytes(&self) -> usize {
        // Sum the actual chunk lengths: oversize chunks may be larger than
        // `chunk_size`, and only the current chunk is partially used.
        match self.chunks.split_last() {
            Some((_, full)) => full.iter().map(|c| c.len()).sum::<usize>() + self.pos,
            None => 0,
        }
    }
}
// No explicit `Drop` impl is needed: the chunk vectors free their own
// storage. Values placed in the arena never have their destructors run.
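/// Fixed-capacity free list of raw `T`-sized slots. `alloc` hands out
/// uninitialized memory; `free` caches up to `capacity` slots for reuse and
/// never runs `T`'s destructor, so callers must drop the value first.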
pub struct MemoryPool<T> {
free_list: Mutex<Vec<NonNull<T>>>,
capacity: usize,
allocated: AtomicUsize,
}
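// A sketch of thread-safety for the pool, mirroring `CycleCollector` above:
// the `NonNull<T>` entries make the type !Send/!Sync by default, but the
// free list only stores raw pointers to memory the pool owns. Assumes `T`
// itself can be sent between threads.
unsafe impl<T: Send> Send for MemoryPool<T> {}
unsafe impl<T: Send> Sync for MemoryPool<T> {}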
impl<T> MemoryPool<T> {
pub fn new(capacity: usize) -> Self {
Self {
free_list: Mutex::new(Vec::with_capacity(capacity)),
capacity,
allocated: AtomicUsize::new(0),
}
}
pub fn alloc(&self) -> NonNull<T> {
{
let mut free_list = self.free_list.lock().unwrap();
if let Some(ptr) = free_list.pop() {
self.allocated.fetch_add(1, Ordering::Relaxed);
return ptr;
}
}
let layout = Layout::new::<T>();
let ptr = unsafe { alloc(layout) as *mut T };
if ptr.is_null() {
std::alloc::handle_alloc_error(layout);
}
self.allocated.fetch_add(1, Ordering::Relaxed);
NonNull::new(ptr).expect("Allocation returned null")
}
pub fn free(&self, ptr: NonNull<T>) {
let mut free_list = self.free_list.lock().unwrap();
if free_list.len() < self.capacity {
free_list.push(ptr);
} else {
let layout = Layout::new::<T>();
unsafe {
dealloc(ptr.as_ptr() as *mut u8, layout);
}
}
self.allocated.fetch_sub(1, Ordering::Relaxed);
}
pub fn allocated(&self) -> usize {
self.allocated.load(Ordering::Relaxed)
}
}
impl<T> Drop for MemoryPool<T> {
fn drop(&mut self) {
let free_list = self.free_list.lock().unwrap();
let layout = Layout::new::<T>();
for ptr in free_list.iter() {
unsafe {
dealloc(ptr.as_ptr() as *mut u8, layout);
}
}
}
}
#[cfg(test)]
mod tests {
use super::*;
static TEST_TYPE_INFO: TypeInfo = TypeInfo::leaf("TestType", 8, 8);
#[test]
fn test_rc_header() {
let header = RcHeader::new(&TEST_TYPE_INFO);
assert_eq!(header.strong_count(), 1);
assert_eq!(header.weak_count(), 0);
assert_eq!(header.color(), GcColor::Black);
assert!(!header.is_buffered());
}
#[test]
fn test_rc_alloc() {
let header = rc_alloc(&TEST_TYPE_INFO);
let hdr = unsafe { &*header.as_ptr() };
assert_eq!(hdr.strong_count(), 1);
rc_inc(header);
assert_eq!(hdr.strong_count(), 2);
rc_dec(header);
assert_eq!(hdr.strong_count(), 1);
rc_dec(header);
}
#[test]
fn test_arena() {
let mut arena = Arena::new(1024);
let ptr1 = arena.alloc(100, 8);
let ptr2 = arena.alloc(200, 16);
assert!(!ptr1.is_null());
assert!(!ptr2.is_null());
assert_ne!(ptr1, ptr2);
let val = arena.alloc_value(42u64);
assert_eq!(*val, 42);
arena.reset();
assert_eq!(arena.pos, 0);
}
#[test]
fn test_memory_pool() {
let pool: MemoryPool<u64> = MemoryPool::new(10);
let ptr1 = pool.alloc();
let ptr2 = pool.alloc();
assert_eq!(pool.allocated(), 2);
pool.free(ptr1);
assert_eq!(pool.allocated(), 1);
let ptr3 = pool.alloc();
assert_eq!(pool.allocated(), 2);
pool.free(ptr2);
pool.free(ptr3);
assert_eq!(pool.allocated(), 0);
}
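    // A minimal sketch exercising the cycle collector on a two-node cycle.
    // `Node`, `trace_node`, and `NODE_TYPE_INFO` are illustrative fixtures
    // for this test, not part of the allocator's API.
    #[repr(C)]
    struct Node {
        next: *mut RcHeader,
    }
    unsafe fn trace_node(data: *mut u8, visit: &mut dyn FnMut(*mut RcHeader)) {
        let node = data as *mut Node;
        if !(*node).next.is_null() {
            visit((*node).next);
        }
    }
    static NODE_TYPE_INFO: TypeInfo = TypeInfo {
        name: "Node",
        size: std::mem::size_of::<Node>(),
        align: std::mem::align_of::<Node>(),
        drop_fn: None,
        trace_fn: Some(trace_node),
        ref_field_count: 1,
        ref_field_offsets: &[0],
    };
    #[test]
    fn test_cycle_collection() {
        let a = rc_alloc(&NODE_TYPE_INFO);
        let b = rc_alloc(&NODE_TYPE_INFO);
        // Link a -> b and b -> a; each link owns a strong reference.
        unsafe {
            (*rc_data::<Node>(a).as_ptr()).next = b.as_ptr();
            (*rc_data::<Node>(b).as_ptr()).next = a.as_ptr();
        }
        rc_inc(a);
        rc_inc(b);
        let broken_before = gc_stats().cycles_broken.load(Ordering::Relaxed);
        // Drop the external references; the cycle keeps both counts at 1.
        rc_dec(a);
        rc_dec(b);
        gc_collect();
        assert!(gc_stats().cycles_broken.load(Ordering::Relaxed) > broken_before);
    }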
}