use std::sync::atomic::Ordering;
use crate::matrix::core::{ AtomicMatrix, BlockHeader, RelativePtr };
use memmap2::MmapMut;
/// Lowest block-state value available to user code via `set_state` /
/// `transition_state`; both reject anything below this.
/// NOTE(review): presumably 0..=48 are reserved for allocator-internal
/// lifecycle states inside `AtomicMatrix` — confirm against that module.
pub const USER_STATE_MIN: u32 = 49;
/// Errors produced by [`HandlerFunctions`] operations.
#[derive(Debug, PartialEq)]
pub enum HandlerError {
/// The underlying matrix could not satisfy an allocation request;
/// carries the allocator's error message.
AllocationFailed(String),
/// A state value below `USER_STATE_MIN` was requested.
ReservedStatus(u32),
/// A state compare-exchange failed; carries the state actually observed.
TransitionFailed(u32),
/// An offset did not reference a valid block.
/// NOTE(review): this variant is never constructed in the code visible
/// here — confirm whether it is used elsewhere or is dead.
InvalidOffset(u32),
}
/// Typed handle to an allocated region inside the shared segment.
///
/// Holds only a segment-relative pointer, so the handle stays meaningful
/// regardless of the base address the segment is mapped at; all accesses go
/// through a handler that supplies the base pointer.
pub struct Block<T> {
// Segment-relative location of the block's payload.
pointer: RelativePtr<T>,
}
/// Plain-data snapshot of a [`MatrixHandler`], produced by
/// `MatrixHandler::share`, that can be handed to other threads.
///
/// Holds raw addresses and segment geometry only — it owns no resources.
/// NOTE(review): the addresses are valid only while the originating
/// `MatrixHandler` (and its mmap) remain alive — confirm callers uphold this.
pub struct SharedHandler {
// Address of the `AtomicMatrix` owned by the originating handler.
matrix_addr: usize,
// Base address of the mapped segment.
base_addr: usize,
// Length of the mapped segment in bytes.
segment_size: u32,
// Offset of the first block within the segment.
first_block_offset: u32,
}
/// Owning handler for a mapped segment: keeps the memory map alive and
/// pairs it with the `AtomicMatrix` allocator that manages it.
pub struct MatrixHandler {
// Allocator state; `'static` because it lives inside the mapping below.
// NOTE(review): soundness relies on `mmap` outliving this reference —
// dropping/replacing `mmap` would dangle it; confirm construction invariant.
matrix: &'static mut AtomicMatrix,
// The backing memory mapping; dropped (unmapped) with the handler.
mmap: MmapMut,
// Offset of the first allocatable block within the segment.
first_block_offset: u32,
}
impl<T> Block<T> {
pub(crate) fn from_offset(offset: u32) -> Self {
Self { pointer: RelativePtr::new(offset) }
}
}
impl MatrixHandler {
pub(crate) fn new(
matrix: &'static mut AtomicMatrix,
mmap: MmapMut,
first_block_offset: u32
) -> Self {
Self { matrix, mmap, first_block_offset }
}
pub fn share(&self) -> SharedHandler {
SharedHandler {
matrix_addr: self.matrix as *const AtomicMatrix as usize,
base_addr: self.base_ptr() as usize,
segment_size: self.segment_size(),
first_block_offset: self.first_block_offset,
}
}
}
impl HandlerFunctions for MatrixHandler {
    /// Base address of the owned memory mapping.
    fn base_ptr(&self) -> *const u8 {
        self.mmap.as_ptr()
    }

    fn matrix(&self) -> &AtomicMatrix {
        self.matrix
    }

    fn first_block_offset(&self) -> u32 {
        self.first_block_offset
    }

    /// Mapped segment length in bytes.
    ///
    /// The previous `as u32` cast silently truncated mappings larger than
    /// `u32::MAX` bytes, yielding a bogus segment size; oversized mappings
    /// now fail loudly instead.
    fn segment_size(&self) -> u32 {
        u32::try_from(self.mmap.len()).expect("segment length exceeds u32::MAX bytes")
    }
}
// SAFETY: `SharedHandler` contains only raw addresses and plain integers, and
// all access to the pointed-to `AtomicMatrix` goes through atomic operations
// (see the `HandlerFunctions` defaults). This is sound only while the
// originating `MatrixHandler` and its mapping stay alive.
// NOTE(review): that liveness contract is not enforced by the type — confirm
// callers guarantee it.
unsafe impl Send for SharedHandler {}
unsafe impl Sync for SharedHandler {}
/// Accessors that reconstitute the aliased handler state from the raw
/// addresses captured in `MatrixHandler::share`.
impl HandlerFunctions for SharedHandler {
// Reinterprets the stored segment base address as a pointer.
fn base_ptr(&self) -> *const u8 { self.base_addr as *const u8 }
fn matrix(&self) -> &AtomicMatrix {
// SAFETY: `matrix_addr` was captured from a live `&mut AtomicMatrix` in
// `MatrixHandler::share`; dereferencing is sound only while that handler
// (and its mapping) remain alive — the type cannot enforce this itself.
unsafe { &*(self.matrix_addr as *const AtomicMatrix) }
}
fn first_block_offset(&self) -> u32 { self.first_block_offset }
fn segment_size(&self) -> u32 { self.segment_size }
}
/// Shared allocation and block-state API implemented by both the owning
/// [`MatrixHandler`] and the sendable [`SharedHandler`].
pub trait HandlerFunctions {
    /// Base address of the mapped segment that all relative pointers resolve against.
    fn base_ptr(&self) -> *const u8;
    /// The allocator shared by every handler of this segment.
    fn matrix(&self) -> &AtomicMatrix;
    /// Offset of the first block within the segment.
    fn first_block_offset(&self) -> u32;
    /// Length of the mapped segment in bytes.
    fn segment_size(&self) -> u32;

    /// Allocates a block large enough to hold a `T`.
    ///
    /// The request is rounded up to a 16-byte minimum so even zero-sized or
    /// tiny types receive a distinct slot.
    ///
    /// # Errors
    /// [`HandlerError::AllocationFailed`] if the matrix cannot satisfy the request.
    fn allocate<T>(&self) -> Result<Block<T>, HandlerError> {
        let size = (std::mem::size_of::<T>() as u32).max(16);
        self.matrix()
            .allocate(self.base_ptr(), size)
            .map(|ptr| Block::from_offset(ptr.offset()))
            .map_err(HandlerError::AllocationFailed)
    }

    /// Allocates `size` raw bytes and returns the untyped relative pointer.
    ///
    /// # Errors
    /// [`HandlerError::AllocationFailed`] if the matrix cannot satisfy the request.
    fn allocate_raw(&self, size: u32) -> Result<RelativePtr<u8>, HandlerError> {
        self.matrix()
            .allocate(self.base_ptr(), size)
            .map_err(HandlerError::AllocationFailed)
    }

    /// Writes `value` into the block's payload.
    ///
    /// # Safety
    /// `block` must refer to a live allocation in this handler's segment,
    /// properly sized and aligned for `T`, with no concurrent access to the
    /// same payload.
    unsafe fn write<T>(&self, block: &mut Block<T>, value: T) {
        unsafe { block.pointer.write(self.base_ptr(), value) }
    }

    /// Returns a shared reference to the block's payload.
    ///
    /// # Safety
    /// The returned lifetime `'a` is unconstrained by the compiler: the caller
    /// must ensure the block stays allocated and the segment stays mapped for
    /// as long as the reference is used, and that a valid `T` was previously
    /// written.
    unsafe fn read<'a, T>(&self, block: &Block<T>) -> &'a T {
        unsafe { block.pointer.resolve(self.base_ptr()) }
    }

    /// Returns an exclusive reference to the block's payload.
    ///
    /// # Safety
    /// Same contract as [`Self::read`], plus the caller must guarantee no
    /// other reference (shared or exclusive) to this payload exists while the
    /// returned reference is live.
    unsafe fn read_mut<'a, T>(&self, block: &Block<T>) -> &'a mut T {
        unsafe { block.pointer.resolve_mut(self.base_ptr()) }
    }

    /// Releases a block back to the matrix by acking the header that precedes
    /// its payload.
    ///
    /// # Panics
    /// Panics if the block's offset is smaller than the header size, which
    /// indicates a corrupted or foreign `Block`. (The previous bare
    /// subtraction panicked only in debug builds; in release it wrapped around
    /// and acked a garbage offset.)
    fn free<T>(&self, block: Block<T>) {
        // Bytes occupied by the `BlockHeader` laid out before each payload.
        // NOTE(review): assumed to match `BlockHeader`'s layout — confirm
        // against `matrix::core`.
        const HEADER_SIZE: u32 = 32;
        let header_offset = block
            .pointer
            .offset()
            .checked_sub(HEADER_SIZE)
            .expect("block offset precedes its header; Block is corrupted");
        let header_ptr = RelativePtr::<BlockHeader>::new(header_offset);
        self.matrix().ack(&header_ptr, self.base_ptr());
    }

    /// Releases the block whose header begins at `header_offset` (already a
    /// header offset — no adjustment is applied, unlike [`Self::free`]).
    fn free_at(&self, header_offset: u32) {
        let header_ptr = RelativePtr::<BlockHeader>::new(header_offset);
        self.matrix().ack(&header_ptr, self.base_ptr());
    }

    /// Stores a user-defined state on the block's header with `Release`
    /// ordering.
    ///
    /// # Errors
    /// [`HandlerError::ReservedStatus`] if `state` is below [`USER_STATE_MIN`].
    fn set_state<T>(&self, block: &Block<T>, state: u32) -> Result<(), HandlerError> {
        if state < USER_STATE_MIN {
            return Err(HandlerError::ReservedStatus(state));
        }
        // SAFETY: assumes `block` refers to a live allocation whose header
        // lies within the mapped segment — TODO confirm callers guarantee this.
        unsafe {
            block.pointer
                .resolve_header_mut(self.base_ptr())
                .state
                .store(state, Ordering::Release);
        }
        Ok(())
    }

    /// Loads the block's current state with the caller-chosen memory ordering.
    fn get_state<T>(&self, block: &Block<T>, order: Ordering) -> u32 {
        // SAFETY: assumes `block` refers to a live allocation whose header
        // lies within the mapped segment — TODO confirm callers guarantee this.
        unsafe {
            block.pointer
                .resolve_header(self.base_ptr())
                .state
                .load(order)
        }
    }

    /// Atomically transitions the block's state from `expected` to `next`.
    ///
    /// The failure ordering is `Relaxed` because on failure the observed
    /// value is only reported back to the caller, not used to synchronize.
    ///
    /// # Errors
    /// * [`HandlerError::ReservedStatus`] if `next` is below [`USER_STATE_MIN`].
    /// * [`HandlerError::TransitionFailed`] carrying the observed state if the
    ///   compare-exchange loses the race.
    fn transition_state<T>(
        &self,
        block: &Block<T>,
        expected: u32,
        next: u32,
        success_order: Ordering
    ) -> Result<u32, HandlerError> {
        if next < USER_STATE_MIN {
            return Err(HandlerError::ReservedStatus(next));
        }
        // SAFETY: assumes `block` refers to a live allocation whose header
        // lies within the mapped segment — TODO confirm callers guarantee this.
        unsafe {
            block.pointer
                .resolve_header_mut(self.base_ptr())
                .state
                .compare_exchange(expected, next, success_order, Ordering::Relaxed)
                .map_err(HandlerError::TransitionFailed)
        }
    }

    /// Escape hatch: direct access to the underlying matrix.
    fn raw_matrix(&self) -> &AtomicMatrix {
        self.matrix()
    }

    /// Escape hatch: the raw segment base pointer.
    fn raw_base_ptr(&self) -> *const u8 {
        self.base_ptr()
    }
}