#![allow(unused)]
use portable_atomic::{AtomicBool, AtomicI16, AtomicU32, AtomicUsize, Ordering};
use core::cell::UnsafeCell;
/// Atomic counterpart of [`BufIndex`].
type AtomicBufIndex = AtomicI16;
/// Signed index into the buffer's backing array; `-1` is used as a sentinel
/// (see `Buffer::new`, which initializes `read_start` to `-1`).
type BufIndex = i16;
/// Atomic counterpart of [`LogicalIndex`].
type AtomicLogicalIndex = AtomicU32;
/// Position in the logical (unwrapped) byte stream, as opposed to a physical
/// index into the ring's backing array.
type LogicalIndex = u32;
/// Number of bytes in the ring buffer's backing storage (and bits in the
/// validity bitfield — one bit per byte).
const BUFSIZE: usize = 1024;
/// A fixed-size field of `BUFSIZE` bits with atomic per-bit operations,
/// backed by an array of `AtomicUsize` words.
#[derive(Default)]
struct AtomicBitfield {
    words: [AtomicUsize; BUFSIZE / usize::BITS as usize],
}
impl AtomicBitfield {
    /// log2 of the bits per word: the shift that maps a bit index to its
    /// word offset.
    const BITCOUNT: usize = usize::BITS.ilog2() as _;
    /// Mask selecting the bit position within a word (the low BITCOUNT bits
    /// of an index).
    const MASK: usize = !(!0 << Self::BITCOUNT);
    /// Creates a bitfield with every bit clear. `const` so it can be used
    /// from other `const fn`s (e.g. `Buffer::new`).
    const fn new() -> Self {
        // A `const` item is required to repeat a non-Copy initializer in an
        // array expression.
        const NULL: AtomicUsize = AtomicUsize::new(0);
        Self {
            words: [NULL; BUFSIZE / usize::BITS as usize],
        }
    }
    /// Splits a bit index into `(word offset, single-bit mask)`.
    fn locate(index: usize) -> (usize, usize) {
        (index >> Self::BITCOUNT, 1 << (index & Self::MASK))
    }
    /// Atomically sets bit `index`. Panics if `index >= BUFSIZE`.
    fn set(&self, index: usize, ordering: Ordering) {
        let (offset, bit) = Self::locate(index);
        self.words[offset].fetch_or(bit, ordering);
    }
    /// Sets every bit in `start..end` with one atomic RMW per bit; bits
    /// become visible to other threads individually, not as one atomic step.
    fn set_range(&self, start: usize, end: usize, ordering: Ordering) {
        for i in start..end {
            self.set(i, ordering);
        }
    }
    /// Atomically clears bit `index`. Panics if `index >= BUFSIZE`.
    fn clear(&self, index: usize, ordering: Ordering) {
        let (offset, bit) = Self::locate(index);
        self.words[offset].fetch_and(!bit, ordering);
    }
    /// Clears every bit in `start..end` with one atomic RMW per bit.
    fn clear_range(&self, start: usize, end: usize, ordering: Ordering) {
        for i in start..end {
            self.clear(i, ordering);
        }
    }
    /// Returns the value of bit `index`. Panics if `index >= BUFSIZE`.
    fn get(&self, index: usize, ordering: Ordering) -> bool {
        let (offset, bit) = Self::locate(index);
        self.words[offset].load(ordering) & bit != 0
    }
    /// Returns the index of the first set bit in `start..BUFSIZE`, or `None`
    /// if no bit in that range is set.
    ///
    /// Scans one word per atomic load and finds the lowest set bit via
    /// `trailing_zeros` — O(BUFSIZE / usize::BITS) loads instead of the
    /// O(BUFSIZE) per-bit probes of the naive loop.
    fn get_first_set(&self, start: usize, ordering: Ordering) -> Option<usize> {
        if start >= BUFSIZE {
            return None;
        }
        let first_word = start >> Self::BITCOUNT;
        // Ignore bits below `start` in the first word examined.
        let mut keep = !0usize << (start & Self::MASK);
        for offset in first_word..self.words.len() {
            let word = self.words[offset].load(ordering) & keep;
            if word != 0 {
                return Some((offset << Self::BITCOUNT) | word.trailing_zeros() as usize);
            }
            keep = !0; // all later words are scanned in full
        }
        None
    }
}
#[test]
fn test_atomic_bitfield() {
    let bf: AtomicBitfield = Default::default();
    // UnsafeCell wrapper whose cross-thread access is guarded by a bit in
    // `bf`: the Release clear / Acquire load pair below orders the cell's
    // write before the main thread's read.
    struct BitGuardedUnsafeCell(UnsafeCell<bool>);
    unsafe impl Send for BitGuardedUnsafeCell {}
    unsafe impl Sync for BitGuardedUnsafeCell {}
    let value = BitGuardedUnsafeCell(UnsafeCell::new(false));
    let threads_value = &value;
    std::thread::scope(|s| {
        bf.set(1, Ordering::Relaxed);
        bf.set(32, Ordering::Relaxed);
        bf.set(63, Ordering::Relaxed);
        for i in 0..BUFSIZE {
            assert!(bf.get(i, Ordering::Relaxed) == (i == 1 || i == 32 || i == 63), "Bit {} not as expected", i);
        }
        bf.set(128, Ordering::Relaxed);
        s.spawn(|| {
            // SAFETY: bit 128 is still set, so the main thread is spinning
            // below and not touching the cell; this write happens-before the
            // Release clear of the guard bit.
            unsafe { *threads_value.0.get() = true; }
            bf.clear(128, Ordering::Release);
        });
        // Spin until the guard bit drops; the Acquire load pairs with the
        // Release clear in the spawned thread.
        while bf.get(128, Ordering::Acquire) {}
        // SAFETY: the Acquire load observed the Release clear, so the spawned
        // thread's write is visible and it no longer accesses the cell.
        // Fix: the original read the value here but never asserted it, so the
        // visibility property this test exists for was never checked.
        assert!(unsafe { *threads_value.0.get() });
    });
}
/// A fixed-capacity byte ring buffer shared between one `Writer` and one
/// `Reader` (see `Buffer::split`), coordinated through atomic fields.
pub struct Buffer {
    /// Backing byte storage; `UnsafeCell` gives interior mutability through
    /// the shared `&Buffer` held by `Writer`/`Reader`. NOTE(review): access
    /// discipline must come from the (still unimplemented) read/write paths.
    buf: UnsafeCell<[u8; BUFSIZE]>,
    /// One bit per byte of `buf`; presumably marks which bytes hold valid
    /// data — TODO confirm once the read/write paths are implemented.
    valid: AtomicBitfield,
    /// Write cursor into `buf`; `Reader::read_earliest` snapshots it as the
    /// attempted read start.
    cursor: AtomicBufIndex,
    /// Presumably the logical stream position corresponding to `buf[0]` —
    /// inferred from the name; the accessors are still stubs.
    buf_start_pos: AtomicLogicalIndex,
    /// Position stored by `Reader::create_reader`; initialized to -1 in
    /// `new`, presumably a "no reader attached yet" sentinel — TODO confirm.
    read_start: AtomicBufIndex,
}
/// Write half of a [`Buffer`]; obtained from `Buffer::split`.
pub struct Writer<'b>(&'b Buffer);
/// Read half of a [`Buffer`]; obtained from `Buffer::split`.
pub struct Reader<'b>(&'b Buffer);
impl Buffer {
    /// Creates an empty buffer: zeroed storage, no valid bits, the write
    /// cursor at 0, and `read_start` parked at the -1 sentinel.
    pub const fn new() -> Self {
        Self {
            buf: UnsafeCell::new([0u8; BUFSIZE]),
            valid: AtomicBitfield::new(),
            cursor: AtomicBufIndex::new(0),
            buf_start_pos: AtomicLogicalIndex::new(0),
            read_start: AtomicBufIndex::new(-1),
        }
    }
    /// Splits the buffer into its writer/reader pair. Taking `&mut self`
    /// guarantees the pair is created while no other borrows exist.
    pub fn split(&mut self) -> (Writer<'_>, Reader<'_>) {
        let shared: &Buffer = self;
        (Writer(shared), Reader(shared))
    }
}
impl<'b> Reader<'b> {
    /// Records `start` as the position this reader will read from next.
    pub fn create_reader(&self, start: BufIndex) {
        self.0.read_start.store(start, Ordering::Relaxed);
    }
    /// Reads the earliest bytes still available into `buf`.
    /// Currently a stub: it only snapshots the write cursor and registers it
    /// as the attempted read start before bailing with `todo!()`.
    pub fn read_earliest(&self, buf: &mut [u8]) -> ReadEarliestResult {
        let cursor_now = self.0.cursor.load(Ordering::Relaxed);
        self.create_reader(cursor_now);
        todo!()
    }
    /// Reads bytes starting at logical position `start` into `buf`,
    /// returning the number of bytes copied. Unimplemented stub.
    pub fn read_from(&self, buf: &mut [u8], start: LogicalIndex) -> usize {
        todo!()
    }
}
/// Result of `Reader::read_earliest`: the number of bytes copied and,
/// presumably, the logical position of the first byte returned — confirm
/// once `read_earliest` is implemented.
///
/// Made `pub` (with `pub` fields): it is returned by the public
/// `Reader::read_earliest`, and a private type in a public signature trips
/// the `private_interfaces` lint and leaves callers unable to name or use
/// the result.
pub struct ReadEarliestResult {
    pub bytes_read: usize,
    pub slice_start: LogicalIndex,
}
impl<'b> Writer<'b> {
    /// Advances the write cursor by `delta` bytes. Unimplemented stub;
    /// presumably `Ok` carries the new position and `Err` signals there is
    /// no room — TODO confirm once implemented.
    fn advance_write_cursor(&self, delta: usize) -> Result<usize, ()> {
        todo!()
    }
    /// Appends `data` to the buffer. Unimplemented stub.
    fn write(&self, data: &[u8]) {
        todo!()
    }
}