use std::alloc::Layout;
use std::marker::PhantomData;
use std::mem;
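// Manual mirror of the layout of a `&mut dyn Trait` reference: a data
// pointer plus a vtable pointer.  Rust does not guarantee this layout,
// which is why `check_vtable_access` verifies it at runtime.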
#[repr(C)]
struct FatPointer {
data: *mut (),
vtable: *const VTable,
}
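// Assumed layout of the first three vtable slots on current rustc: the
// `drop_in_place` pointer (unused here, hence `pad`), then the size and
// alignment of the concrete type.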
#[repr(C)]
struct VTable {
pad: *const (),
size: usize,
align: usize,
}
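// Runtime verification that trait object references and vtables are
// still laid out as `FatPointer` and `VTable` assume, exercised with two
// closures of quite different sizes and alignments.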
#[inline]
fn check_vtable_access() {
#[inline]
fn check(mut item: impl CallTrait<u64>) {
let ctref: &mut dyn CallTrait<u64> = &mut item;
if mem::size_of_val(&ctref) != mem::size_of::<FatPointer>() {
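            // `CallItem`'s `Drop` impl panics, so `item` must always be
            // forgotten rather than dropped.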
std::mem::forget(item);
            panic!(
                "Size of Rust trait object reference has changed. \
                 Report this issue on the Stakker GitHub and enable \
                 feature 'no-unsafe-queue' to work around it for now"
            );
}
let size = std::mem::size_of_val(ctref);
let align = std::mem::align_of_val(ctref);
        assert!(size > 8 && size > align);
        unsafe {
let repr = mem::transmute_copy::<&mut dyn CallTrait<u64>, FatPointer>(&ctref);
let size2 = (*repr.vtable).size;
let align2 = (*repr.vtable).align;
std::mem::forget(item);
assert_eq!(
(size, align),
(size2, align2),
"Layout of Rust trait object vtable has changed. \
Report issue on Stakker github and enable feature \
'no-unsafe-queue' to work around this for the moment"
);
}
}
#[inline(never)]
fn with_u64(v1: u64, v2: u64) {
check(CallItem::new(move |a| *a = *a * v1 + v2));
}
#[repr(align(32))]
struct Align32([u64; 10]);
#[inline(never)]
fn with_align32(v: Align32) {
check(CallItem::new(move |a| *a = *a * v.0[0] + v.0[9]));
}
with_u64(12345, 34567);
with_align32(Align32([123456789; 10]));
}
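/// A queue of `FnOnce(&mut S)` closures stored inline in one flat
/// buffer, avoiding a separate heap allocation per closure.  Each entry
/// is a vtable pointer followed by the closure's captured data.
///
/// Illustrative usage (a sketch, not taken from the original source):
///
/// ```ignore
/// let mut queue = FnOnceQueue::<u32>::new();
/// queue.push(|v| *v += 1);
/// queue.push(|v| *v *= 10);
/// let mut state = 4;
/// queue.execute(&mut state);
/// assert_eq!(state, 50);
/// ```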
pub struct FnOnceQueue<S: 'static> {
storage: hvec::HVec,
phantomdata: PhantomData<S>,
}
impl<S: 'static> FnOnceQueue<S> {
pub const fn new() -> Self {
Self {
storage: hvec::HVec::new(),
phantomdata: PhantomData,
}
}
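    /// Verify the trait-object layout assumptions this queue relies on,
    /// panicking with a suggested workaround if they no longer hold.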
#[inline]
pub fn sanity_check() {
check_vtable_access();
}
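    /// Queue a closure to be called later with a `&mut S` context.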
#[inline]
pub fn push(&mut self, value: impl FnOnce(&mut S) + 'static) {
Self::push_aux(&mut self.storage, value);
}
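    // Split out from `push` so that `expand_storage` can reuse it.  The
    // vtable pointer is extracted with `transmute_copy` since stable
    // Rust offers no direct way to inspect a trait object reference.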
#[inline(always)]
fn push_aux(hv: &mut hvec::HVec, value: impl FnOnce(&mut S) + 'static) {
let mut item = CallItem::new(value);
let ctref: &mut dyn CallTrait<S> = &mut item;
assert_eq!(mem::size_of_val(&ctref), mem::size_of::<FatPointer>());
let repr = unsafe { mem::transmute_copy::<&mut dyn CallTrait<S>, FatPointer>(&ctref) };
hv.push(repr.vtable as *const (), item, Self::expand_storage);
}
const INITIAL_ALLOCATION: usize = 1024;
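    // Grow to the next power of two that fits the request.  Instead of
    // copying the old entries across, the old queue is pushed onto the
    // fresh buffer as a single closure that executes it (`req2` reserves
    // space for that extra entry), which preserves execution order.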
#[inline(never)]
fn expand_storage(hv: &mut hvec::HVec, req: usize) {
let push_old = hv.len() != 0;
let mut req2 = req;
if push_old {
req2 += mem::size_of::<(*mut (), FnOnceQueue<()>)>();
}
let size = (hv.cap().max(req2) + 1)
.max(Self::INITIAL_ALLOCATION)
.next_power_of_two();
let new = hvec::HVec::with_size(size);
let old = mem::replace(hv, new);
if push_old {
let mut old_queue = Self {
storage: old,
phantomdata: PhantomData,
};
Self::push_aux(hv, move |s| old_queue.execute(s));
}
assert!(hv.cap() - hv.len() >= req);
}
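    /// Queue an already-boxed closure; only the box pointer is copied
    /// into the queue entry.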
#[inline]
pub fn push_box(&mut self, value: Box<dyn FnOnce(&mut S) + 'static>) {
self.push(move |s| value(s));
}
pub fn is_empty(&self) -> bool {
self.storage.len() == 0
}
#[cfg(test)]
pub(crate) fn len(&self) -> usize {
self.storage.len()
}
#[cfg(test)]
pub(crate) fn cap(&self) -> usize {
self.storage.cap()
}
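    /// Call and remove every queued closure, in the order they were
    /// pushed.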
pub fn execute(&mut self, context: &mut S) {
self.drain_for_each(|ptr| unsafe { (&mut *ptr).call(context) });
}
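    // Walk the buffer, rebuilding a fat pointer for each entry from its
    // stored vtable pointer and the address of the value behind it.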
#[inline]
fn drain_for_each(&mut self, mut apply: impl FnMut(*mut dyn CallTrait<S>)) {
unsafe {
let mut it = self.storage.drain();
while let Some(vtable) = it.next_vp() {
let vtable = vtable as *const VTable;
let layout = Layout::from_size_align_unchecked((*vtable).size, (*vtable).align);
let data = it.next_unchecked(layout);
let repr = FatPointer { data, vtable };
apply(mem::transmute_copy::<FatPointer, *mut dyn CallTrait<S>>(
&repr,
));
}
}
}
}
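// Closures still queued when the queue is dropped are never called, but
// their captured data must still be dropped in place.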
impl<S> Drop for FnOnceQueue<S> {
fn drop(&mut self) {
self.drain_for_each(|ptr| unsafe { (&mut *ptr).drop() });
}
}
impl<S> Default for FnOnceQueue<S> {
fn default() -> Self {
Self::new()
}
}
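// A minimal untyped vector: a raw byte buffer holding alternating
// vtable pointers and values of varying size and alignment.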
mod hvec {
use std::alloc::{self, Layout};
use std::marker::PhantomData;
use std::mem;
use std::ptr;
type VP = *const ();
pub struct HVec {
ptr: *mut u8,
len: usize,
cap: usize,
}
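    // Round the pointer up to the next multiple of `pow2` (a power of
    // two).  `(pow2 - 1) & !(p - 1)` is the distance to that multiple,
    // i.e. `(-p) mod pow2`; `align_off` below applies the same trick to
    // an offset, e.g. `align_off(9, 8) == 16`.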
#[inline]
unsafe fn align(p: *mut u8, pow2: usize) -> *mut u8 {
let inc = (pow2 - 1) & !((p as usize).wrapping_sub(1));
p.add(inc)
}
#[inline]
const fn align_off(off: usize, pow2: usize) -> usize {
let inc = (pow2 - 1) & !(off.wrapping_sub(1));
off + inc
}
impl HVec {
pub const fn new() -> Self {
Self {
ptr: ptr::null_mut(),
len: 0,
cap: 0,
}
}
pub fn with_size(size: usize) -> Self {
let layout = Layout::from_size_align(size, mem::align_of::<VP>()).unwrap();
let ptr = unsafe { alloc::alloc(layout) };
if ptr.is_null() {
alloc::handle_alloc_error(layout);
}
Self {
ptr,
len: 0,
cap: size,
}
}
pub fn len(&self) -> usize {
self.len
}
pub fn cap(&self) -> usize {
self.cap
}
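        // Append a vtable pointer and a value, expanding first if the
        // entry does not fit in the remaining space.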
#[inline]
pub fn push<T>(&mut self, v1: VP, v2: T, expand: impl FnOnce(&mut Self, usize)) {
            // Space needed for the entry: vtable pointer, padding up to
            // T's alignment, T itself, then padding back to pointer
            // alignment ready for the next entry.
            let req = mem::size_of::<VP>();
            let req = align_off(req, mem::align_of::<T>());
            let req = req + mem::size_of::<T>();
            let req = align_off(req, mem::align_of::<VP>());
            if req > self.cap - self.len {
expand(self, req);
#[cfg(debug_assertions)]
if req > self.cap - self.len {
mem::forget(v2);
panic!("HVec::push: not enough space after expand");
}
}
#[allow(clippy::cast_ptr_alignment)]
unsafe {
let p = self.ptr.add(self.len);
debug_assert_eq!(0, (p as usize) % mem::align_of::<VP>());
(p as *mut VP).write(v1);
let p = p.add(mem::size_of::<VP>());
let p = align(p, mem::align_of::<T>());
debug_assert_eq!(0, (p as usize) % mem::align_of::<T>());
(p as *mut T).write(v2);
let p = p.add(mem::size_of::<T>());
let p = align(p, mem::align_of::<VP>());
self.len = (p as usize) - (self.ptr as usize);
debug_assert!(self.len <= self.cap);
}
}
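        // Empty the buffer, handing its contents to a cursor that the
        // caller consumes in place.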
#[inline]
pub fn drain(&mut self) -> Drain<'_> {
let end = (self.ptr as usize + self.len) as *mut u8;
self.len = 0;
Drain {
pos: self.ptr,
end,
phantomdata: PhantomData,
}
}
}
impl Drop for HVec {
fn drop(&mut self) {
if !self.ptr.is_null() {
let layout = Layout::from_size_align(self.cap, mem::align_of::<VP>()).unwrap();
unsafe { alloc::dealloc(self.ptr, layout) };
}
}
}
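    // Cursor over a drained `HVec`.  The caller must request exactly the
    // sequence of layouts that was pushed; nothing is checked at runtime
    // beyond debug assertions.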
pub struct Drain<'a> {
pos: *mut u8,
end: *mut u8,
phantomdata: PhantomData<&'a ()>,
}
impl<'a> Drain<'a> {
#[inline]
pub unsafe fn next_vp(&mut self) -> Option<VP> {
if self.pos < self.end {
let p = self.pos;
self.pos = p.add(mem::size_of::<VP>());
debug_assert!(self.pos <= self.end);
debug_assert_eq!(0, (p as usize) % mem::align_of::<VP>());
#[allow(clippy::cast_ptr_alignment)]
Some(*(p as *mut VP))
} else {
None
}
}
#[inline]
pub unsafe fn next_unchecked(&mut self, layout: Layout) -> *mut () {
let p = align(self.pos, layout.align());
self.pos = align(p.add(layout.size()), mem::align_of::<VP>());
debug_assert!(self.pos <= self.end);
debug_assert_eq!(0, (p as usize) % layout.align());
p as *mut ()
}
}
}
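// Object-safe wrapper around an `FnOnce` closure.  Both methods are
// unsafe because exactly one of them may be invoked, exactly once:
// `call` moves the closure out with `ptr::read`, and `drop` drops it in
// place.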
trait CallTrait<S> {
unsafe fn call(&mut self, c: &mut S);
unsafe fn drop(&mut self);
}
struct CallItem<S, F>
where
F: FnOnce(&mut S),
{
cb: F,
phantomdata: PhantomData<S>,
}
impl<S, F> CallItem<S, F>
where
F: FnOnce(&mut S),
{
fn new(f: F) -> Self {
Self {
cb: f,
phantomdata: PhantomData,
}
}
}
impl<S, F> CallTrait<S> for CallItem<S, F>
where
F: FnOnce(&mut S),
{
unsafe fn call(&mut self, c: &mut S) {
let cb = std::ptr::read(&self.cb);
cb(c);
}
unsafe fn drop(&mut self) {
std::ptr::drop_in_place(&mut self.cb);
}
}
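// Once an item's bytes have been copied into the queue, dropping the
// original `CallItem` as well would double-drop the captures; this
// panic catches any code path that lets one drop normally.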
impl<S, F> Drop for CallItem<S, F>
where
F: FnOnce(&mut S),
{
fn drop(&mut self) {
panic!("CallItem must never be dropped");
}
}
#[cfg(test)]
mod tests {
use std::cell::RefCell;
use std::rc::Rc;
fn d1(v1: i32) {
println!("d1: {}", v1);
}
fn d2(v1: i32, v2: i32) {
println!("d2: {} {}", v1, v2);
}
fn d3(v1: i32, v2: i32, v3: i32) {
println!("d3: {} {} {}", v1, v2, v3);
}
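    // Accumulates one hex digit per observed call so the exact execution
    // order can be asserted at the end of a test.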
struct Confirm(u64);
impl Confirm {
fn push(&mut self, nyb: u64) {
self.0 = (self.0 << 4) + nyb;
}
}
#[inline(never)]
fn add_d3(queue: &mut super::FnOnceQueue<Confirm>, v1: i32, v2: i32, v3: i32) {
queue.push(move |c| {
c.push(0xD);
d3(v1, v2, v3);
});
}
const fn round_up(val: usize, pow2: usize) -> usize {
val.wrapping_add(pow2 - 1) & !(pow2 - 1)
}
#[repr(align(64))]
struct Align64([u64; 8]);
fn accept_align64(c: &mut Confirm, v: Align64) {
c.push(0x6);
assert_eq!(v.0[0], 123456789);
}
#[inline(never)]
fn push_call_to_accept_align64(queue: &mut super::FnOnceQueue<Confirm>, v: Align64) {
queue.push(move |c| accept_align64(c, v));
}
fn accept_bigarr(c: &mut Confirm, v: [u32; 512]) {
c.push(0xB);
assert_eq!(v[0], 123456789);
}
#[inline(never)]
fn push_call_to_accept_bigarr(queue: &mut super::FnOnceQueue<Confirm>, v: [u32; 512]) {
queue.push(move |c| accept_bigarr(c, v));
}
#[test]
fn check_space_used() {
super::check_vtable_access();
let mut confirm = Confirm(0xF);
let mut queue = super::FnOnceQueue::<Confirm>::new();
let i32_unit = std::mem::size_of::<i32>();
let usize_unit = std::mem::size_of::<usize>();
assert_eq!(queue.cap(), 0);
let u0 = queue.len();
add_d3(&mut queue, 12345678, 23456781, 34567812);
assert_eq!(
queue.len() - u0,
round_up(usize_unit + 3 * i32_unit, usize_unit)
);
assert_eq!(queue.cap(), 1024);
let u0 = queue.len();
add_d3(&mut queue, 987654321, 765432198, 543219876);
assert_eq!(
queue.len() - u0,
round_up(usize_unit + 3 * i32_unit, usize_unit)
);
let u0 = queue.len();
queue.push(|c| {
c.push(1);
d1(1);
});
assert_eq!(queue.len() - u0, usize_unit);
let u0 = queue.len();
queue.push(|c| {
c.push(2);
d2(2, 3);
});
assert_eq!(queue.len() - u0, usize_unit);
let u0 = queue.len();
queue.push(|c| {
c.push(3);
d3(4, 5, 6);
});
assert_eq!(queue.len() - u0, usize_unit);
push_call_to_accept_align64(&mut queue, Align64([123456789; 8]));
let u0 = queue.len();
push_call_to_accept_align64(&mut queue, Align64([123456789; 8]));
assert_eq!(queue.len() - u0, 128);
assert_eq!(queue.cap(), 1024);
push_call_to_accept_bigarr(&mut queue, [123456789; 512]);
assert_eq!(
queue.len(),
usize_unit + std::mem::size_of::<super::FnOnceQueue<()>>() + usize_unit + 2048
);
assert_eq!(queue.cap(), 4096);
push_call_to_accept_bigarr(&mut queue, [123456789; 512]);
assert_eq!(queue.cap(), 8192);
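        // Expected digits: F (initial), DD (two d3 calls), 123 (three
        // inline pushes), 66 (two align64 calls), BB (two bigarr calls).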
queue.execute(&mut confirm);
assert_eq!(confirm.0, 0xFDD12366BB);
}
struct TestDrop(Rc<RefCell<u32>>);
impl TestDrop {
fn run(&self) {
panic!("TestDrop::run should never execute");
}
}
impl Drop for TestDrop {
fn drop(&mut self) {
*self.0.borrow_mut() += 1;
}
}
#[test]
fn test_drop() {
super::check_vtable_access();
let confirm = Rc::new(RefCell::new(0));
let mut queue = super::FnOnceQueue::<()>::new();
let test = TestDrop(confirm.clone());
queue.push(move |_| test.run());
assert!(queue.len() > 0);
assert_eq!(0, *confirm.borrow());
drop(queue);
assert_eq!(1, *confirm.borrow());
}
}