use core::slice;
use std::sync::atomic::{AtomicPtr, AtomicUsize, Ordering};
use crate::Vec64;
use crate::structs::shared_buffer::SharedBuffer;
use crate::structs::shared_buffer::internal::vtable::Vtable;
/// Intrusive, reference-counted header stored at the start of a heap
/// allocation, followed by the value `T` that actually owns the bytes.
///
/// Layout invariant: this struct is `#[repr(C)]` on purpose. `owned_clone`
/// and the `is_unique` closure read `ref_cnt` by casting the allocation
/// pointer to `*const AtomicUsize` (so `ref_cnt` must be the first field),
/// and `owned_drop` locates `drop_fn` by offsetting
/// `size_of::<AtomicUsize>()` bytes past the start (so `drop_fn` must come
/// immediately after `ref_cnt` with no padding). Do not reorder fields or
/// remove `repr(C)`.
#[repr(C)]
pub(crate) struct Owned<T: AsRef<[u8]> + Send + Sync + 'static> {
// Number of live SharedBuffer handles sharing this allocation.
pub(crate) ref_cnt: AtomicUsize,
// Type-erased destructor for the whole `Owned<T>` allocation; invoked by
// `owned_drop` when the count reaches zero.
pub(crate) drop_fn: unsafe fn(*mut ()),
// The owner of the underlying bytes; dropped via `drop_fn`.
pub(crate) owner: T,
}
/// Vtable `clone`: bump the shared reference count and hand out a new
/// `SharedBuffer` view over the same owned allocation.
///
/// # Safety
/// `h` must hold a pointer to a live `Owned<T>` allocation whose first
/// `repr(C)` field is the `AtomicUsize` reference count, and `p`/`l` must
/// describe bytes kept alive by that owner.
unsafe fn owned_clone(h: &AtomicPtr<()>, p: *const u8, l: usize) -> SharedBuffer {
    let raw = h.load(Ordering::Acquire);
    assert!(!raw.is_null());
    // SAFETY: per the `Owned` layout invariant, the allocation starts with
    // the `AtomicUsize` refcount, so this cast reads that field.
    let ref_cnt = unsafe { &*(raw as *const AtomicUsize) };
    // Relaxed suffices for the increment itself (same reasoning as `Arc`):
    // the new handle is derived from an existing live handle, so any needed
    // synchronization already happened when that handle was obtained.
    let old = ref_cnt.fetch_add(1, Ordering::Relaxed);
    // Guard against refcount overflow: if the count ever wrapped, a later
    // decrement could free the allocation while handles are still live
    // (use-after-free). Mirrors `Arc::clone`, which aborts in this case.
    if old > usize::MAX / 2 {
        std::process::abort();
    }
    SharedBuffer {
        ptr: p,
        len: l,
        data: AtomicPtr::new(raw),
        vtable: &OWNED_VT,
    }
}
/// Vtable `drop`: decrement the shared reference count and, when this was
/// the last live handle, invoke the type-erased destructor stored in the
/// `Owned` header to drop the owner and free the allocation.
///
/// # Safety
/// `h` must hold either null or a pointer to a live `Owned<T>` allocation
/// laid out as documented on `Owned` (`repr(C)`: refcount first, then
/// `drop_fn`).
unsafe fn owned_drop(h: &mut AtomicPtr<()>, _p: *const u8, _l: usize) {
let raw = h.load(Ordering::Acquire);
// Null handle: nothing shared to release.
if raw.is_null() {
return;
}
// SAFETY: the allocation begins with the `AtomicUsize` refcount
// (`Owned` is repr(C) with `ref_cnt` as its first field).
let ref_cnt = unsafe { &*(raw as *const AtomicUsize) };
// AcqRel: the Release half publishes this handle's last use of the buffer
// to whichever thread frees it; the Acquire half (effective only for the
// thread that observes the count hit zero) makes every other handle's
// effects visible before the destructor runs.
if ref_cnt.fetch_sub(1, Ordering::AcqRel) == 1 {
unsafe {
// SAFETY: `Owned` is repr(C) with `ref_cnt: AtomicUsize` immediately
// followed by `drop_fn: unsafe fn(*mut ())`; both are usize-sized and
// usize-aligned, so there is no padding and `drop_fn` sits exactly
// `size_of::<AtomicUsize>()` bytes past the start of the allocation.
// NOTE(review): this hard-codes the field offset — it silently breaks
// if `Owned`'s leading fields ever change.
let drop_fn_ptr =
(raw as *const u8).add(std::mem::size_of::<AtomicUsize>())
as *const unsafe fn(*mut ());
let drop_fn = *drop_fn_ptr;
// Last handle gone: run the erased destructor on the whole allocation.
drop_fn(raw);
}
}
}
/// Vtable for `SharedBuffer`s backed by an `Owned<T>` allocation: clones and
/// drops manipulate the intrusive refcount; the conversion hooks simply copy
/// the byte range out.
pub(crate) static OWNED_VT: Vtable = Vtable {
    clone: owned_clone,
    drop: owned_drop,
    // Unique iff the handle is non-null and exactly one reference remains.
    is_unique: |h| {
        let raw = h.load(Ordering::Acquire);
        // SAFETY (inside the closure): a non-null handle points at an
        // `Owned<T>` whose first repr(C) field is the refcount.
        !raw.is_null() && unsafe { &*(raw as *const AtomicUsize) }.load(Ordering::Acquire) == 1
    },
    // Copy the referenced bytes into a fresh `Vec<u8>`.
    to_vec: |_, p, l| {
        let bytes = unsafe { slice::from_raw_parts(p, l) };
        bytes.to_vec()
    },
    // Copy the referenced bytes into a fresh `Vec64`, reserving up front.
    to_vec64: |_, p, l| {
        let mut out = Vec64::with_capacity(l);
        unsafe {
            out.extend_from_slice(slice::from_raw_parts(p, l));
        }
        out
    },
    // Owned buffers are plain heap allocations, never memfd-backed.
    #[cfg(all(target_os = "linux", feature = "memfd"))]
    memfd_fd: |_| None,
};