use std::sync::Arc;
use crate::Vec64;
use crate::structs::shared_buffer::internal::owned::{OWNED_VT, Owned};
use crate::structs::shared_buffer::internal::pvec::PromotableVec;
use crate::structs::shared_buffer::internal::vtable::{
PROMO_EVEN_VT, PROMO_ODD_VT, PROMO64_EVEN_VT, PROMO64_ODD_VT, STATIC_VT, Vtable,
};
use core::ops::RangeBounds;
use core::{ptr, slice};
use std::borrow::Borrow;
use std::cmp::Ordering;
use std::fmt;
use std::hash::Hash;
use std::hash::Hasher;
use std::ops::Deref;
use std::sync::atomic::{AtomicPtr, AtomicUsize};
mod internal {
pub(crate) mod owned;
pub(crate) mod pvec;
pub(crate) mod vtable;
}
#[cfg(all(target_os = "linux", feature = "memfd"))]
mod memfd;
#[cfg(all(target_os = "linux", feature = "memfd"))]
pub use memfd::MemfdBuffer;
/// A cheaply cloneable, immutable view into shared byte storage.
///
/// Each handle stores a raw window (`ptr`/`len`) plus a vtable that knows how
/// to clone, drop, and convert the backing storage (static slice, `Vec`,
/// `Vec64`, or an arbitrary owner — see the constructors in the `impl`).
// `repr(C)`: fixed field order/layout. NOTE(review): presumably relied upon by
// the vtable implementations in `internal::vtable` — confirm there.
#[repr(C)]
pub struct SharedBuffer {
    // Start of the viewed bytes; may point into the middle of the backing
    // allocation after `slice()`.
    ptr: *const u8,
    // Number of viewed bytes.
    len: usize,
    // Vtable-specific shared state (e.g. a refcounted header); null for
    // static buffers.
    data: AtomicPtr<()>,
    // Dispatch table for clone/drop/to_vec/is_unique on this storage kind.
    vtable: &'static Vtable,
}
impl SharedBuffer {
    /// Creates an empty buffer.
    ///
    /// `const` and allocation-free: it is backed by a static empty slice, so
    /// it can be used in `static`/`const` contexts.
    pub const fn new() -> Self {
        const EMPTY: &[u8] = &[];
        Self::from_static(EMPTY)
    }

    /// Wraps a `'static` byte slice with no allocation or reference counting.
    pub const fn from_static(s: &'static [u8]) -> Self {
        Self {
            ptr: s.as_ptr(),
            len: s.len(),
            // Null shared-state pointer: the static vtable needs no header.
            data: AtomicPtr::new(ptr::null_mut()),
            vtable: &STATIC_VT,
        }
    }

    /// Takes ownership of a `Vec<u8>` without copying its contents.
    ///
    /// The vec is boxed together with a refcount header. The vtable variant
    /// records the parity of the capacity — NOTE(review): presumably so the
    /// capacity's low bit can be reconstructed later; confirm against the
    /// `PROMO_*_VT` implementations.
    pub fn from_vec(mut v: Vec<u8>) -> Self {
        let ptr = v.as_mut_ptr();
        let len = v.len();
        let cap = v.capacity();
        let raw = Box::into_raw(Box::new(PromotableVec::<Vec<u8>> {
            ref_cnt: AtomicUsize::new(1),
            inner: v,
        }));
        Self {
            ptr,
            len,
            data: AtomicPtr::new(raw.cast()),
            vtable: if cap & 1 == 0 {
                &PROMO_EVEN_VT
            } else {
                &PROMO_ODD_VT
            },
        }
    }

    /// Takes ownership of a `Vec64<u8>` without copying its contents.
    ///
    /// Mirrors [`Self::from_vec`], using the `Vec64`-flavored vtables so the
    /// storage can later be recovered via [`Self::into_vec64`].
    pub fn from_vec64(mut v: Vec64<u8>) -> Self {
        let ptr = v.as_mut_ptr();
        let len = v.len();
        let cap = v.capacity();
        let raw = Box::into_raw(Box::new(PromotableVec::<Vec64<u8>> {
            ref_cnt: AtomicUsize::new(1),
            inner: v,
        }));
        Self {
            ptr,
            len,
            data: AtomicPtr::new(raw.cast()),
            vtable: if cap & 1 == 0 {
                &PROMO64_EVEN_VT
            } else {
                &PROMO64_ODD_VT
            },
        }
    }

    /// Reinterprets a typed `Vec64<T>` as raw bytes and wraps it.
    ///
    /// # Safety
    ///
    /// The caller must guarantee that viewing the elements as plain bytes is
    /// sound: `T`'s destructors are never run once the storage is retyped as
    /// `u8` (so `T` must not need `Drop`, or the caller accepts skipping it),
    /// and every byte of the initialized elements must be valid to read.
    /// NOTE(review): the byte length/capacity computed here must describe the
    /// same allocation that `Vec64Alloc` will eventually free — confirm
    /// `Vec64`'s allocator guarantees this for arbitrary `T`.
    pub unsafe fn from_vec64_typed<T>(v: Vec64<T>) -> Self {
        let byte_len = v.len() * std::mem::size_of::<T>();
        let byte_cap = v.0.capacity() * std::mem::size_of::<T>();
        let ptr = v.0.as_ptr() as *mut u8;
        // Forget the typed vec: ownership of the allocation transfers to the
        // byte-typed vec rebuilt below.
        std::mem::forget(v);
        let raw_vec = unsafe {
            Vec::from_raw_parts_in(ptr, byte_len, byte_cap, vec64::Vec64Alloc::default())
        };
        Self::from_vec64(Vec64(raw_vec))
    }

    /// Shares the bytes of an `Arc`-owned container without copying; the
    /// `Arc` is kept alive for as long as any handle exists.
    pub fn from_arc<M: ?Sized + AsRef<[u8]> + Send + Sync + 'static>(arc: Arc<M>) -> Self {
        // Newtype so `from_owner`'s sized-`T` bound accepts an unsized `M`.
        struct ArcOwner<M: ?Sized>(Arc<M>);
        impl<M: ?Sized + AsRef<[u8]>> AsRef<[u8]> for ArcOwner<M> {
            #[inline]
            fn as_ref(&self) -> &[u8] { (*self.0).as_ref() }
        }
        // SAFETY: `ArcOwner` is a plain wrapper over `Arc<M>`; with
        // `M: Send + Sync` the inner `Arc<M>` is itself `Send + Sync`, so the
        // wrapper may be too.
        unsafe impl<M: ?Sized + Send + Sync> Send for ArcOwner<M> {}
        unsafe impl<M: ?Sized + Send + Sync> Sync for ArcOwner<M> {}
        Self::from_owner(ArcOwner(arc))
    }

    /// Wraps any byte-owning value; the buffer keeps `owner` alive and frees
    /// it through the type-erased `drop_fn` when the last handle goes away.
    pub fn from_owner<T>(owner: T) -> Self
    where
        T: AsRef<[u8]> + Send + Sync + 'static,
    {
        // Type-erased destructor stored in the `Owned` header so the vtable
        // can free the box without knowing `T`.
        unsafe fn drop_typed<T: AsRef<[u8]> + Send + Sync + 'static>(ptr: *mut ()) {
            unsafe { drop(Box::from_raw(ptr as *mut Owned<T>)); }
        }
        let raw: *mut Owned<T> = Box::into_raw(Box::new(Owned {
            ref_cnt: AtomicUsize::new(1),
            drop_fn: drop_typed::<T>,
            owner,
        }));
        // SAFETY: `raw` came from a live Box just above and is not yet shared.
        let buf = unsafe { (*raw).owner.as_ref() };
        Self {
            ptr: buf.as_ptr(),
            len: buf.len(),
            data: AtomicPtr::new(raw.cast()),
            vtable: &OWNED_VT,
        }
    }

    /// Number of bytes in this view.
    #[inline]
    pub fn len(&self) -> usize {
        self.len
    }

    /// `true` when the view contains no bytes.
    #[inline]
    pub fn is_empty(&self) -> bool {
        self.len == 0
    }

    /// Borrows the viewed bytes as a slice.
    #[inline]
    pub fn as_slice(&self) -> &[u8] {
        // SAFETY: every constructor establishes that `ptr` points at `len`
        // readable bytes kept alive by the backing storage.
        unsafe { slice::from_raw_parts(self.ptr, self.len) }
    }

    /// Returns a new handle viewing `range` within this buffer, sharing the
    /// same backing storage (no bytes are copied).
    ///
    /// # Panics
    ///
    /// Panics if the range is inverted or out of bounds, or if a bound of
    /// `usize::MAX` would overflow when converted to an exclusive index.
    pub fn slice(&self, range: impl RangeBounds<usize>) -> Self {
        use core::ops::Bound::*;
        let start = match range.start_bound() {
            Unbounded => 0,
            Included(&n) => n,
            // checked_add: a plain `n + 1` wraps silently in release builds
            // when n == usize::MAX, yielding a wrong (empty) slice instead of
            // a panic.
            Excluded(&n) => n.checked_add(1).expect("slice start bound overflows usize"),
        };
        let end = match range.end_bound() {
            Unbounded => self.len,
            Included(&n) => n.checked_add(1).expect("slice end bound overflows usize"),
            Excluded(&n) => n,
        };
        assert!(start <= end && end <= self.len);
        if start == end {
            // Empty view: return a refcount-free static buffer rather than
            // keeping the backing storage alive for nothing.
            return SharedBuffer::new();
        }
        // Clone bumps the refcount; then narrow the window.
        let mut s = self.clone();
        // SAFETY: start <= end <= len was asserted above, so the offset stays
        // within (or one past the end of) the backing allocation.
        s.ptr = unsafe { s.ptr.add(start) };
        s.len = end - start;
        s
    }

    /// Consumes the buffer and produces a `Vec<u8>` via the vtable.
    /// NOTE(review): whether this reclaims the original allocation or copies
    /// depends on the vtable — confirm in `internal::vtable`.
    #[inline]
    pub fn into_vec(self) -> Vec<u8> {
        // ManuallyDrop: the vtable call takes over ownership, so `Drop` must
        // not run as well.
        let me = core::mem::ManuallyDrop::new(self);
        unsafe { (me.vtable.to_vec)(&me.data, me.ptr, me.len) }
    }

    /// Consumes the buffer and produces a `Vec64<u8>` via the vtable
    /// (same ownership handling as [`Self::into_vec`]).
    #[inline]
    pub fn into_vec64(self) -> Vec64<u8> {
        let me = core::mem::ManuallyDrop::new(self);
        unsafe { (me.vtable.to_vec64)(&me.data, me.ptr, me.len) }
    }

    /// Returns `true` when this handle is the only reference to the backing
    /// storage, as reported by the vtable's refcount check.
    #[inline]
    pub fn is_unique(&self) -> bool {
        unsafe { (self.vtable.is_unique)(&self.data) }
    }
}
impl Clone for SharedBuffer {
    /// Creates another handle to the same bytes by dispatching through the
    /// vtable — typically a refcount increment, never a byte copy here.
    fn clone(&self) -> Self {
        // SAFETY: `data`, `ptr`, and `len` were produced together by one of
        // the constructors, which is the contract the vtable functions expect.
        unsafe { (self.vtable.clone)(&self.data, self.ptr, self.len) }
    }
}
impl Drop for SharedBuffer {
    /// Releases this handle's claim on the backing storage via the vtable
    /// (which frees the storage once the last reference is gone).
    fn drop(&mut self) {
        // SAFETY: fields originate from a constructor and have not been
        // repurposed; each handle is dropped at most once.
        unsafe { (self.vtable.drop)(&mut self.data, self.ptr, self.len) }
    }
}
impl Default for SharedBuffer {
#[inline]
fn default() -> Self {
Self::new()
}
}
impl PartialEq for SharedBuffer {
    /// Two buffers are equal when their byte contents match, regardless of
    /// how the underlying storage is owned or shared.
    #[inline]
    fn eq(&self, other: &Self) -> bool {
        self.as_slice().eq(other.as_slice())
    }
}
// Byte-wise equality is reflexive, so full equivalence holds.
impl Eq for SharedBuffer {}
impl PartialEq<[u8]> for SharedBuffer {
#[inline]
fn eq(&self, other: &[u8]) -> bool {
self.as_slice() == other
}
}
impl PartialEq<SharedBuffer> for [u8] {
    /// Mirror impl so `slice == buffer` works in either operand order.
    #[inline]
    fn eq(&self, other: &SharedBuffer) -> bool {
        other.as_slice() == self
    }
}
impl PartialEq<Vec<u8>> for SharedBuffer {
    /// Compares the buffer's contents against a vector's contents.
    #[inline]
    fn eq(&self, other: &Vec<u8>) -> bool {
        self.as_slice() == &other[..]
    }
}
impl PartialEq<SharedBuffer> for Vec<u8> {
    /// Mirror impl so `vec == buffer` works in either operand order.
    #[inline]
    fn eq(&self, other: &SharedBuffer) -> bool {
        &self[..] == other.as_slice()
    }
}
impl PartialOrd for SharedBuffer {
    /// Delegates to [`Ord`] so the total and partial orderings can never
    /// disagree (clippy: `non_canonical_partial_ord_impl`). Behavior is
    /// unchanged: byte slices are totally ordered, so the previous slice
    /// `partial_cmp` never returned `None` either.
    #[inline]
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}
impl Ord for SharedBuffer {
    /// Lexicographic byte ordering, matching `[u8]` comparison semantics.
    fn cmp(&self, other: &Self) -> Ordering {
        Ord::cmp(self.as_slice(), other.as_slice())
    }
}
impl Hash for SharedBuffer {
    /// Hashes exactly like the equivalent `[u8]` slice, keeping `Hash`
    /// consistent with `PartialEq` and `Borrow<[u8]>`.
    fn hash<H: Hasher>(&self, state: &mut H) {
        Hash::hash(self.as_slice(), state)
    }
}
impl fmt::Debug for SharedBuffer {
    /// Formats as `SharedBuffer([…])`, showing the raw bytes.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let mut tuple = f.debug_tuple("SharedBuffer");
        tuple.field(&self.as_slice());
        tuple.finish()
    }
}
impl Deref for SharedBuffer {
type Target = [u8];
#[inline]
fn deref(&self) -> &[u8] {
self.as_slice()
}
}
impl AsRef<[u8]> for SharedBuffer {
#[inline]
fn as_ref(&self) -> &[u8] {
self.as_slice()
}
}
impl Borrow<[u8]> for SharedBuffer {
    /// Enables `&[u8]` lookups in maps/sets keyed by `SharedBuffer`.
    #[inline]
    fn borrow(&self) -> &[u8] {
        // Deref coercion: &SharedBuffer -> &[u8].
        self
    }
}
impl From<Vec<u8>> for SharedBuffer {
#[inline]
fn from(v: Vec<u8>) -> Self {
Self::from_vec(v)
}
}
impl From<Vec64<u8>> for SharedBuffer {
#[inline]
fn from(v: Vec64<u8>) -> Self {
Self::from_vec64(v)
}
}
impl From<&'static [u8]> for SharedBuffer {
#[inline]
fn from(s: &'static [u8]) -> Self {
Self::from_static(s)
}
}
impl IntoIterator for SharedBuffer {
    type Item = u8;
    type IntoIter = std::vec::IntoIter<u8>;
    /// Consumes the buffer, first converting it to a `Vec<u8>` via the vtable.
    fn into_iter(self) -> Self::IntoIter {
        let bytes = self.into_vec();
        bytes.into_iter()
    }
}
impl<'a> IntoIterator for &'a SharedBuffer {
    type Item = &'a u8;
    type IntoIter = std::slice::Iter<'a, u8>;
    /// Borrowing iteration over the bytes, so `for b in &buf` works.
    #[inline]
    fn into_iter(self) -> Self::IntoIter {
        self.as_slice().into_iter()
    }
}
impl FromIterator<u8> for SharedBuffer {
    /// Collects the iterator into a freshly allocated buffer.
    #[inline]
    fn from_iter<I: IntoIterator<Item = u8>>(iter: I) -> Self {
        Self::from_vec(Vec::from_iter(iter))
    }
}
impl fmt::Display for SharedBuffer {
    /// Writes the contents as UTF-8 text when valid; otherwise falls back to
    /// a lowercase hex dump of every byte.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let bytes = self.as_slice();
        if let Ok(text) = std::str::from_utf8(bytes) {
            f.write_str(text)
        } else {
            bytes.iter().try_for_each(|byte| write!(f, "{:02x}", byte))
        }
    }
}
// SAFETY: the backing storage is reference-counted with atomic operations
// (`AtomicUsize`/`AtomicPtr`), and `from_owner`/`from_arc` require
// `Send + Sync` owners, so handles may be moved and shared across threads.
// NOTE(review): soundness ultimately depends on the vtable implementations in
// `internal::vtable` — confirm their clone/drop paths are thread-safe.
unsafe impl Send for SharedBuffer {}
unsafe impl Sync for SharedBuffer {}
#[cfg(test)]
mod tests {
    use std::sync::Arc;
    use super::*;

    // Vec -> SharedBuffer -> Vec round-trip preserves the contents, including
    // through a clone (shared refcount path).
    #[test]
    fn roundtrip_vec() {
        let v = vec![1, 2, 3, 4, 5];
        let sb = SharedBuffer::from_vec(v);
        assert_eq!(sb.as_slice(), &[1, 2, 3, 4, 5]);
        let v2 = sb.clone().into_vec();
        assert_eq!(v2, vec![1, 2, 3, 4, 5]);
    }

    // Same round-trip through the Vec64-backed constructors/converters.
    #[test]
    fn roundtrip_vec64() {
        let mut v64 = Vec64::with_capacity(5);
        v64.extend_from_slice(&[9, 8, 7, 6, 5]);
        let sb = SharedBuffer::from_vec64(v64);
        assert_eq!(sb.as_slice(), &[9, 8, 7, 6, 5]);
        let v64_out = sb.clone().into_vec64();
        assert_eq!(v64_out.as_slice(), &[9, 8, 7, 6, 5]);
    }

    // is_unique tracks the refcount of owner-backed buffers: true when sole
    // handle, false while a clone exists, true again after the clone drops.
    #[test]
    fn owned_unique_check() {
        let mmap = Arc::new([10u8, 11, 12, 13]) as Arc<[u8]>;
        let sb = SharedBuffer::from_owner(mmap);
        assert!(sb.is_unique());
        let sb2 = sb.clone();
        assert!(!sb.is_unique());
        drop(sb2);
        assert!(sb.is_unique());
    }
}