use crate::error::Error;
use crate::op::Sqe;
use crate::syscall;
use crate::types::{
CqeFlags, EnterFlags, Features, IoUringCqe, IoUringParams, IoUringSqe, IoVec, MapFlags, Prot,
RegisterOp, RingOffset, SetupFlags,
};
use core::sync::atomic::{AtomicU32, Ordering};
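/// A single completion queue event (CQE), copied out of the ring.
///
/// `result` follows the kernel convention: non-negative values carry the
/// operation's result (often a byte count) and negative values are a negated
/// errno.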
#[derive(Debug, Clone, Copy)]
pub struct Completion {
pub user_data: u64,
pub result: i32,
pub flags: CqeFlags,
}
impl Completion {
#[allow(clippy::cast_sign_loss)]
pub const fn into_result(self) -> Result<u32, Error> {
if self.result < 0 {
Err(Error(-self.result))
} else {
Ok(self.result as u32)
}
}
#[must_use]
pub const fn is_err(&self) -> bool {
self.result < 0
}
#[must_use]
pub const fn buffer_id(&self) -> Option<u16> {
if self.flags.contains(CqeFlags::BUFFER) {
#[allow(clippy::cast_possible_truncation)]
Some((self.flags.bits() >> 16) as u16)
} else {
None
}
}
}
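/// Bookkeeping for one `mmap`'d region; a `len` of zero means "not mapped".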
struct MappedRegion {
addr: usize,
len: usize,
}
impl MappedRegion {
const fn new(addr: usize, len: usize) -> Self {
Self { addr, len }
}
}
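/// Cleans up a partially constructed ring: on drop it unmaps whatever was
/// mapped so far and closes the ring fd. `disarm` leaks the guard once
/// ownership of the resources has moved into the finished `IoUring`.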
struct SetupGuard {
fd: usize,
sq_ring: MappedRegion,
cq_ring: MappedRegion,
sqes: MappedRegion,
}
impl SetupGuard {
const fn new(fd: usize) -> Self {
Self {
fd,
sq_ring: MappedRegion { addr: 0, len: 0 },
cq_ring: MappedRegion { addr: 0, len: 0 },
sqes: MappedRegion { addr: 0, len: 0 },
}
}
// Not `const`: consuming a type with a `Drop` impl in a const fn would
// require the unstable `const_precise_live_drops` feature.
fn disarm(self) {
core::mem::forget(self);
}
}
impl Drop for SetupGuard {
fn drop(&mut self) {
if self.sqes.len > 0 {
let _ = syscall::munmap(self.sqes.addr, self.sqes.len);
}
if self.cq_ring.len > 0 {
let _ = syscall::munmap(self.cq_ring.addr, self.cq_ring.len);
}
if self.sq_ring.len > 0 {
let _ = syscall::munmap(self.sq_ring.addr, self.sq_ring.len);
}
let _ = syscall::close(self.fd);
}
}
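/// A minimal io_uring instance owning the SQ ring, CQ ring, and SQE array
/// mappings. The raw ring pointers make the type `!Send` and `!Sync`, so a
/// ring is confined to the thread that built it.
///
/// A usage sketch (the `0x42` user_data is arbitrary, and the example assumes
/// a kernel with io_uring support):
///
/// ```ignore
/// let mut ring = IoUring::new(8)?;
/// ring.push_nop(0x42)?;     // queue a no-op SQE
/// ring.submit_and_wait(1)?; // submit and block until one CQE arrives
/// let cqe = ring.complete().expect("one completion was awaited");
/// assert_eq!(cqe.user_data, 0x42);
/// ```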
pub struct IoUring {
fd: usize,
sq_head: *const AtomicU32,
sq_tail: *const AtomicU32,
sq_mask: u32,
sq_flags: *const AtomicU32,
sqes: *mut IoUringSqe,
sq_tail_local: u32,
sq_submitted: u32,
cq_head: *const AtomicU32,
cq_tail: *const AtomicU32,
cq_mask: u32,
cqes: *const IoUringCqe,
cq_head_local: u32,
features: Features,
sq_ring: MappedRegion,
cq_ring: MappedRegion,
sqes_region: MappedRegion,
}
impl IoUring {
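/// Creates a ring with `entries` submission queue entries and default setup
/// flags; use [`IoUring::builder`] to customize parameters.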
pub fn new(entries: u32) -> Result<Self, Error> {
IoUringBuilder::new(entries).build()
}
#[must_use]
pub fn builder(entries: u32) -> IoUringBuilder {
IoUringBuilder::new(entries)
}
#[must_use]
pub const fn features(&self) -> Features {
self.features
}
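/// True if the kernel dropped completions because the CQ overflowed (the
/// `IORING_SQ_CQ_OVERFLOW` bit of the SQ flags word).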
#[must_use]
pub fn cq_overflow(&self) -> bool {
const IORING_SQ_CQ_OVERFLOW: u32 = 1 << 1;
let flags = unsafe { &*self.sq_flags }.load(Ordering::Acquire);
flags & IORING_SQ_CQ_OVERFLOW != 0
}
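/// True if an SQPOLL kernel thread has gone idle and must be woken via
/// `IORING_ENTER_SQ_WAKEUP` before it will see new submissions.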
#[must_use]
pub fn sq_need_wakeup(&self) -> bool {
const IORING_SQ_NEED_WAKEUP: u32 = 1 << 0;
let flags = unsafe { &*self.sq_flags }.load(Ordering::Acquire);
flags & IORING_SQ_NEED_WAKEUP != 0
}
#[allow(clippy::cast_ptr_alignment)]
fn from_params(entries: u32, params: &mut IoUringParams) -> Result<Self, Error> {
let prot = Prot::READ | Prot::WRITE;
let map = MapFlags::SHARED | MapFlags::POPULATE;
let fd = syscall::io_uring_setup(entries, &raw mut *params)?;
let mut guard = SetupGuard::new(fd);
let features = Features::from_raw(params.features);
let single_mmap = features.contains(Features::SINGLE_MMAP);
let sq_ring_sz =
params.sq_off.array as usize + params.sq_entries as usize * core::mem::size_of::<u32>();
let cq_ring_sz = params.cq_off.cqes as usize
+ params.cq_entries as usize * core::mem::size_of::<IoUringCqe>();
let mmap_sz = if single_mmap {
sq_ring_sz.max(cq_ring_sz)
} else {
sq_ring_sz
};
let sq_ring_ptr = syscall::mmap(0, mmap_sz, prot, map, fd, RingOffset::SqRing.into())?;
guard.sq_ring = MappedRegion::new(sq_ring_ptr, mmap_sz);
let (cq_ring_ptr, cq_ring_region) = if single_mmap {
(sq_ring_ptr, MappedRegion::new(0, 0))
} else {
let ptr = syscall::mmap(0, cq_ring_sz, prot, map, fd, RingOffset::CqRing.into())?;
let region = MappedRegion::new(ptr, cq_ring_sz);
guard.cq_ring = MappedRegion::new(ptr, cq_ring_sz);
(ptr, region)
};
let sqes_sz = params.sq_entries as usize * core::mem::size_of::<IoUringSqe>();
let sqes_ptr = syscall::mmap(0, sqes_sz, prot, map, fd, RingOffset::Sqes.into())?;
guard.sqes = MappedRegion::new(sqes_ptr, sqes_sz);
let sq_base = sq_ring_ptr as *const u8;
let sq_head = unsafe { sq_base.add(params.sq_off.head as usize) }.cast::<AtomicU32>();
let sq_tail = unsafe { sq_base.add(params.sq_off.tail as usize) }.cast::<AtomicU32>();
let sq_mask = unsafe { *sq_base.add(params.sq_off.ring_mask as usize).cast::<u32>() };
let sq_flags = unsafe { sq_base.add(params.sq_off.flags as usize) }.cast::<AtomicU32>();
let sq_array = unsafe { sq_base.add(params.sq_off.array as usize) } as *mut u32;
debug_assert!(sq_head.is_aligned(), "sq_head not aligned");
debug_assert!(sq_tail.is_aligned(), "sq_tail not aligned");
debug_assert!(sq_flags.is_aligned(), "sq_flags not aligned");
debug_assert!(sq_array.is_aligned(), "sq_array not aligned");
// Initialize the SQ indirection array to the identity mapping once; `push`
// fills SQE slots in ring order, so the array never changes afterwards.
for i in 0..params.sq_entries {
unsafe { sq_array.add(i as usize).write(i) };
}
let cq_base = cq_ring_ptr as *const u8;
let cq_head = unsafe { cq_base.add(params.cq_off.head as usize) }.cast::<AtomicU32>();
let cq_tail = unsafe { cq_base.add(params.cq_off.tail as usize) }.cast::<AtomicU32>();
let cq_mask = unsafe { *cq_base.add(params.cq_off.ring_mask as usize).cast::<u32>() };
let cqes = unsafe { cq_base.add(params.cq_off.cqes as usize) }.cast::<IoUringCqe>();
debug_assert!(cq_head.is_aligned(), "cq_head not aligned");
debug_assert!(cq_tail.is_aligned(), "cq_tail not aligned");
debug_assert!(cqes.is_aligned(), "cqes not aligned");
let sq_tail_local = unsafe { &*sq_tail }.load(Ordering::Acquire);
let cq_head_local = unsafe { &*cq_head }.load(Ordering::Acquire);
guard.disarm();
Ok(Self {
fd,
sq_head,
sq_tail,
sq_mask,
sq_flags,
sqes: sqes_ptr as *mut IoUringSqe,
sq_tail_local,
sq_submitted: sq_tail_local,
cq_head,
cq_tail,
cq_mask,
cqes,
cq_head_local,
features,
sq_ring: MappedRegion::new(sq_ring_ptr, mmap_sz),
cq_ring: cq_ring_region,
sqes_region: MappedRegion::new(sqes_ptr, sqes_sz),
})
}
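/// Writes `sqe` into the next free SQE slot and advances the local tail.
/// The tail is only published to the kernel by `flush_sq_tail` (which the
/// `submit*` methods call); `Error::EAGAIN` means the ring is full.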
#[inline]
#[allow(clippy::needless_pass_by_value)]
pub fn push(&mut self, sqe: Sqe) -> Result<(), Error> {
let head = unsafe { &*self.sq_head }.load(Ordering::Acquire);
let next_tail = self.sq_tail_local.wrapping_add(1);
if next_tail.wrapping_sub(head) > self.sq_mask + 1 {
return Err(Error::EAGAIN);
}
let idx = self.sq_tail_local & self.sq_mask;
unsafe { self.sqes.add(idx as usize).write(sqe.0) };
self.sq_tail_local = next_tail;
Ok(())
}
pub fn push_nop(&mut self, user_data: u64) -> Result<(), Error> {
self.push(Sqe::nop().user_data(user_data))
}
#[inline]
pub fn flush_sq_tail(&self) {
unsafe { &*self.sq_tail }.store(self.sq_tail_local, Ordering::Release);
}
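/// Publishes the local CQ head and SQ tail, then asks the kernel to consume
/// every SQE pushed since the last submit. Returns the number of SQEs the
/// kernel accepted.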
#[inline]
#[allow(clippy::cast_possible_truncation)]
pub fn submit(&mut self) -> Result<u32, Error> {
self.flush_cq_head();
let to_submit = self.sq_tail_local.wrapping_sub(self.sq_submitted);
self.flush_sq_tail();
if to_submit == 0 {
return Ok(0);
}
let ret = syscall::io_uring_enter(self.fd, to_submit, 0, EnterFlags::default())?;
self.sq_submitted = self.sq_tail_local;
Ok(ret as u32)
}
#[inline]
#[allow(clippy::cast_possible_truncation)]
pub fn submit_and_wait(&mut self, min_complete: u32) -> Result<u32, Error> {
self.flush_cq_head();
let to_submit = self.sq_tail_local.wrapping_sub(self.sq_submitted);
self.flush_sq_tail();
let ret = syscall::io_uring_enter(self.fd, to_submit, min_complete, EnterFlags::GETEVENTS)?;
self.sq_submitted = self.sq_tail_local;
Ok(ret as u32)
}
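/// Submission path for `SQPOLL` rings: publishing the SQ tail is normally
/// enough, and `io_uring_enter` is only issued when the poller thread has
/// gone idle and asked to be woken.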
#[inline]
pub fn submit_sqpoll(&mut self) -> Result<(), Error> {
self.flush_cq_head();
self.flush_sq_tail();
self.sq_submitted = self.sq_tail_local;
if self.sq_need_wakeup() {
syscall::io_uring_enter(self.fd, 0, 0, EnterFlags::SQ_WAKEUP)?;
}
Ok(())
}
#[inline]
fn flush_cq_head(&self) {
unsafe { &*self.cq_head }.store(self.cq_head_local, Ordering::Release);
}
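/// Pops one CQE, if any is visible. The consumed head is only published to
/// the kernel by `sync_cq`/`flush_cq_head` (which the `submit*` methods
/// call), so draining completions stays store-free on the hot path.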
#[inline]
#[must_use]
pub fn complete(&mut self) -> Option<Completion> {
let tail = unsafe { &*self.cq_tail }.load(Ordering::Acquire);
if self.cq_head_local == tail {
return None;
}
let idx = self.cq_head_local & self.cq_mask;
let cqe = unsafe { &*self.cqes.add(idx as usize) };
let completion = Completion {
user_data: cqe.user_data,
result: cqe.res,
flags: CqeFlags::from_raw(cqe.flags),
};
self.cq_head_local = self.cq_head_local.wrapping_add(1);
Some(completion)
}
#[inline]
pub fn sync_cq(&self) {
self.flush_cq_head();
}
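/// Registers fixed buffers (`IORING_REGISTER_BUFFERS`) for the
/// `READ_FIXED`/`WRITE_FIXED` opcodes; the iovecs must remain valid until
/// unregistered.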
#[allow(clippy::cast_possible_truncation)]
pub fn register_buffers(&mut self, bufs: &[IoVec]) -> Result<(), Error> {
syscall::io_uring_register(
self.fd,
RegisterOp::RegisterBuffers.into(),
bufs.as_ptr() as usize,
bufs.len() as u32,
)?;
Ok(())
}
pub fn unregister_buffers(&mut self) -> Result<(), Error> {
syscall::io_uring_register(self.fd, RegisterOp::UnregisterBuffers.into(), 0, 0)?;
Ok(())
}
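/// Registers a provided-buffer ring (`IORING_REGISTER_PBUF_RING`) of `count`
/// buffers of `buf_size` bytes under buffer group `bgid`, pre-populating it
/// with every buffer. `count` must be a power of two no larger than
/// `1 << 16`, since buffer IDs are 16-bit.
///
/// A hedged sketch of the receive side (`sock_fd` and group id `7` are
/// illustrative, and the recv SQE is assumed to be marked for buffer
/// selection via this crate's `Sqe` builder, not shown here):
///
/// ```ignore
/// let mut group = ring.register_provided_buffers(7, 64, 4096)?;
/// // ... push a buffer-select recv SQE for sock_fd and submit, then:
/// let cqe = ring.complete().expect("completion");
/// if let Some(bid) = cqe.buffer_id() {
///     let data = group.buffer(bid, cqe.into_result()?).unwrap();
///     // ... use `data`, then hand the buffer back to the kernel:
///     group.recycle_and_commit(bid);
/// }
/// ```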
#[allow(clippy::cast_possible_truncation)]
pub fn register_provided_buffers(
&mut self,
bgid: u16,
count: u32,
buf_size: u32,
) -> Result<ProvidedBufferRing, Error> {
// `is_power_of_two` already rejects 0; counts above `1 << 16` would
// truncate the 16-bit buffer IDs, so they are rejected as well.
if buf_size == 0 || !count.is_power_of_two() || count > (1 << 16) {
return Err(Error::EINVAL);
}
let prot = Prot::READ | Prot::WRITE;
let map = MapFlags::PRIVATE | MapFlags::ANONYMOUS;
let ring_bytes = (count as usize) * core::mem::size_of::<crate::types::IoUringBuf>();
let ring_addr = syscall::mmap(0, ring_bytes, prot, map, usize::MAX, 0)?;
let bufs_bytes = (count as usize) * (buf_size as usize);
let bufs_addr = match syscall::mmap(0, bufs_bytes, prot, map, usize::MAX, 0) {
Ok(a) => a,
Err(e) => {
let _ = syscall::munmap(ring_addr, ring_bytes);
return Err(e);
}
};
let mut reg = crate::types::IoUringBufReg {
ring_addr: ring_addr as u64,
ring_entries: count,
bgid,
flags: 0,
resv: [0; 3],
};
if let Err(e) = syscall::io_uring_register(
self.fd,
RegisterOp::RegisterPbufRing.into(),
core::ptr::from_mut(&mut reg) as usize,
1,
) {
let _ = syscall::munmap(bufs_addr, bufs_bytes);
let _ = syscall::munmap(ring_addr, ring_bytes);
return Err(e);
}
let mut pbuf = ProvidedBufferRing {
fd: self.fd,
bgid,
mask: count - 1,
entries: count,
ring_addr,
ring_bytes,
bufs_addr,
bufs_bytes,
buf_size,
tail_local: 0,
};
for i in 0..count {
let addr = (bufs_addr + (i as usize) * (buf_size as usize)) as u64;
#[allow(clippy::cast_possible_truncation)]
pbuf.recycle_raw(addr, buf_size, i as u16);
}
pbuf.commit();
Ok(pbuf)
}
pub fn unregister_provided_buffers(&mut self, bgid: u16) -> Result<(), Error> {
let mut reg = crate::types::IoUringBufReg {
bgid,
..Default::default()
};
syscall::io_uring_register(
self.fd,
RegisterOp::UnregisterPbufRing.into(),
core::ptr::from_mut(&mut reg) as usize,
1,
)?;
Ok(())
}
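/// Registers a fixed file table (`IORING_REGISTER_FILES`) so SQEs can refer
/// to files by index rather than by raw fd.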
#[allow(clippy::cast_possible_truncation)]
pub fn register_files(&mut self, fds: &[i32]) -> Result<(), Error> {
syscall::io_uring_register(
self.fd,
RegisterOp::RegisterFiles.into(),
fds.as_ptr() as usize,
fds.len() as u32,
)?;
Ok(())
}
pub fn unregister_files(&mut self) -> Result<(), Error> {
syscall::io_uring_register(self.fd, RegisterOp::UnregisterFiles.into(), 0, 0)?;
Ok(())
}
pub const fn completions(&mut self) -> Completions<'_> {
Completions { ring: self }
}
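/// Pushes one SQE, submits, waits for a single completion, and unwraps its
/// result. This backs the blocking `do_*` wrappers below and assumes the
/// ring is otherwise idle, since it pops whichever CQE arrives first.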
fn run_one(&mut self, sqe: Sqe) -> Result<u32, Error> {
self.push(sqe)?;
self.submit_and_wait(1)?;
self.complete().ok_or(Error::EAGAIN)?.into_result()
}
pub fn do_read(&mut self, fd: i32, buf: &mut [u8], offset: u64) -> Result<u32, Error> {
self.run_one(Sqe::read(fd, buf, offset))
}
pub fn do_write(&mut self, fd: i32, buf: &[u8], offset: u64) -> Result<u32, Error> {
self.run_one(Sqe::write(fd, buf, offset))
}
pub fn do_openat(
&mut self,
dfd: i32,
path: &core::ffi::CStr,
flags: crate::types::OpenFlags,
mode: crate::types::FileMode,
) -> Result<u32, Error> {
self.run_one(Sqe::openat(dfd, path, flags, mode))
}
pub fn do_close(&mut self, fd: i32) -> Result<u32, Error> {
self.run_one(Sqe::close(fd))
}
pub fn do_send(
&mut self,
fd: i32,
buf: &[u8],
flags: crate::types::MsgFlags,
) -> Result<u32, Error> {
self.run_one(Sqe::send(fd, buf, flags))
}
pub fn do_recv(
&mut self,
fd: i32,
buf: &mut [u8],
flags: crate::types::MsgFlags,
) -> Result<u32, Error> {
self.run_one(Sqe::recv(fd, buf, flags))
}
pub fn do_accept(&mut self, fd: i32, flags: crate::types::AcceptFlags) -> Result<u32, Error> {
self.run_one(Sqe::accept(fd, flags))
}
pub fn do_statx(
&mut self,
dfd: i32,
path: &core::ffi::CStr,
flags: crate::types::StatxFlags,
mask: crate::types::StatxMask,
statx_buf: &mut crate::types::Statx,
) -> Result<u32, Error> {
self.run_one(Sqe::statx(dfd, path, flags, mask, statx_buf))
}
pub fn do_fsync(&mut self, fd: i32, flags: crate::types::FsyncFlags) -> Result<u32, Error> {
self.run_one(Sqe::fsync(fd, flags))
}
}
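/// Draining iterator over the CQEs currently visible to userspace; the
/// consumed head is published the next time the ring syncs (see
/// [`IoUring::sync_cq`]).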
pub struct Completions<'a> {
ring: &'a mut IoUring,
}
impl Iterator for Completions<'_> {
type Item = Completion;
fn next(&mut self) -> Option<Self::Item> {
self.ring.complete()
}
fn size_hint(&self) -> (usize, Option<usize>) {
let tail = unsafe { &*self.ring.cq_tail }.load(Ordering::Acquire);
let pending = tail.wrapping_sub(self.ring.cq_head_local) as usize;
(pending, None)
}
}
impl Drop for IoUring {
fn drop(&mut self) {
self.flush_cq_head();
let _ = syscall::munmap(self.sqes_region.addr, self.sqes_region.len);
if self.cq_ring.len > 0 {
let _ = syscall::munmap(self.cq_ring.addr, self.cq_ring.len);
}
let _ = syscall::munmap(self.sq_ring.addr, self.sq_ring.len);
let _ = syscall::close(self.fd);
}
}
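/// An owned provided-buffer ring plus its backing buffer arena. Dropping it
/// unregisters the group and unmaps both regions.
///
/// After consuming a buffer-select completion, return the buffer with
/// `recycle` (batched) or `recycle_and_commit` (immediate); `commit`
/// publishes all batched recycles with one release store of the ring tail.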
pub struct ProvidedBufferRing {
fd: usize,
bgid: u16,
mask: u32,
entries: u32,
ring_addr: usize,
ring_bytes: usize,
bufs_addr: usize,
bufs_bytes: usize,
buf_size: u32,
tail_local: u32,
}
impl ProvidedBufferRing {
#[must_use]
pub const fn bgid(&self) -> u16 {
self.bgid
}
#[must_use]
pub const fn entries(&self) -> u32 {
self.entries
}
#[must_use]
pub const fn buf_size(&self) -> u32 {
self.buf_size
}
#[must_use]
pub fn buffer(&self, buf_id: u16, len: u32) -> Option<&[u8]> {
unsafe {
buffer_slice(
self.bufs_addr as *const u8,
self.entries,
self.buf_size,
buf_id,
len,
)
}
}
#[must_use]
pub fn buffer_mut(&mut self, buf_id: u16, len: u32) -> Option<&mut [u8]> {
unsafe {
buffer_slice_mut(
self.bufs_addr as *mut u8,
self.entries,
self.buf_size,
buf_id,
len,
)
}
}
pub fn recycle(&mut self, buf_id: u16) {
assert!(
u32::from(buf_id) < self.entries,
"buf_id out of range for provided-buffer ring"
);
let off = (buf_id as usize) * (self.buf_size as usize);
let addr = (self.bufs_addr + off) as u64;
self.recycle_raw(addr, self.buf_size, buf_id);
}
pub fn recycle_and_commit(&mut self, buf_id: u16) {
self.recycle(buf_id);
self.commit();
}
pub fn commit(&self) {
let tail_ptr = self.tail_ptr();
#[allow(clippy::cast_possible_truncation)]
let tail = self.tail_local as u16;
unsafe { &*tail_ptr }.store(tail, Ordering::Release);
}
fn recycle_raw(&mut self, addr: u64, len: u32, bid: u16) {
let idx = self.tail_local & self.mask;
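// Write only addr/len/bid: the `resv` field of entry 0 overlays the
// published tail (see `tail_ptr`), so it must never be touched here.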
unsafe {
let entry = (self.ring_addr as *mut crate::types::IoUringBuf).add(idx as usize);
core::ptr::addr_of_mut!((*entry).addr).write(addr);
core::ptr::addr_of_mut!((*entry).len).write(len);
core::ptr::addr_of_mut!((*entry).bid).write(bid);
}
self.tail_local = self.tail_local.wrapping_add(1);
}
const fn tail_ptr(&self) -> *const core::sync::atomic::AtomicU16 {
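// The ring tail overlays the `resv` field of ring entry 0:
// 8 (addr) + 4 (len) + 2 (bid) = byte offset 14.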
const TAIL_OFFSET: usize = 14;
(self.ring_addr + TAIL_OFFSET) as *const core::sync::atomic::AtomicU16
}
}
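/// # Safety
/// `base` must point to at least `entries * buf_size` readable bytes that
/// outlive the caller-chosen `'a`, and the region must not be mutated while
/// the returned slice is live.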
unsafe fn buffer_slice<'a>(
base: *const u8,
entries: u32,
buf_size: u32,
buf_id: u16,
len: u32,
) -> Option<&'a [u8]> {
if u32::from(buf_id) >= entries || len > buf_size {
return None;
}
let off = (buf_id as usize) * (buf_size as usize);
unsafe { Some(core::slice::from_raw_parts(base.add(off), len as usize)) }
}
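/// # Safety
/// `base` must point to at least `entries * buf_size` writable bytes that
/// outlive the caller-chosen `'a`, with no other live references into the
/// region.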
unsafe fn buffer_slice_mut<'a>(
base: *mut u8,
entries: u32,
buf_size: u32,
buf_id: u16,
len: u32,
) -> Option<&'a mut [u8]> {
if u32::from(buf_id) >= entries || len > buf_size {
return None;
}
let off = (buf_id as usize) * (buf_size as usize);
unsafe { Some(core::slice::from_raw_parts_mut(base.add(off), len as usize)) }
}
impl Drop for ProvidedBufferRing {
fn drop(&mut self) {
let mut reg = crate::types::IoUringBufReg {
bgid: self.bgid,
..Default::default()
};
let _ = syscall::io_uring_register(
self.fd,
RegisterOp::UnregisterPbufRing.into(),
core::ptr::from_mut(&mut reg) as usize,
1,
);
let _ = syscall::munmap(self.bufs_addr, self.bufs_bytes);
let _ = syscall::munmap(self.ring_addr, self.ring_bytes);
}
}
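/// Builder for [`IoUring`] setup flags and parameters.
///
/// A hedged sketch (the flag combination is illustrative; `SQPOLL` needs
/// either privileges or a new enough kernel):
///
/// ```ignore
/// let mut ring = IoUring::builder(256)
///     .cq_entries(1024) // CQ larger than the SQ
///     .clamp()          // let the kernel clamp oversized values
///     .sqpoll(2_000)    // kernel-side submission polling, 2s idle timeout
///     .build()?;
/// ```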
pub struct IoUringBuilder {
entries: u32,
params: IoUringParams,
}
impl IoUringBuilder {
#[must_use]
pub fn new(entries: u32) -> Self {
assert!(entries > 0, "io_uring entries must be > 0");
Self {
entries,
params: IoUringParams::default(),
}
}
#[must_use]
pub const fn sqpoll(mut self, idle_ms: u32) -> Self {
self.params.flags |= SetupFlags::SQPOLL.bits();
self.params.sq_thread_idle = idle_ms;
self
}
#[must_use]
pub const fn sqpoll_cpu(mut self, cpu: u32) -> Self {
self.params.flags |= SetupFlags::SQPOLL.bits() | SetupFlags::SQ_AFF.bits();
self.params.sq_thread_cpu = cpu;
self
}
#[must_use]
pub const fn cq_entries(mut self, n: u32) -> Self {
self.params.flags |= SetupFlags::CQSIZE.bits();
self.params.cq_entries = n;
self
}
#[must_use]
pub const fn clamp(mut self) -> Self {
self.params.flags |= SetupFlags::CLAMP.bits();
self
}
#[must_use]
pub const fn single_issuer(mut self) -> Self {
self.params.flags |= SetupFlags::SINGLE_ISSUER.bits();
self
}
#[must_use]
pub const fn attach_wq(mut self, wq_fd: u32) -> Self {
self.params.flags |= SetupFlags::ATTACH_WQ.bits();
self.params.wq_fd = wq_fd;
self
}
#[must_use]
pub const fn setup_flags(mut self, flags: SetupFlags) -> Self {
self.params.flags |= flags.bits();
self
}
pub fn build(mut self) -> Result<IoUring, Error> {
IoUring::from_params(self.entries, &mut self.params)
}
}
#[cfg(test)]
mod buffer_slice_tests {
extern crate std;
use std::{vec, vec::Vec};
use super::{buffer_slice, buffer_slice_mut};
const ENTRIES: u32 = 4;
const BUF_SIZE: u32 = 8;
fn backing() -> Vec<u8> {
vec![0u8; (ENTRIES * BUF_SIZE) as usize]
}
#[test]
fn round_trip_write_then_read() {
let mut mem = backing();
let base = mem.as_mut_ptr();
for id in 0..ENTRIES as u16 {
let slot = unsafe { buffer_slice_mut(base, ENTRIES, BUF_SIZE, id, BUF_SIZE) }.unwrap();
slot.fill(id as u8 + 1);
}
for id in 0..ENTRIES as u16 {
let slot = unsafe { buffer_slice(base.cast_const(), ENTRIES, BUF_SIZE, id, BUF_SIZE) }.unwrap();
assert!(slot.iter().all(|&b| b == id as u8 + 1));
}
}
#[test]
fn partial_len_returns_prefix() {
let mut mem = backing();
let base = mem.as_mut_ptr();
let slot = unsafe { buffer_slice_mut(base, ENTRIES, BUF_SIZE, 2, BUF_SIZE) }.unwrap();
slot.copy_from_slice(b"ABCDEFGH");
let got = unsafe { buffer_slice(base.cast_const(), ENTRIES, BUF_SIZE, 2, 3) }.unwrap();
assert_eq!(got, b"ABC");
assert_eq!(got.len(), 3);
}
#[test]
fn zero_len_is_empty_slice_not_none() {
let mut mem = backing();
let base = mem.as_mut_ptr();
let slot = unsafe { buffer_slice(base.cast_const(), ENTRIES, BUF_SIZE, 0, 0) }.unwrap();
assert!(slot.is_empty());
}
#[test]
fn out_of_range_buf_id_returns_none() {
let mut mem = backing();
let base = mem.as_mut_ptr();
let got = unsafe { buffer_slice(base.cast_const(), ENTRIES, BUF_SIZE, ENTRIES as u16, 1) };
assert!(got.is_none());
}
#[test]
fn len_exceeding_buf_size_returns_none() {
let mut mem = backing();
let base = mem.as_mut_ptr();
let got = unsafe { buffer_slice(base.cast_const(), ENTRIES, BUF_SIZE, 0, BUF_SIZE + 1) };
assert!(got.is_none());
}
#[test]
fn adjacent_slots_are_disjoint() {
let mut mem = backing();
let base = mem.as_mut_ptr();
let a = unsafe { buffer_slice_mut(base, ENTRIES, BUF_SIZE, 0, BUF_SIZE) }.unwrap();
let b = unsafe { buffer_slice_mut(base, ENTRIES, BUF_SIZE, 1, BUF_SIZE) }.unwrap();
a.fill(0xAA);
b.fill(0xBB);
assert!(a.iter().all(|&x| x == 0xAA));
assert!(b.iter().all(|&x| x == 0xBB));
}
}