#![allow(unsafe_op_in_unsafe_fn, non_snake_case)]
#![warn(clippy::pedantic)]
#![allow(clippy::missing_safety_doc,
         clippy::cast_sign_loss,
         clippy::similar_names,
         clippy::cast_possible_truncation,
         clippy::cast_possible_wrap,
         clippy::cast_ptr_alignment,
         clippy::used_underscore_items,
         clippy::unnecessary_cast)]

mod uring;

use std::{
    mem::{self, zeroed},
    os::raw::{c_char, c_int, c_longlong, c_uint, c_ushort, c_void},
    ptr,
    sync::atomic::{
        AtomicU16, AtomicU32,
        Ordering::{self, Acquire, Relaxed, Release},
    },
    time::Duration,
};

pub use uring::*;

const LIBURING_UDATA_TIMEOUT: u64 = u64::MAX;

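/// Typed loads and stores on the ring words shared with the kernel,
/// mirroring liburing's `io_uring_smp_*` / `IO_URING_*_ONCE` macros via
/// `AtomicU16`/`AtomicU32` views created with `from_ptr`.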
trait Atomic: Copy
{
    unsafe fn store(p: *mut Self, val: Self, order: Ordering);
    unsafe fn load(p: *mut Self, order: Ordering) -> Self;
}

impl Atomic for u32
{
    #[inline]
    unsafe fn store(p: *mut u32, val: u32, order: Ordering)
    {
        AtomicU32::from_ptr(p).store(val, order);
    }

    #[inline]
    unsafe fn load(p: *mut u32, order: Ordering) -> u32
    {
        AtomicU32::from_ptr(p).load(order)
    }
}

impl Atomic for u16
{
    #[inline]
    unsafe fn store(p: *mut u16, val: u16, order: Ordering)
    {
        AtomicU16::from_ptr(p).store(val, order);
    }

    #[inline]
    unsafe fn load(p: *mut u16, order: Ordering) -> u16
    {
        AtomicU16::from_ptr(p).load(order)
    }
}

unsafe fn io_uring_smp_store_release<T: Atomic>(p: *mut T, v: T)
{
    Atomic::store(p, v, Release);
}

unsafe fn io_uring_smp_load_acquire<T: Atomic>(p: *const T) -> T
{
    Atomic::load(p.cast_mut(), Acquire)
}

unsafe fn IO_URING_READ_ONCE<T: Atomic>(var: *const T) -> T
{
    Atomic::load(var.cast_mut(), Relaxed)
}

unsafe fn IO_URING_WRITE_ONCE<T: Atomic>(var: *mut T, val: T)
{
    Atomic::store(var, val, Relaxed);
}

#[must_use]
#[inline]
unsafe fn uring_ptr_to_u64(ptr: *const c_void) -> u64
{
    ptr as u64
}

#[inline]
pub unsafe fn io_uring_opcode_supported(p: *mut io_uring_probe, op: c_int) -> c_int
{
    if op > (*p).last_op.into() {
        return 0;
    }

    i32::from((*(*p).ops.as_ptr().add(op as _)).flags & IO_URING_OP_SUPPORTED as u16 != 0)
}

#[must_use]
#[inline]
pub fn io_uring_cqe_shift_from_flags(flags: c_uint) -> c_uint
{
    u32::from(flags & IORING_SETUP_CQE32 != 0)
}

#[must_use]
#[inline]
pub unsafe fn io_uring_cqe_shift(ring: *const io_uring) -> c_uint
{
    io_uring_cqe_shift_from_flags((*ring).flags)
}

#[must_use]
#[inline]
pub unsafe fn io_uring_cqe_nr(cqe: *const io_uring_cqe) -> c_uint
{
    let shift = i32::from((*cqe).flags & IORING_CQE_F_32 != 0);
    1 << shift
}

#[inline]
unsafe fn io_uring_cqe_iter_init(ring: *const io_uring) -> io_uring_cqe_iter
{
    io_uring_cqe_iter { cqes: (*ring).cq.cqes,
                        mask: (*ring).cq.ring_mask,
                        shift: io_uring_cqe_shift(ring),
                        head: *(*ring).cq.khead,
                        tail: io_uring_smp_load_acquire((*ring).cq.ktail) }
}

#[inline]
unsafe fn io_uring_cqe_iter_next(iter: *mut io_uring_cqe_iter, cqe: *mut *mut io_uring_cqe)
                                 -> bool
{
    if (*iter).head == (*iter).tail {
        return false;
    }

    let head = (*iter).head;
    (*iter).head += 1;

    let offset = (head & (*iter).mask) << (*iter).shift;
    *cqe = (*iter).cqes.add(offset as usize);

    if (**cqe).flags & IORING_CQE_F_32 > 0 {
        (*iter).head += 1;
    }

    true
}

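/// Calls `f` on every completion currently visible in the CQ ring
/// without consuming them; pair it with [`io_uring_cq_advance`] to
/// release the entries afterwards.
///
/// A minimal usage sketch (`ignore`d rather than compiled, since it
/// assumes an already-initialized `ring: io_uring`):
///
/// ```ignore
/// unsafe {
///     let mut seen = 0;
///     io_uring_for_each_cqe(&raw mut ring, |cqe| {
///         println!("res = {}", (*cqe).res);
///         seen += 1;
///     });
///     io_uring_cq_advance(&raw mut ring, seen);
/// }
/// ```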
pub unsafe fn io_uring_for_each_cqe<F>(ring: *mut io_uring, mut f: F)
    where F: FnMut(*mut io_uring_cqe)
{
    let mut iter = io_uring_cqe_iter_init(ring);
    let mut cqe = ptr::null_mut::<io_uring_cqe>();
    while io_uring_cqe_iter_next(&raw mut iter, &raw mut cqe) {
        f(cqe);
    }
}

#[inline]
pub unsafe fn io_uring_cq_advance(ring: *mut io_uring, nr: c_uint)
{
    if nr > 0 {
        let cq = &raw mut (*ring).cq;

        io_uring_smp_store_release((*cq).khead, *(*cq).khead + nr);
    }
}

#[inline]
pub unsafe fn io_uring_cqe_seen(ring: *mut io_uring, cqe: *mut io_uring_cqe)
{
    if !cqe.is_null() {
        io_uring_cq_advance(ring, io_uring_cqe_nr(cqe));
    }
}

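/// Tags the SQE with an opaque pointer that the kernel echoes back in
/// `user_data` of the matching CQE.
///
/// Round-trip sketch (`ignore`d: assumes an initialized ring and a raw
/// `ctx` pointer that outlives the request):
///
/// ```ignore
/// unsafe {
///     let sqe = io_uring_get_sqe(&raw mut ring);
///     io_uring_prep_nop(sqe);
///     io_uring_sqe_set_data(sqe, ctx.cast::<c_void>());
///     // ... submit, then wait ...
///     let mut cqe = ptr::null_mut();
///     io_uring_wait_cqe(&raw mut ring, &raw mut cqe);
///     let ctx = io_uring_cqe_get_data(cqe);
///     io_uring_cqe_seen(&raw mut ring, cqe);
/// }
/// ```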
#[inline]
pub unsafe fn io_uring_sqe_set_data(sqe: *mut io_uring_sqe, data: *mut c_void)
{
    (*sqe).user_data = data as u64;
}

#[must_use]
#[inline]
pub unsafe fn io_uring_cqe_get_data(cqe: *const io_uring_cqe) -> *mut c_void
{
    (*cqe).user_data as *mut c_void
}

#[inline]
pub unsafe fn io_uring_sqe_set_data64(sqe: *mut io_uring_sqe, data: u64)
{
    (*sqe).user_data = data;
}

#[must_use]
#[inline]
pub unsafe fn io_uring_cqe_get_data64(cqe: *const io_uring_cqe) -> u64
{
    (*cqe).user_data
}

#[inline]
pub unsafe fn io_uring_sqe_set_flags(sqe: *mut io_uring_sqe, flags: c_uint)
{
    (*sqe).flags = flags as u8;
}

#[inline]
pub unsafe fn io_uring_sqe_set_buf_group(sqe: *mut io_uring_sqe, bgid: c_int)
{
    (*sqe).__liburing_anon_4.buf_group = bgid as u16;
}

#[inline]
unsafe fn __io_uring_set_target_fixed_file(sqe: *mut io_uring_sqe, file_index: c_uint)
{
    // 0 means "no fixed file", so fixed-file indexes are encoded as index + 1.
    (*sqe).__liburing_anon_5.file_index = file_index + 1;
}

#[inline]
pub unsafe fn io_uring_initialize_sqe(sqe: *mut io_uring_sqe)
{
    (*sqe).flags = 0;
    (*sqe).ioprio = 0;
    (*sqe).__liburing_anon_3.rw_flags = 0;
    (*sqe).__liburing_anon_4.buf_index = 0;
    (*sqe).personality = 0;
    (*sqe).__liburing_anon_5.file_index = 0;
    (*sqe).__liburing_anon_6.__liburing_anon_1.as_mut().addr3 = 0;
    (*sqe).__liburing_anon_6.__liburing_anon_1.as_mut().__pad2[0] = 0;
}

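/// Common initializer shared by the `io_uring_prep_*` helpers below:
/// fills `opcode`, `fd`, `off`, `addr` and `len`, leaving the remaining
/// per-opcode fields as zeroed by [`io_uring_initialize_sqe`].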
#[inline]
pub unsafe fn io_uring_prep_rw(op: c_uint, sqe: *mut io_uring_sqe, fd: c_int, addr: *const c_void,
                               len: c_uint, offset: __u64)
{
    (*sqe).opcode = op as u8;
    (*sqe).fd = fd;
    (*sqe).__liburing_anon_1.off = offset;
    (*sqe).__liburing_anon_2.addr = addr as u64;
    (*sqe).len = len;
}

#[inline]
pub unsafe fn io_uring_prep_splice(sqe: *mut io_uring_sqe, fd_in: c_int, off_in: i64,
                                   fd_out: c_int, off_out: i64, nbytes: c_uint,
                                   splice_flags: c_uint)
{
    io_uring_prep_rw(IORING_OP_SPLICE, sqe, fd_out, ptr::null_mut(), nbytes, off_out as u64);
    (*sqe).__liburing_anon_2.splice_off_in = off_in as u64;
    (*sqe).__liburing_anon_5.splice_fd_in = fd_in;
    (*sqe).__liburing_anon_3.splice_flags = splice_flags;
}

#[inline]
pub unsafe fn io_uring_prep_tee(sqe: *mut io_uring_sqe, fd_in: c_int, fd_out: c_int,
                                nbytes: c_uint, splice_flags: c_uint)
{
    io_uring_prep_rw(IORING_OP_TEE, sqe, fd_out, ptr::null_mut(), nbytes, 0);
    (*sqe).__liburing_anon_2.splice_off_in = 0;
    (*sqe).__liburing_anon_5.splice_fd_in = fd_in;
    (*sqe).__liburing_anon_3.splice_flags = splice_flags;
}

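/// Prepares a vectored read, the io_uring analogue of `preadv(2)`.
///
/// Sketch (`ignore`d: assumes an initialized ring, an open `fd`, and
/// that the raw `io_uring_submit` binding is in scope):
///
/// ```ignore
/// unsafe {
///     let mut buf = [0u8; 4096];
///     let iov = iovec { iov_base: buf.as_mut_ptr().cast(),
///                       iov_len: buf.len() };
///     let sqe = io_uring_get_sqe(&raw mut ring);
///     io_uring_prep_readv(sqe, fd, &raw const iov, 1, 0);
///     io_uring_submit(&raw mut ring);
/// }
/// ```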
#[inline]
pub unsafe fn io_uring_prep_readv(sqe: *mut io_uring_sqe, fd: c_int, iovecs: *const iovec,
                                  nr_vecs: c_uint, offset: u64)
{
    io_uring_prep_rw(IORING_OP_READV, sqe, fd, iovecs.cast(), nr_vecs, offset);
}

#[inline]
pub unsafe fn io_uring_prep_readv2(sqe: *mut io_uring_sqe, fd: c_int, iovecs: *const iovec,
                                   nr_vecs: c_uint, offset: u64, flags: c_int)
{
    io_uring_prep_readv(sqe, fd, iovecs, nr_vecs, offset);
    (*sqe).__liburing_anon_3.rw_flags = flags;
}

#[inline]
pub unsafe fn io_uring_prep_read_fixed(sqe: *mut io_uring_sqe, fd: c_int, buf: *mut c_void,
                                       nbytes: c_uint, offset: u64, buf_index: c_int)
{
    io_uring_prep_rw(IORING_OP_READ_FIXED, sqe, fd, buf, nbytes, offset);
    (*sqe).__liburing_anon_4.buf_index = buf_index as u16;
}

#[inline]
pub unsafe fn io_uring_prep_readv_fixed(sqe: *mut io_uring_sqe, fd: c_int, iovecs: *const iovec,
                                        nr_vecs: c_uint, offset: u64, flags: c_int,
                                        buf_index: c_int)
{
    io_uring_prep_readv2(sqe, fd, iovecs, nr_vecs, offset, flags);
    (*sqe).opcode = IORING_OP_READV_FIXED as _;
    (*sqe).__liburing_anon_4.buf_index = buf_index as u16;
}

#[inline]
pub unsafe fn io_uring_prep_writev(sqe: *mut io_uring_sqe, fd: c_int, iovecs: *const iovec,
                                   nr_vecs: c_uint, offset: u64)
{
    io_uring_prep_rw(IORING_OP_WRITEV, sqe, fd, iovecs.cast(), nr_vecs, offset);
}

#[inline]
pub unsafe fn io_uring_prep_writev2(sqe: *mut io_uring_sqe, fd: c_int, iovecs: *const iovec,
                                    nr_vecs: c_uint, offset: u64, flags: c_int)
{
    io_uring_prep_writev(sqe, fd, iovecs, nr_vecs, offset);
    (*sqe).__liburing_anon_3.rw_flags = flags;
}

#[inline]
pub unsafe fn io_uring_prep_write_fixed(sqe: *mut io_uring_sqe, fd: c_int, buf: *const c_void,
                                        nbytes: c_uint, offset: u64, buf_index: c_int)
{
    io_uring_prep_rw(IORING_OP_WRITE_FIXED, sqe, fd, buf, nbytes, offset);
    (*sqe).__liburing_anon_4.buf_index = buf_index as u16;
}

#[inline]
pub unsafe fn io_uring_prep_writev_fixed(sqe: *mut io_uring_sqe, fd: c_int, iovecs: *const iovec,
                                         nr_vecs: c_uint, offset: u64, flags: c_int,
                                         buf_index: c_int)
{
    io_uring_prep_writev2(sqe, fd, iovecs, nr_vecs, offset, flags);
    (*sqe).opcode = IORING_OP_WRITEV_FIXED as _;
    (*sqe).__liburing_anon_4.buf_index = buf_index as u16;
}

#[inline]
pub unsafe fn io_uring_prep_recvmsg(sqe: *mut io_uring_sqe, fd: c_int, msg: *mut msghdr,
                                    flags: c_uint)
{
    io_uring_prep_rw(IORING_OP_RECVMSG, sqe, fd, msg.cast(), 1, 0);
    (*sqe).__liburing_anon_3.msg_flags = flags;
}

#[inline]
pub unsafe fn io_uring_prep_recvmsg_multishot(sqe: *mut io_uring_sqe, fd: c_int, msg: *mut msghdr,
                                              flags: c_uint)
{
    io_uring_prep_recvmsg(sqe, fd, msg, flags);
    (*sqe).ioprio |= IORING_RECV_MULTISHOT as u16;
}

#[inline]
pub unsafe fn io_uring_prep_sendmsg(sqe: *mut io_uring_sqe, fd: c_int, msg: *const msghdr,
                                    flags: c_uint)
{
    io_uring_prep_rw(IORING_OP_SENDMSG, sqe, fd, msg.cast(), 1, 0);
    (*sqe).__liburing_anon_3.msg_flags = flags;
}

#[must_use]
#[inline]
pub fn __io_uring_prep_poll_mask(poll_mask: c_uint) -> c_uint
{
    poll_mask.to_le()
}

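/// Arms a one-shot poll on `fd`; the completion's `res` carries the
/// returned event mask.
///
/// Sketch (`ignore`d: assumes an initialized ring and a pollable `fd`;
/// `POLLIN` would come from the `libc` crate or equivalent bindings):
///
/// ```ignore
/// unsafe {
///     let sqe = io_uring_get_sqe(&raw mut ring);
///     io_uring_prep_poll_add(sqe, fd, POLLIN as c_uint);
///     io_uring_sqe_set_data64(sqe, 42);
/// }
/// ```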
#[inline]
pub unsafe fn io_uring_prep_poll_add(sqe: *mut io_uring_sqe, fd: c_int, poll_mask: c_uint)
{
    io_uring_prep_rw(IORING_OP_POLL_ADD, sqe, fd, ptr::null_mut(), 0, 0);
    (*sqe).__liburing_anon_3.poll32_events = __io_uring_prep_poll_mask(poll_mask);
}

#[inline]
pub unsafe fn io_uring_prep_poll_multishot(sqe: *mut io_uring_sqe, fd: c_int, poll_mask: c_uint)
{
    io_uring_prep_poll_add(sqe, fd, poll_mask);
    (*sqe).len = IORING_POLL_ADD_MULTI;
}

#[inline]
pub unsafe fn io_uring_prep_poll_remove(sqe: *mut io_uring_sqe, user_data: u64)
{
    io_uring_prep_rw(IORING_OP_POLL_REMOVE, sqe, -1, ptr::null_mut(), 0, 0);
    (*sqe).__liburing_anon_2.addr = user_data;
}

#[inline]
pub unsafe fn io_uring_prep_poll_update(sqe: *mut io_uring_sqe, old_user_data: u64,
                                        new_user_data: u64, poll_mask: c_uint, flags: c_uint)
{
    io_uring_prep_rw(IORING_OP_POLL_REMOVE, sqe, -1, ptr::null_mut(), flags, new_user_data);
    (*sqe).__liburing_anon_2.addr = old_user_data;
    (*sqe).__liburing_anon_3.poll32_events = __io_uring_prep_poll_mask(poll_mask);
}

#[inline]
pub unsafe fn io_uring_prep_fsync(sqe: *mut io_uring_sqe, fd: c_int, fsync_flags: c_uint)
{
    io_uring_prep_rw(IORING_OP_FSYNC, sqe, fd, ptr::null_mut(), 0, 0);
    (*sqe).__liburing_anon_3.fsync_flags = fsync_flags;
}

#[inline]
pub unsafe fn io_uring_prep_nop(sqe: *mut io_uring_sqe)
{
    io_uring_prep_rw(IORING_OP_NOP, sqe, -1, ptr::null_mut(), 0, 0);
}

#[inline]
pub unsafe fn io_uring_prep_nop128(sqe: *mut io_uring_sqe)
{
    io_uring_prep_rw(IORING_OP_NOP128, sqe, -1, ptr::null_mut(), 0, 0);
}

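/// Prepares a timeout that completes with `-ETIME` once `ts` expires,
/// or earlier once `count` other completions have been posted.
///
/// Sketch (`ignore`d: assumes an initialized ring; the `From<Duration>`
/// impl at the bottom of this file builds the timespec):
///
/// ```ignore
/// unsafe {
///     let ts: __kernel_timespec = Duration::from_millis(500).into();
///     let sqe = io_uring_get_sqe(&raw mut ring);
///     io_uring_prep_timeout(sqe, &raw const ts, 0, 0);
/// }
/// ```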
#[inline]
pub unsafe fn io_uring_prep_timeout(sqe: *mut io_uring_sqe, ts: *const __kernel_timespec,
                                    count: c_uint, flags: c_uint)
{
    io_uring_prep_rw(IORING_OP_TIMEOUT, sqe, -1, ts.cast(), 1, count.into());
    (*sqe).__liburing_anon_3.timeout_flags = flags;
}

#[inline]
pub unsafe fn io_uring_prep_timeout_remove(sqe: *mut io_uring_sqe, user_data: __u64, flags: c_uint)
{
    io_uring_prep_rw(IORING_OP_TIMEOUT_REMOVE, sqe, -1, ptr::null_mut(), 0, 0);
    (*sqe).__liburing_anon_2.addr = user_data;
    (*sqe).__liburing_anon_3.timeout_flags = flags;
}

#[inline]
pub unsafe fn io_uring_prep_timeout_update(sqe: *mut io_uring_sqe, ts: *const __kernel_timespec,
                                           user_data: __u64, flags: c_uint)
{
    io_uring_prep_rw(IORING_OP_TIMEOUT_REMOVE, sqe, -1, ptr::null_mut(), 0, ts as u64);
    (*sqe).__liburing_anon_2.addr = user_data;
    (*sqe).__liburing_anon_3.timeout_flags = flags | IORING_TIMEOUT_UPDATE;
}

#[inline]
pub unsafe fn io_uring_prep_accept(sqe: *mut io_uring_sqe, fd: c_int, addr: *mut sockaddr,
                                   addrlen: *mut socklen_t, flags: c_int)
{
    io_uring_prep_rw(IORING_OP_ACCEPT, sqe, fd, addr.cast(), 0, uring_ptr_to_u64(addrlen.cast()));
    (*sqe).__liburing_anon_3.accept_flags = flags as u32;
}

#[inline]
pub unsafe fn io_uring_prep_accept_direct(sqe: *mut io_uring_sqe, fd: c_int, addr: *mut sockaddr,
                                          addrlen: *mut socklen_t, flags: c_int,
                                          mut file_index: c_uint)
{
    io_uring_prep_accept(sqe, fd, addr, addrlen, flags);
    if file_index == IORING_FILE_INDEX_ALLOC as _ {
        file_index -= 1;
    }
    __io_uring_set_target_fixed_file(sqe, file_index);
}

#[inline]
pub unsafe fn io_uring_prep_multishot_accept(sqe: *mut io_uring_sqe, fd: c_int,
                                             addr: *mut sockaddr, addrlen: *mut socklen_t,
                                             flags: c_int)
{
    io_uring_prep_accept(sqe, fd, addr, addrlen, flags);
    (*sqe).ioprio |= IORING_ACCEPT_MULTISHOT as u16;
}

#[inline]
pub unsafe fn io_uring_prep_multishot_accept_direct(sqe: *mut io_uring_sqe, fd: c_int,
                                                    addr: *mut sockaddr, addrlen: *mut socklen_t,
                                                    flags: c_int)
{
    io_uring_prep_multishot_accept(sqe, fd, addr, addrlen, flags);
    __io_uring_set_target_fixed_file(sqe, (IORING_FILE_INDEX_ALLOC - 1) as u32);
}

#[inline]
pub unsafe fn io_uring_prep_cancel64(sqe: *mut io_uring_sqe, user_data: u64, flags: c_int)
{
    io_uring_prep_rw(IORING_OP_ASYNC_CANCEL, sqe, -1, ptr::null_mut(), 0, 0);
    (*sqe).__liburing_anon_2.addr = user_data;
    (*sqe).__liburing_anon_3.cancel_flags = flags as u32;
}

#[inline]
pub unsafe fn io_uring_prep_cancel(sqe: *mut io_uring_sqe, user_data: *const c_void, flags: c_int)
{
    io_uring_prep_cancel64(sqe, user_data as usize as u64, flags);
}

#[inline]
pub unsafe fn io_uring_prep_cancel_fd(sqe: *mut io_uring_sqe, fd: c_int, flags: c_uint)
{
    io_uring_prep_rw(IORING_OP_ASYNC_CANCEL, sqe, fd, ptr::null_mut(), 0, 0);
    (*sqe).__liburing_anon_3.cancel_flags = flags | IORING_ASYNC_CANCEL_FD;
}

#[inline]
pub unsafe fn io_uring_prep_link_timeout(sqe: *mut io_uring_sqe, ts: *const __kernel_timespec,
                                         flags: c_uint)
{
    io_uring_prep_rw(IORING_OP_LINK_TIMEOUT, sqe, -1, ts.cast(), 1, 0);
    (*sqe).__liburing_anon_3.timeout_flags = flags;
}

#[inline]
pub unsafe fn io_uring_prep_connect(sqe: *mut io_uring_sqe, fd: c_int, addr: *const sockaddr,
                                    addrlen: socklen_t)
{
    io_uring_prep_rw(IORING_OP_CONNECT, sqe, fd, addr.cast(), 0, addrlen.into());
}

#[inline]
pub unsafe fn io_uring_prep_bind(sqe: *mut io_uring_sqe, fd: c_int, addr: *const sockaddr,
                                 addrlen: socklen_t)
{
    io_uring_prep_rw(IORING_OP_BIND, sqe, fd, addr.cast(), 0, addrlen.into());
}

#[inline]
pub unsafe fn io_uring_prep_listen(sqe: *mut io_uring_sqe, fd: c_int, backlog: c_int)
{
    io_uring_prep_rw(IORING_OP_LISTEN, sqe, fd, ptr::null_mut(), backlog as _, 0);
}

#[inline]
pub unsafe fn io_uring_prep_epoll_wait(sqe: *mut io_uring_sqe, fd: c_int,
                                       events: *mut epoll_event, maxevents: c_int, flags: c_uint)
{
    io_uring_prep_rw(IORING_OP_EPOLL_WAIT, sqe, fd, events.cast(), maxevents as _, 0);
    (*sqe).__liburing_anon_3.rw_flags = flags as _;
}

#[inline]
pub unsafe fn io_uring_prep_files_update(sqe: *mut io_uring_sqe, fds: *const c_int,
                                         nr_fds: c_uint, offset: c_int)
{
    io_uring_prep_rw(IORING_OP_FILES_UPDATE, sqe, -1, fds.cast(), nr_fds, offset as u64);
}

#[inline]
pub unsafe fn io_uring_prep_fallocate(sqe: *mut io_uring_sqe, fd: c_int, mode: c_int, offset: u64,
                                      len: u64)
{
    io_uring_prep_rw(IORING_OP_FALLOCATE, sqe, fd, ptr::null_mut(), mode as c_uint, offset);
    (*sqe).__liburing_anon_2.addr = len;
}

#[inline]
pub unsafe fn io_uring_prep_openat(sqe: *mut io_uring_sqe, dfd: c_int, path: *const c_char,
                                   flags: c_int, mode: mode_t)
{
    io_uring_prep_rw(IORING_OP_OPENAT, sqe, dfd, path.cast(), mode, 0);
    (*sqe).__liburing_anon_3.open_flags = flags as u32;
}

#[inline]
pub unsafe fn io_uring_prep_openat_direct(sqe: *mut io_uring_sqe, dfd: c_int, path: *const c_char,
                                          flags: c_int, mode: mode_t, mut file_index: c_uint)
{
    io_uring_prep_openat(sqe, dfd, path, flags, mode);
    if file_index == IORING_FILE_INDEX_ALLOC as _ {
        file_index -= 1;
    }
    __io_uring_set_target_fixed_file(sqe, file_index);
}

#[inline]
pub unsafe fn io_uring_prep_open(sqe: *mut io_uring_sqe, path: *const c_char, flags: c_int,
                                 mode: mode_t)
{
    io_uring_prep_openat(sqe, AT_FDCWD, path, flags, mode);
}

#[inline]
pub unsafe fn io_uring_prep_open_direct(sqe: *mut io_uring_sqe, path: *const c_char, flags: c_int,
                                        mode: mode_t, file_index: c_uint)
{
    io_uring_prep_openat_direct(sqe, AT_FDCWD, path, flags, mode, file_index);
}

#[inline]
pub unsafe fn io_uring_prep_close(sqe: *mut io_uring_sqe, fd: c_int)
{
    io_uring_prep_rw(IORING_OP_CLOSE, sqe, fd, ptr::null_mut(), 0, 0);
}

#[inline]
pub unsafe fn io_uring_prep_close_direct(sqe: *mut io_uring_sqe, file_index: c_uint)
{
    io_uring_prep_close(sqe, 0);
    __io_uring_set_target_fixed_file(sqe, file_index);
}

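/// Prepares a plain `read(2)`-style request into a caller-owned buffer.
///
/// Sketch (`ignore`d: assumes an initialized ring, an open `fd`, and a
/// buffer that outlives the request):
///
/// ```ignore
/// unsafe {
///     let mut buf = vec![0u8; 4096];
///     let sqe = io_uring_get_sqe(&raw mut ring);
///     io_uring_prep_read(sqe, fd, buf.as_mut_ptr().cast(),
///                        buf.len() as c_uint, 0);
/// }
/// ```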
#[inline]
pub unsafe fn io_uring_prep_read(sqe: *mut io_uring_sqe, fd: c_int, buf: *mut c_void,
                                 nbytes: c_uint, offset: u64)
{
    io_uring_prep_rw(IORING_OP_READ, sqe, fd, buf, nbytes, offset);
}

#[inline]
pub unsafe fn io_uring_prep_read_multishot(sqe: *mut io_uring_sqe, fd: c_int, nbytes: c_uint,
                                           offset: u64, buf_group: c_int)
{
    io_uring_prep_rw(IORING_OP_READ_MULTISHOT, sqe, fd, ptr::null_mut(), nbytes, offset);
    (*sqe).__liburing_anon_4.buf_group = buf_group as _;
    (*sqe).flags = IOSQE_BUFFER_SELECT as _;
}

#[inline]
pub unsafe fn io_uring_prep_write(sqe: *mut io_uring_sqe, fd: c_int, buf: *const c_void,
                                  nbytes: c_uint, offset: u64)
{
    io_uring_prep_rw(IORING_OP_WRITE, sqe, fd, buf, nbytes, offset);
}

#[inline]
pub unsafe fn io_uring_prep_statx(sqe: *mut io_uring_sqe, dfd: c_int, path: *const c_char,
                                  flags: c_int, mask: c_uint, statxbuf: *mut statx)
{
    io_uring_prep_rw(IORING_OP_STATX,
                     sqe,
                     dfd,
                     path.cast(),
                     mask,
                     uring_ptr_to_u64(statxbuf.cast()));
    (*sqe).__liburing_anon_3.statx_flags = flags as u32;
}

#[inline]
pub unsafe fn io_uring_prep_fadvise(sqe: *mut io_uring_sqe, fd: c_int, offset: u64, len: u32,
                                    advice: c_int)
{
    io_uring_prep_rw(IORING_OP_FADVISE, sqe, fd, ptr::null_mut(), len, offset);
    (*sqe).__liburing_anon_3.fadvise_advice = advice as u32;
}

#[inline]
pub unsafe fn io_uring_prep_madvise(sqe: *mut io_uring_sqe, addr: *mut c_void, length: u32,
                                    advice: c_int)
{
    io_uring_prep_rw(IORING_OP_MADVISE, sqe, -1, addr, length, 0);
    (*sqe).__liburing_anon_3.fadvise_advice = advice as u32;
}

#[inline]
pub unsafe fn io_uring_prep_fadvise64(sqe: *mut io_uring_sqe, fd: c_int, offset: u64, len: off_t,
                                      advice: c_int)
{
    io_uring_prep_rw(IORING_OP_FADVISE, sqe, fd, ptr::null_mut(), 0, offset);
    (*sqe).__liburing_anon_2.addr = len as _;
    (*sqe).__liburing_anon_3.fadvise_advice = advice as u32;
}

#[inline]
pub unsafe fn io_uring_prep_madvise64(sqe: *mut io_uring_sqe, addr: *mut c_void, length: off_t,
                                      advice: c_int)
{
    io_uring_prep_rw(IORING_OP_MADVISE, sqe, -1, addr, 0, length as _);
    (*sqe).__liburing_anon_3.fadvise_advice = advice as u32;
}

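/// Prepares a `send(2)` on a connected socket; `flags` takes the usual
/// `MSG_*` values.
///
/// Sketch (`ignore`d: assumes an initialized ring and a connected
/// `sockfd`):
///
/// ```ignore
/// unsafe {
///     let payload = b"hello";
///     let sqe = io_uring_get_sqe(&raw mut ring);
///     io_uring_prep_send(sqe, sockfd, payload.as_ptr().cast(),
///                        payload.len(), 0);
/// }
/// ```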
#[inline]
pub unsafe fn io_uring_prep_send(sqe: *mut io_uring_sqe, sockfd: c_int, buf: *const c_void,
                                 len: usize, flags: c_int)
{
    io_uring_prep_rw(IORING_OP_SEND, sqe, sockfd, buf, len as u32, 0);
    (*sqe).__liburing_anon_3.msg_flags = flags as u32;
}

#[inline]
pub unsafe fn io_uring_prep_send_bundle(sqe: *mut io_uring_sqe, sockfd: c_int, len: usize,
                                        flags: c_int)
{
    io_uring_prep_send(sqe, sockfd, ptr::null_mut(), len, flags);
    (*sqe).ioprio |= IORING_RECVSEND_BUNDLE as u16;
}

#[inline]
pub unsafe fn io_uring_prep_send_set_addr(sqe: *mut io_uring_sqe, dest_addr: *const sockaddr,
                                          addr_len: u16)
{
    (*sqe).__liburing_anon_1.addr2 = dest_addr as usize as u64;
    (*sqe).__liburing_anon_5.__liburing_anon_1.addr_len = addr_len;
}

#[inline]
pub unsafe fn io_uring_prep_sendto(sqe: *mut io_uring_sqe, sockfd: c_int, buf: *const c_void,
                                   len: usize, flags: c_int, addr: *const sockaddr,
                                   addrlen: socklen_t)
{
    io_uring_prep_send(sqe, sockfd, buf, len, flags);
    io_uring_prep_send_set_addr(sqe, addr, addrlen as _);
}

#[inline]
pub unsafe fn io_uring_prep_send_zc(sqe: *mut io_uring_sqe, sockfd: c_int, buf: *const c_void,
                                    len: usize, flags: c_int, zc_flags: c_uint)
{
    io_uring_prep_rw(IORING_OP_SEND_ZC, sqe, sockfd, buf, len as u32, 0);
    (*sqe).__liburing_anon_3.msg_flags = flags as u32;
    (*sqe).ioprio = zc_flags as _;
}

#[inline]
pub unsafe fn io_uring_prep_send_zc_fixed(sqe: *mut io_uring_sqe, sockfd: c_int,
                                          buf: *const c_void, len: usize, flags: c_int,
                                          zc_flags: c_uint, buf_index: c_uint)
{
    io_uring_prep_send_zc(sqe, sockfd, buf, len, flags, zc_flags);
    (*sqe).ioprio |= IORING_RECVSEND_FIXED_BUF as u16;
    (*sqe).__liburing_anon_4.buf_index = buf_index as _;
}

#[inline]
pub unsafe fn io_uring_prep_sendmsg_zc(sqe: *mut io_uring_sqe, fd: c_int, msg: *const msghdr,
                                       flags: c_uint)
{
    io_uring_prep_sendmsg(sqe, fd, msg, flags);
    (*sqe).opcode = IORING_OP_SENDMSG_ZC as _;
}

#[inline]
pub unsafe fn io_uring_prep_sendmsg_zc_fixed(sqe: *mut io_uring_sqe, fd: c_int,
                                             msg: *const msghdr, flags: c_uint, buf_index: c_uint)
{
    io_uring_prep_sendmsg_zc(sqe, fd, msg, flags);
    (*sqe).ioprio |= IORING_RECVSEND_FIXED_BUF as u16;
    (*sqe).__liburing_anon_4.buf_index = buf_index as _;
}

#[inline]
pub unsafe fn io_uring_prep_recv(sqe: *mut io_uring_sqe, sockfd: c_int, buf: *mut c_void,
                                 len: usize, flags: c_int)
{
    io_uring_prep_rw(IORING_OP_RECV, sqe, sockfd, buf, len as u32, 0);
    (*sqe).__liburing_anon_3.msg_flags = flags as u32;
}

#[inline]
pub unsafe fn io_uring_prep_recv_multishot(sqe: *mut io_uring_sqe, sockfd: c_int,
                                           buf: *mut c_void, len: usize, flags: c_int)
{
    io_uring_prep_recv(sqe, sockfd, buf, len, flags);
    (*sqe).ioprio |= IORING_RECV_MULTISHOT as u16;
}

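/// Validates a buffer posted by a multishot recvmsg completion and
/// reinterprets it as `io_uring_recvmsg_out`. The kernel lays the
/// buffer out as the `io_uring_recvmsg_out` header, then `msg_namelen`
/// bytes of address, then `msg_controllen` bytes of control data, then
/// the payload; the accessors below walk it in exactly that order.
///
/// Sketch (`ignore`d: assumes `buf`/`msgh` describe the original
/// request and `cqe` is its completion):
///
/// ```ignore
/// unsafe {
///     let out = io_uring_recvmsg_validate(buf, (*cqe).res, &raw mut msgh);
///     if !out.is_null() {
///         let data = io_uring_recvmsg_payload(out, &raw mut msgh);
///         let len = io_uring_recvmsg_payload_length(out, (*cqe).res, &raw mut msgh);
///     }
/// }
/// ```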
#[inline]
pub unsafe fn io_uring_recvmsg_validate(buf: *mut c_void, buf_len: c_int, msgh: *mut msghdr)
                                        -> *mut io_uring_recvmsg_out
{
    let header = (*msgh).msg_controllen as usize
                 + (*msgh).msg_namelen as usize
                 + mem::size_of::<io_uring_recvmsg_out>();

    if buf_len < 0 || (buf_len as usize) < header {
        return ptr::null_mut();
    }

    buf.cast()
}

#[inline]
pub unsafe fn io_uring_recvmsg_name(o: *mut io_uring_recvmsg_out) -> *mut c_void
{
    o.add(1).cast()
}

#[inline]
pub unsafe fn io_uring_recvmsg_cmsg_firsthdr(o: *mut io_uring_recvmsg_out, msgh: *mut msghdr)
                                             -> *mut cmsghdr
{
    if ((*o).controllen as usize) < mem::size_of::<cmsghdr>() {
        return ptr::null_mut();
    }

    io_uring_recvmsg_name(o).cast::<u8>()
                            .add((*msgh).msg_namelen as _)
                            .cast()
}

#[inline]
pub unsafe fn io_uring_recvmsg_cmsg_nexthdr(o: *mut io_uring_recvmsg_out, msgh: *mut msghdr,
                                            cmsg: *mut cmsghdr)
                                            -> *mut cmsghdr
{
    #[allow(non_snake_case)]
    fn CMSG_ALIGN(len: usize) -> usize
    {
        (len + mem::size_of::<usize>() - 1) & !(mem::size_of::<usize>() - 1)
    }

    if ((*cmsg).cmsg_len as usize) < mem::size_of::<cmsghdr>() {
        return ptr::null_mut();
    }

    let end = io_uring_recvmsg_cmsg_firsthdr(o, msgh).cast::<u8>()
                                                     .add((*o).controllen as _);

    let cmsg = cmsg.cast::<u8>()
                   .add(CMSG_ALIGN((*cmsg).cmsg_len as usize))
                   .cast::<cmsghdr>();

    if cmsg.add(1).cast::<u8>() > end {
        return ptr::null_mut();
    }

    if cmsg.cast::<u8>().add(CMSG_ALIGN((*cmsg).cmsg_len as usize)) > end {
        return ptr::null_mut();
    }

    cmsg
}

#[inline]
pub unsafe fn io_uring_recvmsg_payload(o: *mut io_uring_recvmsg_out, msgh: *mut msghdr)
                                       -> *mut c_void
{
    io_uring_recvmsg_name(o).cast::<u8>()
                            .add((*msgh).msg_namelen as usize + (*msgh).msg_controllen as usize)
                            .cast::<c_void>()
}

#[inline]
pub unsafe fn io_uring_recvmsg_payload_length(o: *mut io_uring_recvmsg_out, buf_len: c_int,
                                              msgh: *mut msghdr)
                                              -> c_uint
{
    let payload_start = io_uring_recvmsg_payload(o, msgh) as usize;
    let payload_end = o as usize + buf_len as usize;
    (payload_end - payload_start) as _
}

#[inline]
pub unsafe fn io_uring_prep_openat2(sqe: *mut io_uring_sqe, dfd: c_int, path: *const c_char,
                                    how: *const open_how)
{
    io_uring_prep_rw(IORING_OP_OPENAT2 as _,
                     sqe,
                     dfd,
                     path.cast(),
                     mem::size_of::<open_how>() as u32,
                     how as usize as u64);
}

#[inline]
pub unsafe fn io_uring_prep_openat2_direct(sqe: *mut io_uring_sqe, dfd: c_int,
                                           path: *const c_char, how: *const open_how,
                                           mut file_index: c_uint)
{
    io_uring_prep_openat2(sqe, dfd, path, how);
    if file_index == IORING_FILE_INDEX_ALLOC as _ {
        file_index -= 1;
    }
    __io_uring_set_target_fixed_file(sqe, file_index);
}

#[inline]
pub unsafe fn io_uring_prep_epoll_ctl(sqe: *mut io_uring_sqe, epfd: c_int, fd: c_int, op: c_int,
                                      ev: *const epoll_event)
{
    io_uring_prep_rw(IORING_OP_EPOLL_CTL, sqe, epfd, ev.cast(), op as u32, u64::from(fd as u32));
}

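/// Hands `nr` contiguous buffers of `len` bytes each, starting at
/// `addr`, to the kernel under buffer group `bgid`, with ids starting
/// at `bid` (the legacy provided-buffer API; the `buf_ring` helpers
/// further down are the ring-mapped successor).
///
/// Sketch (`ignore`d: assumes an initialized ring and a pool that
/// outlives the buffers' use):
///
/// ```ignore
/// unsafe {
///     let mut pool = vec![0u8; 64 * 4096];
///     let sqe = io_uring_get_sqe(&raw mut ring);
///     io_uring_prep_provide_buffers(sqe, pool.as_mut_ptr().cast(),
///                                   4096, 64, 7, 0);
/// }
/// ```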
#[inline]
pub unsafe fn io_uring_prep_provide_buffers(sqe: *mut io_uring_sqe, addr: *mut c_void, len: c_int,
                                            nr: c_int, bgid: c_int, bid: c_int)
{
    io_uring_prep_rw(IORING_OP_PROVIDE_BUFFERS, sqe, nr, addr, len as u32, bid as u64);
    (*sqe).__liburing_anon_4.buf_group = bgid as u16;
}

#[inline]
pub unsafe fn io_uring_prep_remove_buffers(sqe: *mut io_uring_sqe, nr: c_int, bgid: c_int)
{
    io_uring_prep_rw(IORING_OP_REMOVE_BUFFERS, sqe, nr, ptr::null_mut(), 0, 0);
    (*sqe).__liburing_anon_4.buf_group = bgid as u16;
}

#[inline]
pub unsafe fn io_uring_prep_shutdown(sqe: *mut io_uring_sqe, fd: c_int, how: c_int)
{
    io_uring_prep_rw(IORING_OP_SHUTDOWN, sqe, fd, ptr::null_mut(), how as u32, 0);
}

#[inline]
pub unsafe fn io_uring_prep_unlinkat(sqe: *mut io_uring_sqe, dfd: c_int, path: *const c_char,
                                     flags: c_int)
{
    io_uring_prep_rw(IORING_OP_UNLINKAT, sqe, dfd, path.cast(), 0, 0);
    (*sqe).__liburing_anon_3.unlink_flags = flags as u32;
}

#[inline]
pub unsafe fn io_uring_prep_unlink(sqe: *mut io_uring_sqe, path: *const c_char, flags: c_int)
{
    io_uring_prep_unlinkat(sqe, AT_FDCWD, path, flags);
}

#[inline]
pub unsafe fn io_uring_prep_renameat(sqe: *mut io_uring_sqe, olddfd: c_int,
                                     oldpath: *const c_char, newdfd: c_int,
                                     newpath: *const c_char, flags: c_uint)
{
    io_uring_prep_rw(IORING_OP_RENAMEAT,
                     sqe,
                     olddfd,
                     oldpath.cast(),
                     newdfd as u32,
                     newpath as usize as u64);
    (*sqe).__liburing_anon_3.rename_flags = flags;
}

#[inline]
pub unsafe fn io_uring_prep_rename(sqe: *mut io_uring_sqe, oldpath: *const c_char,
                                   newpath: *const c_char)
{
    io_uring_prep_renameat(sqe, AT_FDCWD, oldpath, AT_FDCWD, newpath, 0);
}

#[inline]
pub unsafe fn io_uring_prep_sync_file_range(sqe: *mut io_uring_sqe, fd: c_int, len: c_uint,
                                            offset: u64, flags: c_int)
{
    io_uring_prep_rw(IORING_OP_SYNC_FILE_RANGE, sqe, fd, ptr::null_mut(), len, offset);
    (*sqe).__liburing_anon_3.sync_range_flags = flags as u32;
}

#[inline]
pub unsafe fn io_uring_prep_mkdirat(sqe: *mut io_uring_sqe, dfd: c_int, path: *const c_char,
                                    mode: mode_t)
{
    io_uring_prep_rw(IORING_OP_MKDIRAT, sqe, dfd, path.cast(), mode, 0);
}

#[inline]
pub unsafe fn io_uring_prep_mkdir(sqe: *mut io_uring_sqe, path: *const c_char, mode: mode_t)
{
    io_uring_prep_mkdirat(sqe, AT_FDCWD, path, mode);
}

#[inline]
pub unsafe fn io_uring_prep_symlinkat(sqe: *mut io_uring_sqe, target: *const c_char,
                                      newdirfd: c_int, linkpath: *const c_char)
{
    io_uring_prep_rw(IORING_OP_SYMLINKAT,
                     sqe,
                     newdirfd,
                     target.cast(),
                     0,
                     linkpath as usize as u64);
}

#[inline]
pub unsafe fn io_uring_prep_symlink(sqe: *mut io_uring_sqe, target: *const c_char,
                                    linkpath: *const c_char)
{
    io_uring_prep_symlinkat(sqe, target, AT_FDCWD, linkpath);
}

#[inline]
pub unsafe fn io_uring_prep_linkat(sqe: *mut io_uring_sqe, olddfd: c_int, oldpath: *const c_char,
                                   newdfd: c_int, newpath: *const c_char, flags: c_int)
{
    io_uring_prep_rw(IORING_OP_LINKAT,
                     sqe,
                     olddfd,
                     oldpath.cast(),
                     newdfd as u32,
                     newpath as usize as u64);
    (*sqe).__liburing_anon_3.hardlink_flags = flags as u32;
}

#[inline]
pub unsafe fn io_uring_prep_link(sqe: *mut io_uring_sqe, oldpath: *const c_char,
                                 newpath: *const c_char, flags: c_int)
{
    io_uring_prep_linkat(sqe, AT_FDCWD, oldpath, AT_FDCWD, newpath, flags);
}

#[inline]
pub unsafe fn io_uring_prep_msg_ring_cqe_flags(sqe: *mut io_uring_sqe, fd: c_int, len: c_uint,
                                               data: u64, flags: c_uint, cqe_flags: c_uint)
{
    io_uring_prep_rw(IORING_OP_MSG_RING, sqe, fd, ptr::null_mut(), len, data);
    (*sqe).__liburing_anon_3.msg_ring_flags = IORING_MSG_RING_FLAGS_PASS | flags;
    (*sqe).__liburing_anon_5.file_index = cqe_flags;
}

#[inline]
pub unsafe fn io_uring_prep_msg_ring(sqe: *mut io_uring_sqe, fd: c_int, len: c_uint, data: u64,
                                     flags: c_uint)
{
    io_uring_prep_rw(IORING_OP_MSG_RING, sqe, fd, ptr::null_mut(), len, data);
    // Unlike the cqe_flags variant above, plain msg_ring passes the caller's
    // flags through unchanged; IORING_MSG_RING_FLAGS_PASS is only meaningful
    // when file_index carries CQE flags.
    (*sqe).__liburing_anon_3.msg_ring_flags = flags;
}

#[inline]
pub unsafe fn io_uring_prep_msg_ring_fd(sqe: *mut io_uring_sqe, fd: c_int, source_fd: c_int,
                                        mut target_fd: c_int, data: u64, flags: c_uint)
{
    io_uring_prep_rw(IORING_OP_MSG_RING,
                     sqe,
                     fd,
                     IORING_MSG_SEND_FD as usize as *const c_void,
                     0,
                     data);
    (*sqe).__liburing_anon_6.__liburing_anon_1.as_mut().addr3 = source_fd as _;
    if target_fd == IORING_FILE_INDEX_ALLOC as _ {
        target_fd -= 1;
    }
    __io_uring_set_target_fixed_file(sqe, target_fd as _);
    (*sqe).__liburing_anon_3.msg_ring_flags = flags;
}

#[inline]
pub unsafe fn io_uring_prep_msg_ring_fd_alloc(sqe: *mut io_uring_sqe, fd: c_int, source_fd: c_int,
                                              data: u64, flags: c_uint)
{
    io_uring_prep_msg_ring_fd(sqe, fd, source_fd, IORING_FILE_INDEX_ALLOC, data, flags);
}

#[inline]
pub unsafe fn io_uring_prep_getxattr(sqe: *mut io_uring_sqe, name: *const c_char,
                                     value: *mut c_char, path: *const c_char, len: c_uint)
{
    io_uring_prep_rw(IORING_OP_GETXATTR, sqe, 0, name.cast(), len, value as usize as u64);
    (*sqe).__liburing_anon_6.__liburing_anon_1.as_mut().addr3 = path as usize as u64;
    (*sqe).__liburing_anon_3.xattr_flags = 0;
}

#[inline]
pub unsafe fn io_uring_prep_setxattr(sqe: *mut io_uring_sqe, name: *const c_char,
                                     value: *const c_char, path: *const c_char, flags: c_int,
                                     len: c_uint)
{
    io_uring_prep_rw(IORING_OP_SETXATTR, sqe, 0, name.cast(), len, value as usize as u64);
    (*sqe).__liburing_anon_6.__liburing_anon_1.as_mut().addr3 = path as usize as u64;
    (*sqe).__liburing_anon_3.xattr_flags = flags as _;
}

#[inline]
pub unsafe fn io_uring_prep_fgetxattr(sqe: *mut io_uring_sqe, fd: c_int, name: *const c_char,
                                      value: *mut c_char, len: c_uint)
{
    io_uring_prep_rw(IORING_OP_FGETXATTR, sqe, fd, name.cast(), len, value as usize as u64);
    (*sqe).__liburing_anon_3.xattr_flags = 0;
}

#[inline]
pub unsafe fn io_uring_prep_fsetxattr(sqe: *mut io_uring_sqe, fd: c_int, name: *const c_char,
                                      value: *mut c_char, flags: c_int, len: c_uint)
{
    io_uring_prep_rw(IORING_OP_FSETXATTR, sqe, fd, name.cast(), len, value as usize as u64);
    (*sqe).__liburing_anon_3.xattr_flags = flags as _;
}

#[inline]
pub unsafe fn io_uring_prep_socket(sqe: *mut io_uring_sqe, domain: c_int, r#type: c_int,
                                   protocol: c_int, flags: c_uint)
{
    io_uring_prep_rw(IORING_OP_SOCKET,
                     sqe,
                     domain,
                     ptr::null_mut(),
                     protocol as u32,
                     r#type as u64);
    (*sqe).__liburing_anon_3.rw_flags = flags as i32;
}

#[inline]
pub unsafe fn io_uring_prep_socket_direct(sqe: *mut io_uring_sqe, domain: c_int, r#type: c_int,
                                          protocol: c_int, mut file_index: c_uint, flags: c_uint)
{
    io_uring_prep_rw(IORING_OP_SOCKET,
                     sqe,
                     domain,
                     ptr::null_mut(),
                     protocol as u32,
                     r#type as u64);
    (*sqe).__liburing_anon_3.rw_flags = flags as i32;
    if file_index == IORING_FILE_INDEX_ALLOC as _ {
        file_index -= 1;
    }
    __io_uring_set_target_fixed_file(sqe, file_index);
}

#[inline]
pub unsafe fn io_uring_prep_socket_direct_alloc(sqe: *mut io_uring_sqe, domain: c_int,
                                                r#type: c_int, protocol: c_int, flags: c_uint)
{
    io_uring_prep_rw(IORING_OP_SOCKET,
                     sqe,
                     domain,
                     ptr::null_mut(),
                     protocol as u32,
                     r#type as u64);
    (*sqe).__liburing_anon_3.rw_flags = flags as i32;
    __io_uring_set_target_fixed_file(sqe, (IORING_FILE_INDEX_ALLOC - 1) as _);
}

#[inline]
pub unsafe fn __io_uring_prep_uring_cmd(sqe: *mut io_uring_sqe, op: c_int, cmd_op: u32, fd: c_int)
{
    (*sqe).opcode = op as _;
    (*sqe).fd = fd;
    (*sqe).__liburing_anon_1.__liburing_anon_1.cmd_op = cmd_op;
    (*sqe).__liburing_anon_1.__liburing_anon_1.__pad1 = 0;
    (*sqe).__liburing_anon_2.addr = 0;
    (*sqe).len = 0;
}

#[inline]
pub unsafe fn io_uring_prep_uring_cmd(sqe: *mut io_uring_sqe, cmd_op: c_int, fd: c_int)
{
    __io_uring_prep_uring_cmd(sqe, IORING_OP_URING_CMD as _, cmd_op as _, fd);
}

#[inline]
pub unsafe fn io_uring_prep_uring_cmd128(sqe: *mut io_uring_sqe, cmd_op: c_int, fd: c_int)
{
    __io_uring_prep_uring_cmd(sqe, IORING_OP_URING_CMD128 as _, cmd_op as _, fd);
}

#[inline]
pub unsafe fn io_uring_prep_cmd_sock(sqe: *mut io_uring_sqe, cmd_op: c_int, fd: c_int,
                                     level: c_int, optname: c_int, optval: *mut c_void,
                                     optlen: c_int)
{
    io_uring_prep_uring_cmd(sqe, cmd_op as _, fd);

    *(*sqe).__liburing_anon_6.optval.as_mut() = optval as usize as _;
    (*sqe).__liburing_anon_2.__liburing_anon_1.optname = optname as _;
    (*sqe).__liburing_anon_5.optlen = optlen as _;
    (*sqe).__liburing_anon_1.__liburing_anon_1.cmd_op = cmd_op as _;
    (*sqe).__liburing_anon_2.__liburing_anon_1.level = level as _;
}

#[inline]
pub unsafe fn io_uring_prep_cmd_getsockname(sqe: *mut io_uring_sqe, fd: c_int,
                                            sockaddr: *mut sockaddr, sockaddr_len: *mut socklen_t,
                                            peer: c_int)
{
    io_uring_prep_uring_cmd(sqe, SOCKET_URING_OP_GETSOCKNAME as _, fd);

    (*sqe).__liburing_anon_2.addr = sockaddr as _;
    (*sqe).__liburing_anon_6.__liburing_anon_1.as_mut().addr3 = sockaddr_len as _;
    (*sqe).__liburing_anon_5.optlen = peer as _;
}

#[inline]
pub unsafe fn io_uring_prep_waitid(sqe: *mut io_uring_sqe, idtype: idtype_t, id: id_t,
                                   infop: *mut siginfo_t, options: c_int, flags: c_uint)
{
    io_uring_prep_rw(IORING_OP_WAITID, sqe, id as _, ptr::null_mut(), idtype, 0);
    (*sqe).__liburing_anon_3.waitid_flags = flags;
    (*sqe).__liburing_anon_5.file_index = options as _;
    (*sqe).__liburing_anon_1.addr2 = infop as usize as u64;
}

#[inline]
pub unsafe fn io_uring_prep_futex_wake(sqe: *mut io_uring_sqe, futex: *const u32, val: u64,
                                       mask: u64, futex_flags: u32, flags: c_uint)
{
    io_uring_prep_rw(IORING_OP_FUTEX_WAKE, sqe, futex_flags as _, futex.cast(), 0, val);
    (*sqe).__liburing_anon_3.futex_flags = flags;
    (*sqe).__liburing_anon_6.__liburing_anon_1.as_mut().addr3 = mask;
}

#[inline]
pub unsafe fn io_uring_prep_futex_wait(sqe: *mut io_uring_sqe, futex: *const u32, val: u64,
                                       mask: u64, futex_flags: u32, flags: c_uint)
{
    io_uring_prep_rw(IORING_OP_FUTEX_WAIT, sqe, futex_flags as _, futex.cast(), 0, val);
    (*sqe).__liburing_anon_3.futex_flags = flags;
    (*sqe).__liburing_anon_6.__liburing_anon_1.as_mut().addr3 = mask;
}

#[inline]
pub unsafe fn io_uring_prep_futex_waitv(sqe: *mut io_uring_sqe, futex: *const futex_waitv,
                                        nr_futex: u32, flags: c_uint)
{
    io_uring_prep_rw(IORING_OP_FUTEX_WAITV, sqe, 0, futex.cast(), nr_futex, 0);
    (*sqe).__liburing_anon_3.futex_flags = flags;
}

#[inline]
pub unsafe fn io_uring_prep_fixed_fd_install(sqe: *mut io_uring_sqe, fd: c_int, flags: c_uint)
{
    io_uring_prep_rw(IORING_OP_FIXED_FD_INSTALL, sqe, fd, ptr::null_mut(), 0, 0);

    (*sqe).flags = IOSQE_FIXED_FILE as _;
    (*sqe).__liburing_anon_3.install_fd_flags = flags;
}

#[inline]
pub unsafe fn io_uring_prep_ftruncate(sqe: *mut io_uring_sqe, fd: c_int, len: c_longlong)
{
    io_uring_prep_rw(IORING_OP_FTRUNCATE, sqe, fd, ptr::null_mut(), 0, len as _);
}

#[inline]
pub unsafe fn io_uring_prep_cmd_discard(sqe: *mut io_uring_sqe, fd: c_int, offset: u64, nbytes: u64)
{
    // BLOCK_URING_CMD_DISCARD, i.e. _IO(0x12, 0) from the block uapi
    // headers, spelled out because bindgen does not expand the _IO() macro.
    io_uring_prep_uring_cmd(sqe, (0x12 << 8) as _, fd);

    (*sqe).__liburing_anon_2.addr = offset;
    (*sqe).__liburing_anon_6.__liburing_anon_1.as_mut().addr3 = nbytes;
}

#[inline]
pub unsafe fn io_uring_prep_pipe(sqe: *mut io_uring_sqe, fds: *mut c_int, pipe_flags: c_int)
{
    io_uring_prep_rw(IORING_OP_PIPE, sqe, 0, fds.cast(), 0, 0);
    (*sqe).__liburing_anon_3.pipe_flags = pipe_flags as u32;
}

#[inline]
pub unsafe fn io_uring_prep_pipe_direct(sqe: *mut io_uring_sqe, fds: *mut c_int,
                                        pipe_flags: c_int, mut file_index: c_uint)
{
    io_uring_prep_pipe(sqe, fds, pipe_flags);
    if file_index == IORING_FILE_INDEX_ALLOC as u32 {
        file_index -= 1;
    }
    __io_uring_set_target_fixed_file(sqe, file_index);
}

#[inline]
pub unsafe fn io_uring_load_sq_head(ring: *mut io_uring) -> c_uint
{
    if (*ring).flags & IORING_SETUP_SQPOLL > 0 {
        return io_uring_smp_load_acquire((*ring).sq.khead);
    }

    *(*ring).sq.khead
}

#[inline]
pub unsafe fn io_uring_sq_ready(ring: *mut io_uring) -> c_uint
{
    (*ring).sq.sqe_tail - io_uring_load_sq_head(ring)
}

#[inline]
pub unsafe fn io_uring_sq_space_left(ring: *mut io_uring) -> c_uint
{
    (*ring).sq.ring_entries - io_uring_sq_ready(ring)
}

#[must_use]
#[inline]
pub fn io_uring_sqe_shift_from_flags(flags: c_uint) -> c_uint
{
    u32::from(flags & IORING_SETUP_SQE128 != 0)
}

#[inline]
pub unsafe fn io_uring_sqe_shift(ring: *mut io_uring) -> c_uint
{
    io_uring_sqe_shift_from_flags((*ring).flags)
}

#[inline]
pub unsafe fn io_uring_sqring_wait(ring: *mut io_uring) -> c_int
{
    if (*ring).flags & IORING_SETUP_SQPOLL == 0 {
        return 0;
    }
    if io_uring_sq_space_left(ring) > 0 {
        return 0;
    }

    __io_uring_sqring_wait(ring)
}

#[inline]
pub unsafe fn io_uring_cq_ready(ring: *mut io_uring) -> c_uint
{
    io_uring_smp_load_acquire((*ring).cq.ktail) - *(*ring).cq.khead
}

#[inline]
pub unsafe fn io_uring_cq_has_overflow(ring: *mut io_uring) -> bool
{
    IO_URING_READ_ONCE((*ring).sq.kflags) & IORING_SQ_CQ_OVERFLOW > 0
}

#[inline]
pub unsafe fn io_uring_cq_eventfd_enabled(ring: *mut io_uring) -> bool
{
    if (*ring).cq.kflags.is_null() {
        return true;
    }
    (*(*ring).cq.kflags & IORING_CQ_EVENTFD_DISABLED) == 0
}

#[inline]
pub unsafe fn io_uring_cq_eventfd_toggle(ring: *mut io_uring, enabled: bool) -> c_int
{
    if enabled == io_uring_cq_eventfd_enabled(ring) {
        return 0;
    }

    if (*ring).cq.kflags.is_null() {
        return -(EOPNOTSUPP as c_int);
    }

    let mut flags = *(*ring).cq.kflags;

    if enabled {
        flags &= !IORING_CQ_EVENTFD_DISABLED;
    } else {
        flags |= IORING_CQ_EVENTFD_DISABLED;
    }

    IO_URING_WRITE_ONCE((*ring).cq.kflags, flags);

    0
}

#[inline]
pub unsafe fn io_uring_wait_cqe_nr(ring: *mut io_uring, cqe_ptr: *mut *mut io_uring_cqe,
                                   wait_nr: c_uint)
                                   -> c_int
{
    __io_uring_get_cqe(ring, cqe_ptr, 0, wait_nr, ptr::null_mut())
}

#[inline]
unsafe fn io_uring_skip_cqe(ring: *mut io_uring, cqe: *mut io_uring_cqe, err: *mut c_int) -> bool
{
    'out: {
        if (*cqe).flags & IORING_CQE_F_SKIP != 0 {
            break 'out;
        }

        if (*ring).features & IORING_FEAT_EXT_ARG != 0 {
            return false;
        }

        if (*cqe).user_data != LIBURING_UDATA_TIMEOUT {
            return false;
        }

        if (*cqe).res < 0 {
            *err = (*cqe).res;
        }
    }

    io_uring_cq_advance(ring, io_uring_cqe_nr(cqe));
    *err == 0
}

#[inline]
unsafe fn __io_uring_peek_cqe(ring: *mut io_uring, cqe_ptr: *mut *mut io_uring_cqe,
                              nr_available: *mut c_uint)
                              -> c_int
{
    let mut cqe;
    let mut err = 0;

    let mut available;
    let mask = (*ring).cq.ring_mask;
    let shift = io_uring_cqe_shift(ring);

    loop {
        let tail = io_uring_smp_load_acquire((*ring).cq.ktail);

        let head = io_uring_smp_load_acquire((*ring).cq.khead);

        cqe = ptr::null_mut();
        available = tail - head;
        if available == 0 {
            break;
        }

        cqe = &raw mut *(*ring).cq.cqes.add(((head & mask) << shift) as usize);
        if !io_uring_skip_cqe(ring, cqe, &raw mut err) {
            break;
        }
    }

    *cqe_ptr = cqe;
    if !nr_available.is_null() {
        *nr_available = available;
    }
    err
}

#[inline]
pub unsafe fn io_uring_peek_cqe(ring: *mut io_uring, cqe_ptr: *mut *mut io_uring_cqe) -> c_int
{
    if __io_uring_peek_cqe(ring, cqe_ptr, ptr::null_mut()) == 0 && !(*cqe_ptr).is_null() {
        return 0;
    }

    io_uring_wait_cqe_nr(ring, cqe_ptr, 0)
}

#[inline]
pub unsafe fn io_uring_wait_cqe(ring: *mut io_uring, cqe_ptr: *mut *mut io_uring_cqe) -> c_int
{
    if __io_uring_peek_cqe(ring, cqe_ptr, ptr::null_mut()) == 0 && !(*cqe_ptr).is_null() {
        return 0;
    }

    io_uring_wait_cqe_nr(ring, cqe_ptr, 1)
}

#[inline]
unsafe fn _io_uring_get_sqe(ring: *mut io_uring) -> *mut io_uring_sqe
{
    let sq = &raw mut (*ring).sq;

    let head = io_uring_load_sq_head(ring);
    let tail = (*sq).sqe_tail;

    if tail - head >= (*sq).ring_entries {
        return ptr::null_mut();
    }

    let offset = (tail & (*sq).ring_mask) << io_uring_sqe_shift(ring);
    let sqe = (*sq).sqes.add(offset as usize);
    (*sq).sqe_tail = tail + 1;
    io_uring_initialize_sqe(sqe);
    sqe
}

#[must_use]
#[inline]
pub fn io_uring_buf_ring_mask(ring_entries: u32) -> c_int
{
    (ring_entries - 1) as _
}

#[inline]
pub unsafe fn io_uring_buf_ring_init(br: *mut io_uring_buf_ring)
{
    (*br).__liburing_anon_1.__liburing_anon_1.as_mut().tail = 0;
}

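/// Publishes one buffer into a ring-mapped buffer group; `buf_offset`
/// is the slot offset past the current tail, so several buffers can be
/// staged before a single [`io_uring_buf_ring_advance`] makes them
/// visible.
///
/// Sketch (`ignore`d: assumes `br` came from a successful buffer-ring
/// registration of `entries` slots and `pool` outlives its use):
///
/// ```ignore
/// unsafe {
///     let mask = io_uring_buf_ring_mask(entries);
///     for (i, chunk) in pool.chunks_mut(4096).enumerate() {
///         io_uring_buf_ring_add(br, chunk.as_mut_ptr().cast(), 4096,
///                               i as c_ushort, mask, i as c_int);
///     }
///     io_uring_buf_ring_advance(br, entries as c_int);
/// }
/// ```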
#[inline]
pub unsafe fn io_uring_buf_ring_add(br: *mut io_uring_buf_ring, addr: *mut c_void, len: c_uint,
                                    bid: c_ushort, mask: c_int, buf_offset: c_int)
{
    let tail = (*br).__liburing_anon_1.__liburing_anon_1.as_ref().tail;
    let buf = (*br).__liburing_anon_1
                   .bufs
                   .as_mut()
                   .as_mut_ptr()
                   .add(((i32::from(tail) + buf_offset) & mask) as usize);

    (*buf).addr = addr as usize as u64;
    (*buf).len = len;
    (*buf).bid = bid;
}

#[inline]
pub unsafe fn io_uring_buf_ring_advance(br: *mut io_uring_buf_ring, count: c_int)
{
    let tail = (*br).__liburing_anon_1.__liburing_anon_1.as_ref().tail;
    let new_tail = tail.wrapping_add(count as u16);

    io_uring_smp_store_release(&raw mut (*br).__liburing_anon_1.__liburing_anon_1.as_mut().tail,
                               new_tail);
}

#[inline]
unsafe fn __io_uring_buf_ring_cq_advance(ring: *mut io_uring, br: *mut io_uring_buf_ring,
                                         cq_count: i32, buf_count: c_int)
{
    io_uring_buf_ring_advance(br, buf_count);
    io_uring_cq_advance(ring, cq_count as _);
}

#[inline]
pub unsafe fn io_uring_buf_ring_cq_advance(ring: *mut io_uring, br: *mut io_uring_buf_ring,
                                           count: c_int)
{
    __io_uring_buf_ring_cq_advance(ring, br, count, count);
}

#[inline]
pub unsafe fn io_uring_buf_ring_available(ring: *mut io_uring, br: *mut io_uring_buf_ring,
                                          bgid: c_ushort)
                                          -> c_int
{
    let mut head = 0;
    let ret = io_uring_buf_ring_head(ring, bgid.into(), &raw mut head);
    // io_uring_buf_ring_head() returns 0 on success or a negative errno;
    // propagate any failure instead of computing a bogus count.
    if ret != 0 {
        return ret;
    }
    c_int::from((*br).__liburing_anon_1.__liburing_anon_1.as_mut().tail - head)
}

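/// Returns the next free SQE, or null when the submission queue is
/// full.
///
/// The canonical submit-and-wait round trip, as a sketch (`ignore`d:
/// assumes the raw liburing bindings such as `io_uring_queue_init`,
/// `io_uring_submit` and `io_uring_queue_exit` are re-exported by the
/// `uring` module):
///
/// ```ignore
/// unsafe {
///     let mut ring = mem::zeroed::<io_uring>();
///     io_uring_queue_init(8, &raw mut ring, 0);
///
///     let sqe = io_uring_get_sqe(&raw mut ring);
///     io_uring_prep_nop(sqe);
///     io_uring_sqe_set_data64(sqe, 1);
///     io_uring_submit(&raw mut ring);
///
///     let mut cqe = ptr::null_mut();
///     io_uring_wait_cqe(&raw mut ring, &raw mut cqe);
///     assert_eq!(io_uring_cqe_get_data64(cqe), 1);
///     io_uring_cqe_seen(&raw mut ring, cqe);
///     io_uring_queue_exit(&raw mut ring);
/// }
/// ```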
#[inline]
pub unsafe fn io_uring_get_sqe(ring: *mut io_uring) -> *mut io_uring_sqe
{
    _io_uring_get_sqe(ring)
}

#[inline]
pub unsafe fn io_uring_get_sqe128(ring: *mut io_uring) -> *mut io_uring_sqe
{
    let sq = &raw mut (*ring).sq;

    let head = io_uring_load_sq_head(ring);
    let mut tail = (*sq).sqe_tail;

    if (*ring).flags & IORING_SETUP_SQE128 != 0 {
        return io_uring_get_sqe(ring);
    }

    if (*ring).flags & IORING_SETUP_SQE_MIXED == 0 {
        return ptr::null_mut();
    }

    let mut sqe: *mut io_uring_sqe;
    if (tail + 1) & (*sq).ring_mask == 0 {
        if (tail + 2) - head >= (*sq).ring_entries {
            return ptr::null_mut();
        }

        sqe = _io_uring_get_sqe(ring);
        io_uring_prep_nop(sqe);
        (*sqe).flags |= IOSQE_CQE_SKIP_SUCCESS as u8;
        tail = (*sq).sqe_tail;
    } else if (tail + 1) - head >= (*sq).ring_entries {
        return ptr::null_mut();
    }

    sqe = &raw mut *(*sq).sqes.add((tail & (*sq).ring_mask) as usize);
    (*sq).sqe_tail = tail + 2;
    io_uring_initialize_sqe(sqe);
    sqe
}

impl From<Duration> for timespec
{
    #[cfg(not(any(target_arch = "powerpc", target_arch = "arm")))]
    #[inline]
    fn from(duration: Duration) -> Self
    {
        let mut ts = unsafe { zeroed::<timespec>() };
        ts.tv_sec = duration.as_secs() as _;
        ts.tv_nsec = duration.subsec_nanos().into();
        ts
    }

    #[cfg(any(target_arch = "powerpc", target_arch = "arm"))]
    #[inline]
    fn from(duration: Duration) -> Self
    {
        let mut ts = unsafe { zeroed::<timespec>() };
        ts.tv_sec = duration.as_secs() as _;
        ts.tv_nsec = duration.subsec_nanos().try_into().unwrap();
        ts
    }
}

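/// Converts a [`Duration`] into the 64-bit `__kernel_timespec` consumed
/// by the timeout helpers above, e.g.
/// `let ts: __kernel_timespec = Duration::from_secs(1).into();`.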
impl From<Duration> for __kernel_timespec
{
    #[inline]
    fn from(duration: Duration) -> Self
    {
        let mut ts = unsafe { zeroed::<__kernel_timespec>() };
        ts.tv_sec = duration.as_secs() as _;
        ts.tv_nsec = duration.subsec_nanos().into();
        ts
    }
}