//! liburing_rs — `lib.rs`
//!
//! Rust port of liburing's inline helper layer (SQE prep helpers, CQE
//! iteration, and the SMP memory-ordering primitives shared with the kernel).
1#![no_std]
2#![allow(unsafe_op_in_unsafe_fn, non_snake_case)]
3#![warn(clippy::pedantic)]
4#![allow(clippy::missing_safety_doc,
5         clippy::cast_sign_loss,
6         clippy::similar_names,
7         clippy::cast_possible_truncation,
8         clippy::cast_possible_wrap,
9         clippy::cast_ptr_alignment,
10         clippy::used_underscore_items,
11         clippy::unnecessary_cast)]
12
13mod uring;
14
15use core::{
16    ffi::{c_char, c_int, c_longlong, c_uint, c_ushort, c_void},
17    mem::{self, zeroed},
18    ptr,
19    sync::atomic::{
20        AtomicU16, AtomicU32,
21        Ordering::{self, Acquire, Relaxed, Release},
22    },
23    time::Duration,
24};
25
26pub use uring::*;
27
// Sentinel `user_data` value reserved for liburing-internal timeout requests
// (mirrors C liburing's LIBURING_UDATA_TIMEOUT). Presumably consumed by the
// wait/submit paths later in this file — TODO confirm against those callers.
const LIBURING_UDATA_TIMEOUT: u64 = u64::MAX;
29
/// Minimal abstraction over `AtomicU16`/`AtomicU32` so the SMP load/store
/// helpers below can be generic over the two integer widths used by the
/// ring's shared indices.
trait Atomic: Copy
{
    unsafe fn store(p: *mut Self, val: Self, order: Ordering);
    unsafe fn load(p: *mut Self, order: Ordering) -> Self;
}

impl Atomic for u32
{
    #[inline]
    unsafe fn store(p: *mut u32, val: u32, order: Ordering)
    {
        // `from_ptr` reinterprets the plain u32 shared with the kernel as an
        // atomic; caller must guarantee `p` is valid and 4-byte aligned.
        AtomicU32::from_ptr(p).store(val, order);
    }

    #[inline]
    unsafe fn load(p: *mut u32, order: Ordering) -> u32
    {
        AtomicU32::from_ptr(p).load(order)
    }
}

impl Atomic for u16
{
    #[inline]
    unsafe fn store(p: *mut u16, val: u16, order: Ordering)
    {
        // Same contract as the u32 impl, with 2-byte alignment.
        AtomicU16::from_ptr(p).store(val, order);
    }

    #[inline]
    unsafe fn load(p: *mut u16, order: Ordering) -> u16
    {
        AtomicU16::from_ptr(p).load(order)
    }
}
65
/// Store with `Release` ordering: all writes made before this call (e.g.
/// filled SQEs) become visible before the new value of `*p` is observed.
unsafe fn io_uring_smp_store_release<T: Atomic>(p: *mut T, v: T)
{
    Atomic::store(p, v, Release);
}

/// Load with `Acquire` ordering: reads issued after this call (e.g. of CQEs)
/// cannot be reordered before the load of the shared index.
unsafe fn io_uring_smp_load_acquire<T: Atomic>(p: *const T) -> T
{
    Atomic::load(p.cast_mut(), Acquire)
}

/// Relaxed atomic read, mirroring the C `READ_ONCE` macro: no ordering, but
/// the compiler may not tear or cache the access.
unsafe fn IO_URING_READ_ONCE<T: Atomic>(var: *const T) -> T
{
    Atomic::load(var.cast_mut(), Relaxed)
}

/// Relaxed atomic write, mirroring the C `WRITE_ONCE` macro.
unsafe fn IO_URING_WRITE_ONCE<T: Atomic>(var: *mut T, val: T)
{
    Atomic::store(var, val, Relaxed);
}
85
86/*
87 * Library interface
88 */
89
/// Convert a raw pointer to the `u64` representation stored in SQE fields
/// (zero-extended on 32-bit targets).
#[must_use]
#[inline]
unsafe fn uring_ptr_to_u64(ptr: *const c_void) -> u64
{
    let address = ptr as usize;
    address as u64
}
96
/// Returns non-zero when the probe result `p` reports opcode `op` as
/// supported by the running kernel, zero otherwise.
///
/// NOTE(review): as in upstream liburing, a negative `op` passes the
/// `last_op` guard and would index `ops` out of bounds — callers must pass a
/// valid opcode value.
#[inline]
pub unsafe fn io_uring_opcode_supported(p: *mut io_uring_probe, op: c_int) -> c_int
{
    if op > (*p).last_op.into() {
        return 0;
    }

    i32::from((*(*p).ops.as_ptr().add(op as _)).flags & IO_URING_OP_SUPPORTED as u16 != 0)
}
106
/*
 * Returns the bit shift needed to index the CQ.
 * This shift is 1 for rings with big CQEs, and 0 for rings with normal CQEs.
 * CQE `index` can be computed as &cq.cqes[(index & cq.ring_mask) << cqe_shift].
 */
#[must_use]
#[inline]
pub fn io_uring_cqe_shift_from_flags(flags: c_uint) -> c_uint
{
    // 1 when the ring was created with IORING_SETUP_CQE32, else 0.
    u32::from(flags & IORING_SETUP_CQE32 != 0)
}

/// Ring-flavored convenience wrapper over [`io_uring_cqe_shift_from_flags`].
#[must_use]
#[inline]
pub unsafe fn io_uring_cqe_shift(ring: *const io_uring) -> c_uint
{
    io_uring_cqe_shift_from_flags((*ring).flags)
}

/// Number of ring slots this completion occupies: 2 when the CQE carries
/// `IORING_CQE_F_32`, otherwise 1.
#[must_use]
#[inline]
pub unsafe fn io_uring_cqe_nr(cqe: *const io_uring_cqe) -> c_uint
{
    let shift = i32::from((*cqe).flags & IORING_CQE_F_32 != 0);
    1 << shift
}
133
/// Snapshot the completion queue's state (buffer, mask, shift, head, tail)
/// into an iterator value.
#[inline]
unsafe fn io_uring_cqe_iter_init(ring: *const io_uring) -> io_uring_cqe_iter
{
    io_uring_cqe_iter { cqes: (*ring).cq.cqes,
                        mask: (*ring).cq.ring_mask,
                        shift: io_uring_cqe_shift(ring),
                        head: *(*ring).cq.khead,
                        /* Acquire ordering ensures tail is loaded before any CQEs */
                        tail: io_uring_smp_load_acquire((*ring).cq.ktail) }
}

/// Advance the iterator by one completion: writes the next CQE pointer into
/// `*cqe` and returns `true`, or returns `false` once head has caught up
/// with the snapshotted tail.
#[inline]
unsafe fn io_uring_cqe_iter_next(iter: *mut io_uring_cqe_iter, cqe: *mut *mut io_uring_cqe)
                                 -> bool
{
    if (*iter).head == (*iter).tail {
        return false;
    }

    let head = (*iter).head;
    (*iter).head += 1;

    let offset = (head & (*iter).mask) << (*iter).shift;
    *cqe = (*iter).cqes.add(offset as usize);

    // A CQE flagged IORING_CQE_F_32 spans two ring slots; skip the second.
    if (*(*cqe)).flags & IORING_CQE_F_32 > 0 {
        (*iter).head += 1;
    }

    true
}
165
166pub unsafe fn io_uring_for_each_cqe<F>(ring: *mut io_uring, mut f: F)
167    where F: FnMut(*mut io_uring_cqe)
168{
169    let mut iter = io_uring_cqe_iter_init(ring);
170    let mut cqe = ptr::null_mut::<io_uring_cqe>();
171    while io_uring_cqe_iter_next(&raw mut iter, &raw mut cqe) {
172        f(cqe);
173    }
174}
175
/*
 * Must be called after io_uring_for_each_cqe()
 */
#[inline]
pub unsafe fn io_uring_cq_advance(ring: *mut io_uring, nr: c_uint)
{
    if nr > 0 {
        let cq = &raw mut (*ring).cq;

        /*
         * Ensure that the kernel only sees the new value of the head
         * index after the CQEs have been read.
         */
        io_uring_smp_store_release((*cq).khead, *(*cq).khead + nr);
    }
}

/*
 * Must be called after io_uring_{peek,wait}_cqe() after the cqe has
 * been processed by the application.
 */
#[inline]
pub unsafe fn io_uring_cqe_seen(ring: *mut io_uring, cqe: *mut io_uring_cqe)
{
    if !cqe.is_null() {
        // Advance by one or two slots depending on the CQE's size flag.
        io_uring_cq_advance(ring, io_uring_cqe_nr(cqe));
    }
}
204
/*
 * Command prep helpers
 */

/*
 * Associate pointer @data with the sqe, for later retrieval from the cqe
 * at command completion time with io_uring_cqe_get_data().
 */
#[inline]
pub unsafe fn io_uring_sqe_set_data(sqe: *mut io_uring_sqe, data: *mut c_void)
{
    (*sqe).user_data = data as u64;
}

/// Retrieve the pointer previously stored with [`io_uring_sqe_set_data`].
#[must_use]
#[inline]
pub unsafe fn io_uring_cqe_get_data(cqe: *const io_uring_cqe) -> *mut c_void
{
    (*cqe).user_data as *mut c_void
}

/*
 * Assign a 64-bit value to this sqe, which can get retrieved at completion
 * time with io_uring_cqe_get_data64. Just like the non-64 variants, except
 * these store a 64-bit type rather than a data pointer.
 */
#[inline]
pub unsafe fn io_uring_sqe_set_data64(sqe: *mut io_uring_sqe, data: u64)
{
    (*sqe).user_data = data;
}

/// Retrieve the value previously stored with [`io_uring_sqe_set_data64`].
#[must_use]
#[inline]
pub unsafe fn io_uring_cqe_get_data64(cqe: *const io_uring_cqe) -> u64
{
    (*cqe).user_data
}
243
/// Set the SQE's IOSQE_* flags. The field is 8 bits wide, so only the low
/// byte of `flags` is kept.
#[inline]
pub unsafe fn io_uring_sqe_set_flags(sqe: *mut io_uring_sqe, flags: c_uint)
{
    (*sqe).flags = flags as u8;
}

/// Select the provided-buffer group this request draws buffers from.
#[inline]
pub unsafe fn io_uring_sqe_set_buf_group(sqe: *mut io_uring_sqe, bgid: c_int)
{
    (*sqe).__liburing_anon_4.buf_group = bgid as u16;
}

/// Point a "direct" request at a fixed-file-table slot.
#[inline]
unsafe fn __io_uring_set_target_fixed_file(sqe: *mut io_uring_sqe, file_index: c_uint)
{
    /* 0 means no fixed files, indexes should be encoded as "index + 1" */
    (*sqe).__liburing_anon_5.file_index = file_index + 1;
}
262
/// Zero every SQE field that the prep helpers do not unconditionally
/// overwrite, so stale data from a previous use of the slot cannot leak into
/// a new submission.
#[inline]
pub unsafe fn io_uring_initialize_sqe(sqe: *mut io_uring_sqe)
{
    (*sqe).flags = 0;
    (*sqe).ioprio = 0;
    (*sqe).__liburing_anon_3.rw_flags = 0;
    (*sqe).__liburing_anon_4.buf_index = 0;
    (*sqe).personality = 0;
    (*sqe).__liburing_anon_5.file_index = 0;
    (*sqe).__liburing_anon_6.__liburing_anon_1.as_mut().addr3 = 0;
    (*sqe).__liburing_anon_6.__liburing_anon_1.as_mut().__pad2[0] = 0;
}
275
/// Common prep routine: fill the opcode, fd, offset, address and length
/// fields shared by most io_uring operations. The opcode is truncated to the
/// SQE's 8-bit opcode field.
#[inline]
pub unsafe fn io_uring_prep_rw(op: c_uint, sqe: *mut io_uring_sqe, fd: c_int, addr: *const c_void,
                               len: c_uint, offset: __u64)
{
    (*sqe).opcode = op as u8;
    (*sqe).fd = fd;
    (*sqe).__liburing_anon_1.off = offset;
    (*sqe).__liburing_anon_2.addr = addr as u64;
    (*sqe).len = len;
}
286
/*
 * io_uring_prep_splice() - Either @fd_in or @fd_out must be a pipe.
 *
 * - If @fd_in refers to a pipe, @off_in is ignored and must be set to -1.
 *
 * - If @fd_in does not refer to a pipe and @off_in is -1, then @nbytes are read
 *   from @fd_in starting from the file offset, which is incremented by the
 *   number of bytes read.
 *
 * - If @fd_in does not refer to a pipe and @off_in is not -1, then the starting
 *   offset of @fd_in will be @off_in.
 *
 * This splice operation can be used to implement sendfile by splicing to an
 * intermediate pipe first, then splice to the final destination.
 * In fact, the implementation of sendfile in kernel uses splice internally.
 *
 * NOTE that even if fd_in or fd_out refers to a pipe, the splice operation
 * can still fail with EINVAL if one of the fd doesn't explicitly support splice
 * operation, e.g. reading from terminal is unsupported from kernel 5.7 to 5.11.
 * Check issue #291 for more information.
 */
#[inline]
pub unsafe fn io_uring_prep_splice(sqe: *mut io_uring_sqe, fd_in: c_int, off_in: i64,
                                   fd_out: c_int, off_out: i64, nbytes: c_uint,
                                   splice_flags: c_uint)
{
    io_uring_prep_rw(IORING_OP_SPLICE, sqe, fd_out, ptr::null_mut(), nbytes, off_out as u64);
    (*sqe).__liburing_anon_2.splice_off_in = off_in as u64;
    (*sqe).__liburing_anon_5.splice_fd_in = fd_in;
    (*sqe).__liburing_anon_3.splice_flags = splice_flags;
}

/// tee(2)-like: duplicate up to `nbytes` from `fd_in` to `fd_out` with no
/// offsets (both input offset fields are zeroed).
#[inline]
pub unsafe fn io_uring_prep_tee(sqe: *mut io_uring_sqe, fd_in: c_int, fd_out: c_int,
                                nbytes: c_uint, splice_flags: c_uint)
{
    io_uring_prep_rw(IORING_OP_TEE, sqe, fd_out, ptr::null_mut(), nbytes, 0);
    (*sqe).__liburing_anon_2.splice_off_in = 0;
    (*sqe).__liburing_anon_5.splice_fd_in = fd_in;
    (*sqe).__liburing_anon_3.splice_flags = splice_flags;
}
328
/// Vectored read from `fd` at `offset` (preadv(2) equivalent).
#[inline]
pub unsafe fn io_uring_prep_readv(sqe: *mut io_uring_sqe, fd: c_int, iovecs: *const iovec,
                                  nr_vecs: c_uint, offset: u64)
{
    io_uring_prep_rw(IORING_OP_READV, sqe, fd, iovecs.cast(), nr_vecs, offset);
}

/// Vectored read with per-request RWF_* flags (preadv2(2) equivalent).
#[inline]
pub unsafe fn io_uring_prep_readv2(sqe: *mut io_uring_sqe, fd: c_int, iovecs: *const iovec,
                                   nr_vecs: c_uint, offset: u64, flags: c_int)
{
    io_uring_prep_readv(sqe, fd, iovecs, nr_vecs, offset);
    (*sqe).__liburing_anon_3.rw_flags = flags;
}

/// Read into the pre-registered buffer identified by `buf_index`.
#[inline]
pub unsafe fn io_uring_prep_read_fixed(sqe: *mut io_uring_sqe, fd: c_int, buf: *mut c_void,
                                       nbytes: c_uint, offset: u64, buf_index: c_int)
{
    io_uring_prep_rw(IORING_OP_READ_FIXED, sqe, fd, buf, nbytes, offset);
    (*sqe).__liburing_anon_4.buf_index = buf_index as u16;
}

/// READV_FIXED: vectored read tied to registered buffer `buf_index`.
#[inline]
pub unsafe fn io_uring_prep_readv_fixed(sqe: *mut io_uring_sqe, fd: c_int, iovecs: *const iovec,
                                        nr_vecs: c_uint, offset: u64, flags: c_int,
                                        buf_index: c_int)
{
    // Reuse readv2 prep, then rewrite the opcode to the fixed variant.
    io_uring_prep_readv2(sqe, fd, iovecs, nr_vecs, offset, flags);
    (*sqe).opcode = IORING_OP_READV_FIXED as _;
    (*sqe).__liburing_anon_4.buf_index = buf_index as u16;
}

/// Vectored write to `fd` at `offset` (pwritev(2) equivalent).
#[inline]
pub unsafe fn io_uring_prep_writev(sqe: *mut io_uring_sqe, fd: c_int, iovecs: *const iovec,
                                   nr_vecs: c_uint, offset: u64)
{
    io_uring_prep_rw(IORING_OP_WRITEV, sqe, fd, iovecs.cast(), nr_vecs, offset);
}

/// Vectored write with per-request RWF_* flags (pwritev2(2) equivalent).
#[inline]
pub unsafe fn io_uring_prep_writev2(sqe: *mut io_uring_sqe, fd: c_int, iovecs: *const iovec,
                                    nr_vecs: c_uint, offset: u64, flags: c_int)
{
    io_uring_prep_writev(sqe, fd, iovecs, nr_vecs, offset);
    (*sqe).__liburing_anon_3.rw_flags = flags;
}

/// Write from the pre-registered buffer identified by `buf_index`.
#[inline]
pub unsafe fn io_uring_prep_write_fixed(sqe: *mut io_uring_sqe, fd: c_int, buf: *const c_void,
                                        nbytes: c_uint, offset: u64, buf_index: c_int)
{
    io_uring_prep_rw(IORING_OP_WRITE_FIXED, sqe, fd, buf, nbytes, offset);
    (*sqe).__liburing_anon_4.buf_index = buf_index as u16;
}

/// WRITEV_FIXED: vectored write tied to registered buffer `buf_index`.
#[inline]
pub unsafe fn io_uring_prep_writev_fixed(sqe: *mut io_uring_sqe, fd: c_int, iovecs: *const iovec,
                                         nr_vecs: c_uint, offset: u64, flags: c_int,
                                         buf_index: c_int)
{
    // Reuse writev2 prep, then rewrite the opcode to the fixed variant.
    io_uring_prep_writev2(sqe, fd, iovecs, nr_vecs, offset, flags);
    (*sqe).opcode = IORING_OP_WRITEV_FIXED as _;
    (*sqe).__liburing_anon_4.buf_index = buf_index as u16;
}
394
/// Receive a message on socket `fd` into `msg` (recvmsg(2) equivalent).
#[inline]
pub unsafe fn io_uring_prep_recvmsg(sqe: *mut io_uring_sqe, fd: c_int, msg: *mut msghdr,
                                    flags: c_uint)
{
    io_uring_prep_rw(IORING_OP_RECVMSG, sqe, fd, msg.cast(), 1, 0);
    (*sqe).__liburing_anon_3.msg_flags = flags;
}

/// Multishot recvmsg: sets IORING_RECV_MULTISHOT so a single SQE can produce
/// multiple completions.
#[inline]
pub unsafe fn io_uring_prep_recvmsg_multishot(sqe: *mut io_uring_sqe, fd: c_int, msg: *mut msghdr,
                                              flags: c_uint)
{
    io_uring_prep_recvmsg(sqe, fd, msg, flags);
    (*sqe).ioprio |= IORING_RECV_MULTISHOT as u16;
}

/// Send a message on socket `fd` from `msg` (sendmsg(2) equivalent).
#[inline]
pub unsafe fn io_uring_prep_sendmsg(sqe: *mut io_uring_sqe, fd: c_int, msg: *const msghdr,
                                    flags: c_uint)
{
    io_uring_prep_rw(IORING_OP_SENDMSG, sqe, fd, msg.cast(), 1, 0);
    (*sqe).__liburing_anon_3.msg_flags = flags;
}
418
/// Poll masks are passed to the kernel in little-endian representation;
/// convert the host value (a no-op on little-endian targets).
#[must_use]
#[inline]
pub fn __io_uring_prep_poll_mask(poll_mask: c_uint) -> c_uint
{
    c_uint::to_le(poll_mask)
}
425
/// Single-shot poll for the events in `poll_mask` on `fd`.
#[inline]
pub unsafe fn io_uring_prep_poll_add(sqe: *mut io_uring_sqe, fd: c_int, poll_mask: c_uint)
{
    io_uring_prep_rw(IORING_OP_POLL_ADD, sqe, fd, ptr::null_mut(), 0, 0);
    (*sqe).__liburing_anon_3.poll32_events = __io_uring_prep_poll_mask(poll_mask);
}

/// Poll that keeps posting completions (sets IORING_POLL_ADD_MULTI).
#[inline]
pub unsafe fn io_uring_prep_poll_multishot(sqe: *mut io_uring_sqe, fd: c_int, poll_mask: c_uint)
{
    io_uring_prep_poll_add(sqe, fd, poll_mask);
    (*sqe).len = IORING_POLL_ADD_MULTI;
}

/// Cancel a pending poll identified by its submission `user_data`.
#[inline]
pub unsafe fn io_uring_prep_poll_remove(sqe: *mut io_uring_sqe, user_data: u64)
{
    io_uring_prep_rw(IORING_OP_POLL_REMOVE, sqe, -1, ptr::null_mut(), 0, 0);
    (*sqe).__liburing_anon_2.addr = user_data;
}

/// Update a pending poll: `old_user_data` selects the target, `flags`
/// (IORING_POLL_UPDATE_*) select whether the mask and/or user_data change.
#[inline]
pub unsafe fn io_uring_prep_poll_update(sqe: *mut io_uring_sqe, old_user_data: u64,
                                        new_user_data: u64, poll_mask: c_uint, flags: c_uint)
{
    io_uring_prep_rw(IORING_OP_POLL_REMOVE, sqe, -1, ptr::null_mut(), flags, new_user_data);
    (*sqe).__liburing_anon_2.addr = old_user_data;
    (*sqe).__liburing_anon_3.poll32_events = __io_uring_prep_poll_mask(poll_mask);
}
455
/// fsync(2) on `fd`; `fsync_flags` may carry IORING_FSYNC_DATASYNC.
#[inline]
pub unsafe fn io_uring_prep_fsync(sqe: *mut io_uring_sqe, fd: c_int, fsync_flags: c_uint)
{
    io_uring_prep_rw(IORING_OP_FSYNC, sqe, fd, ptr::null_mut(), 0, 0);
    (*sqe).__liburing_anon_3.fsync_flags = fsync_flags;
}

/// No-op request.
#[inline]
pub unsafe fn io_uring_prep_nop(sqe: *mut io_uring_sqe)
{
    io_uring_prep_rw(IORING_OP_NOP, sqe, -1, ptr::null_mut(), 0, 0);
}

/// No-op request using the 128-byte opcode variant.
#[inline]
pub unsafe fn io_uring_prep_nop128(sqe: *mut io_uring_sqe)
{
    io_uring_prep_rw(IORING_OP_NOP128, sqe, -1, ptr::null_mut(), 0, 0);
}
474
/// Timeout request: fires after `*ts` or after `count` completions arrive
/// (see io_uring_prep_timeout(3)); `flags` holds IORING_TIMEOUT_* bits.
#[inline]
pub unsafe fn io_uring_prep_timeout(sqe: *mut io_uring_sqe, ts: *const __kernel_timespec,
                                    count: c_uint, flags: c_uint)
{
    io_uring_prep_rw(IORING_OP_TIMEOUT, sqe, -1, ts.cast(), 1, count.into());
    (*sqe).__liburing_anon_3.timeout_flags = flags;
}

/// Cancel the timeout whose submission used `user_data`.
#[inline]
pub unsafe fn io_uring_prep_timeout_remove(sqe: *mut io_uring_sqe, user_data: __u64, flags: c_uint)
{
    io_uring_prep_rw(IORING_OP_TIMEOUT_REMOVE, sqe, -1, ptr::null_mut(), 0, 0);
    (*sqe).__liburing_anon_2.addr = user_data;
    (*sqe).__liburing_anon_3.timeout_flags = flags;
}

/// Re-arm an existing timeout: the new timespec *pointer* is carried in the
/// offset field, the target timeout's user_data in `addr`, and
/// IORING_TIMEOUT_UPDATE is OR'd into the flags.
#[inline]
pub unsafe fn io_uring_prep_timeout_update(sqe: *mut io_uring_sqe, ts: *const __kernel_timespec,
                                           user_data: __u64, flags: c_uint)
{
    io_uring_prep_rw(IORING_OP_TIMEOUT_REMOVE, sqe, -1, ptr::null_mut(), 0, ts as u64);
    (*sqe).__liburing_anon_2.addr = user_data;
    (*sqe).__liburing_anon_3.timeout_flags = flags | IORING_TIMEOUT_UPDATE;
}
499
/// accept4(2)-like: accept a connection on `fd`, optionally filling `addr`
/// and `addrlen` (the addrlen pointer travels in the offset field).
#[inline]
pub unsafe fn io_uring_prep_accept(sqe: *mut io_uring_sqe, fd: c_int, addr: *mut sockaddr,
                                   addrlen: *mut socklen_t, flags: c_int)
{
    io_uring_prep_rw(IORING_OP_ACCEPT, sqe, fd, addr.cast(), 0, uring_ptr_to_u64(addrlen.cast()));
    (*sqe).__liburing_anon_3.accept_flags = flags as u32;
}

/* accept directly into the fixed file table */
#[inline]
pub unsafe fn io_uring_prep_accept_direct(sqe: *mut io_uring_sqe, fd: c_int, addr: *mut sockaddr,
                                          addrlen: *mut socklen_t, flags: c_int,
                                          mut file_index: c_uint)
{
    io_uring_prep_accept(sqe, fd, addr, addrlen, flags);
    /* offset by 1 for allocation */
    if file_index == IORING_FILE_INDEX_ALLOC as _ {
        file_index -= 1;
    }
    __io_uring_set_target_fixed_file(sqe, file_index);
}

/// Accept that keeps posting a completion per incoming connection.
#[inline]
pub unsafe fn io_uring_prep_multishot_accept(sqe: *mut io_uring_sqe, fd: c_int,
                                             addr: *mut sockaddr, addrlen: *mut socklen_t,
                                             flags: c_int)
{
    io_uring_prep_accept(sqe, fd, addr, addrlen, flags);
    (*sqe).ioprio |= IORING_ACCEPT_MULTISHOT as u16;
}

/* multishot accept directly into the fixed file table */
#[inline]
pub unsafe fn io_uring_prep_multishot_accept_direct(sqe: *mut io_uring_sqe, fd: c_int,
                                                    addr: *mut sockaddr, addrlen: *mut socklen_t,
                                                    flags: c_int)
{
    io_uring_prep_multishot_accept(sqe, fd, addr, addrlen, flags);
    // Multishot always lets the kernel allocate slots (ALLOC index).
    __io_uring_set_target_fixed_file(sqe, (IORING_FILE_INDEX_ALLOC - 1) as u32);
}
540
/// Cancel pending request(s) matched by `user_data`; IORING_ASYNC_CANCEL_*
/// bits in `flags` refine the matching.
#[inline]
pub unsafe fn io_uring_prep_cancel64(sqe: *mut io_uring_sqe, user_data: u64, flags: c_int)
{
    io_uring_prep_rw(IORING_OP_ASYNC_CANCEL, sqe, -1, ptr::null_mut(), 0, 0);
    (*sqe).__liburing_anon_2.addr = user_data;
    (*sqe).__liburing_anon_3.cancel_flags = flags as u32;
}

/// Pointer-flavored convenience wrapper over [`io_uring_prep_cancel64`].
#[inline]
pub unsafe fn io_uring_prep_cancel(sqe: *mut io_uring_sqe, user_data: *const c_void, flags: c_int)
{
    io_uring_prep_cancel64(sqe, user_data as usize as u64, flags);
}

/// Cancel request(s) targeting file descriptor `fd` instead of user_data.
#[inline]
pub unsafe fn io_uring_prep_cancel_fd(sqe: *mut io_uring_sqe, fd: c_int, flags: c_uint)
{
    io_uring_prep_rw(IORING_OP_ASYNC_CANCEL, sqe, fd, ptr::null_mut(), 0, 0);
    (*sqe).__liburing_anon_3.cancel_flags = flags | IORING_ASYNC_CANCEL_FD;
}

/// Timeout tied to the previous SQE (used with IOSQE_IO_LINK chains).
#[inline]
pub unsafe fn io_uring_prep_link_timeout(sqe: *mut io_uring_sqe, ts: *const __kernel_timespec,
                                         flags: c_uint)
{
    io_uring_prep_rw(IORING_OP_LINK_TIMEOUT, sqe, -1, ts.cast(), 1, 0);
    (*sqe).__liburing_anon_3.timeout_flags = flags;
}
569
/// connect(2) equivalent; `addrlen` travels in the offset field.
#[inline]
pub unsafe fn io_uring_prep_connect(sqe: *mut io_uring_sqe, fd: c_int, addr: *const sockaddr,
                                    addrlen: socklen_t)
{
    io_uring_prep_rw(IORING_OP_CONNECT, sqe, fd, addr.cast(), 0, addrlen.into());
}

/// bind(2) equivalent; `addrlen` travels in the offset field.
#[inline]
pub unsafe fn io_uring_prep_bind(sqe: *mut io_uring_sqe, fd: c_int, addr: *const sockaddr,
                                 addrlen: socklen_t)
{
    io_uring_prep_rw(IORING_OP_BIND, sqe, fd, addr.cast(), 0, addrlen.into());
}

/// listen(2) equivalent; `backlog` travels in the len field.
#[inline]
pub unsafe fn io_uring_prep_listen(sqe: *mut io_uring_sqe, fd: c_int, backlog: c_int)
{
    io_uring_prep_rw(IORING_OP_LISTEN, sqe, fd, ptr::null_mut(), backlog as _, 0);
}

/// epoll_wait(2)-like: harvest up to `maxevents` events from epoll fd `fd`
/// into `events`.
#[inline]
pub unsafe fn io_uring_prep_epoll_wait(sqe: *mut io_uring_sqe, fd: c_int,
                                       events: *mut epoll_event, maxevents: c_int, flags: c_uint)
{
    io_uring_prep_rw(IORING_OP_EPOLL_WAIT, sqe, fd, events.cast(), maxevents as _, 0);
    (*sqe).__liburing_anon_3.rw_flags = flags as _;
}
597
/// Update the registered-files table: install `nr_fds` descriptors from
/// `fds` starting at table `offset`.
#[inline]
pub unsafe fn io_uring_prep_files_update(sqe: *mut io_uring_sqe, fds: *mut c_int, nr_fds: c_uint,
                                         offset: c_int)
{
    io_uring_prep_rw(IORING_OP_FILES_UPDATE, sqe, -1, fds.cast(), nr_fds, offset as u64);
}

/// fallocate(2) equivalent; `mode` rides in the len field, `len` in addr.
#[inline]
pub unsafe fn io_uring_prep_fallocate(sqe: *mut io_uring_sqe, fd: c_int, mode: c_int, offset: u64,
                                      len: u64)
{
    io_uring_prep_rw(IORING_OP_FALLOCATE, sqe, fd, ptr::null_mut(), mode as c_uint, offset);
    (*sqe).__liburing_anon_2.addr = len;
}
612
/// openat(2) equivalent; `mode` rides in the len field.
#[inline]
pub unsafe fn io_uring_prep_openat(sqe: *mut io_uring_sqe, dfd: c_int, path: *const c_char,
                                   flags: c_int, mode: mode_t)
{
    io_uring_prep_rw(IORING_OP_OPENAT, sqe, dfd, path.cast(), mode, 0);
    (*sqe).__liburing_anon_3.open_flags = flags as u32;
}

/* open directly into the fixed file table */
#[inline]
pub unsafe fn io_uring_prep_openat_direct(sqe: *mut io_uring_sqe, dfd: c_int, path: *const c_char,
                                          flags: c_int, mode: mode_t, mut file_index: c_uint)
{
    io_uring_prep_openat(sqe, dfd, path, flags, mode);
    /* offset by 1 for allocation */
    if file_index == IORING_FILE_INDEX_ALLOC as _ {
        file_index -= 1;
    }
    __io_uring_set_target_fixed_file(sqe, file_index);
}

/// open(2) equivalent: openat relative to the current working directory.
#[inline]
pub unsafe fn io_uring_prep_open(sqe: *mut io_uring_sqe, path: *const c_char, flags: c_int,
                                 mode: mode_t)
{
    io_uring_prep_openat(sqe, AT_FDCWD, path, flags, mode);
}

/* open directly into the fixed file table */
#[inline]
pub unsafe fn io_uring_prep_open_direct(sqe: *mut io_uring_sqe, path: *const c_char, flags: c_int,
                                        mode: mode_t, file_index: c_uint)
{
    io_uring_prep_openat_direct(sqe, AT_FDCWD, path, flags, mode, file_index);
}

/// close(2) equivalent.
#[inline]
pub unsafe fn io_uring_prep_close(sqe: *mut io_uring_sqe, fd: c_int)
{
    io_uring_prep_rw(IORING_OP_CLOSE, sqe, fd, ptr::null_mut(), 0, 0);
}

/// Close a fixed-file-table slot instead of a regular fd (fd field unused,
/// set to 0).
#[inline]
pub unsafe fn io_uring_prep_close_direct(sqe: *mut io_uring_sqe, file_index: c_uint)
{
    io_uring_prep_close(sqe, 0);
    __io_uring_set_target_fixed_file(sqe, file_index);
}
661
/// pread(2)-like read of `nbytes` into `buf` at `offset`.
#[inline]
pub unsafe fn io_uring_prep_read(sqe: *mut io_uring_sqe, fd: c_int, buf: *mut c_void,
                                 nbytes: c_uint, offset: u64)
{
    io_uring_prep_rw(IORING_OP_READ, sqe, fd, buf, nbytes, offset);
}

/// Multishot read using provided buffers from `buf_group`. Note this
/// *assigns* IOSQE_BUFFER_SELECT to `flags` (overwriting, not OR-ing),
/// matching upstream liburing.
#[inline]
pub unsafe fn io_uring_prep_read_multishot(sqe: *mut io_uring_sqe, fd: c_int, nbytes: c_uint,
                                           offset: u64, buf_group: c_int)
{
    io_uring_prep_rw(IORING_OP_READ_MULTISHOT, sqe, fd, ptr::null_mut(), nbytes, offset);
    (*sqe).__liburing_anon_4.buf_group = buf_group as _;
    (*sqe).flags = IOSQE_BUFFER_SELECT as _;
}

/// pwrite(2)-like write of `nbytes` from `buf` at `offset`.
#[inline]
pub unsafe fn io_uring_prep_write(sqe: *mut io_uring_sqe, fd: c_int, buf: *const c_void,
                                  nbytes: c_uint, offset: u64)
{
    io_uring_prep_rw(IORING_OP_WRITE, sqe, fd, buf, nbytes, offset);
}
684
/// statx(2) equivalent; results are written to `*statxbuf`, whose pointer
/// rides in the offset field.
#[inline]
pub unsafe fn io_uring_prep_statx(sqe: *mut io_uring_sqe, dfd: c_int, path: *const c_char,
                                  flags: c_int, mask: c_uint, statxbuf: *mut statx)
{
    io_uring_prep_rw(IORING_OP_STATX,
                     sqe,
                     dfd,
                     path.cast(),
                     mask,
                     uring_ptr_to_u64(statxbuf.cast()));
    (*sqe).__liburing_anon_3.statx_flags = flags as u32;
}

/// posix_fadvise(2) equivalent with a 32-bit length (carried in len field).
#[inline]
pub unsafe fn io_uring_prep_fadvise(sqe: *mut io_uring_sqe, fd: c_int, offset: u64, len: u32,
                                    advice: c_int)
{
    io_uring_prep_rw(IORING_OP_FADVISE, sqe, fd, ptr::null_mut(), len, offset);
    (*sqe).__liburing_anon_3.fadvise_advice = advice as u32;
}

/// madvise(2) equivalent with a 32-bit length (carried in len field).
#[inline]
pub unsafe fn io_uring_prep_madvise(sqe: *mut io_uring_sqe, addr: *mut c_void, length: u32,
                                    advice: c_int)
{
    io_uring_prep_rw(IORING_OP_MADVISE, sqe, -1, addr, length, 0);
    (*sqe).__liburing_anon_3.fadvise_advice = advice as u32;
}

/// fadvise with a 64-bit length: len field stays 0 and the length rides in
/// the addr field instead.
#[inline]
pub unsafe fn io_uring_prep_fadvise64(sqe: *mut io_uring_sqe, fd: c_int, offset: u64, len: off_t,
                                      advice: c_int)
{
    io_uring_prep_rw(IORING_OP_FADVISE, sqe, fd, ptr::null_mut(), 0, offset);
    (*sqe).__liburing_anon_2.addr = len as _;
    (*sqe).__liburing_anon_3.fadvise_advice = advice as u32;
}

/// madvise with a 64-bit length: len field stays 0 and the length rides in
/// the offset field instead.
#[inline]
pub unsafe fn io_uring_prep_madvise64(sqe: *mut io_uring_sqe, addr: *mut c_void, length: off_t,
                                      advice: c_int)
{
    io_uring_prep_rw(IORING_OP_MADVISE, sqe, -1, addr, 0, length as _);
    (*sqe).__liburing_anon_3.fadvise_advice = advice as u32;
}
730
/// send(2) equivalent; `len` is truncated to the 32-bit len field.
#[inline]
pub unsafe fn io_uring_prep_send(sqe: *mut io_uring_sqe, sockfd: c_int, buf: *const c_void,
                                 len: usize, flags: c_int)
{
    io_uring_prep_rw(IORING_OP_SEND, sqe, sockfd, buf, len as u32, 0);
    (*sqe).__liburing_anon_3.msg_flags = flags as u32;
}

/// Bundle send (IORING_RECVSEND_BUNDLE): no user buffer is supplied.
#[inline]
pub unsafe fn io_uring_prep_send_bundle(sqe: *mut io_uring_sqe, sockfd: c_int, len: usize,
                                        flags: c_int)
{
    io_uring_prep_send(sqe, sockfd, ptr::null_mut(), len, flags);
    (*sqe).ioprio |= IORING_RECVSEND_BUNDLE as u16;
}

/// Attach a destination address to an already-prepared send-style SQE
/// (address pointer in addr2, length in the addr_len field).
#[inline]
pub unsafe fn io_uring_prep_send_set_addr(sqe: *mut io_uring_sqe, dest_addr: *const sockaddr,
                                          addr_len: u16)
{
    (*sqe).__liburing_anon_1.addr2 = dest_addr as usize as u64;
    (*sqe).__liburing_anon_5.__liburing_anon_1.addr_len = addr_len;
}

/// sendto(2) equivalent: plain send plus an explicit destination address.
#[inline]
pub unsafe fn io_uring_prep_sendto(sqe: *mut io_uring_sqe, sockfd: c_int, buf: *const c_void,
                                   len: usize, flags: c_int, addr: *const sockaddr,
                                   addrlen: socklen_t)
{
    io_uring_prep_send(sqe, sockfd, buf, len, flags);
    io_uring_prep_send_set_addr(sqe, addr, addrlen as _);
}
763
/// Zero-copy send (IORING_OP_SEND_ZC); `zc_flags` is *assigned* to ioprio
/// (see io_uring_prep_send_zc(3) for completion semantics).
#[inline]
pub unsafe fn io_uring_prep_send_zc(sqe: *mut io_uring_sqe, sockfd: c_int, buf: *const c_void,
                                    len: usize, flags: c_int, zc_flags: c_uint)
{
    io_uring_prep_rw(IORING_OP_SEND_ZC, sqe, sockfd, buf, len as u32, 0);
    (*sqe).__liburing_anon_3.msg_flags = flags as u32;
    (*sqe).ioprio = zc_flags as _;
}

/// Zero-copy send from the registered buffer identified by `buf_index`.
#[inline]
pub unsafe fn io_uring_prep_send_zc_fixed(sqe: *mut io_uring_sqe, sockfd: c_int,
                                          buf: *const c_void, len: usize, flags: c_int,
                                          zc_flags: c_uint, buf_index: c_uint)
{
    io_uring_prep_send_zc(sqe, sockfd, buf, len, flags, zc_flags);
    (*sqe).ioprio |= IORING_RECVSEND_FIXED_BUF as u16;
    (*sqe).__liburing_anon_4.buf_index = buf_index as _;
}

/// Zero-copy sendmsg: identical prep to sendmsg with the ZC opcode.
#[inline]
pub unsafe fn io_uring_prep_sendmsg_zc(sqe: *mut io_uring_sqe, fd: c_int, msg: *const msghdr,
                                       flags: c_uint)
{
    io_uring_prep_sendmsg(sqe, fd, msg, flags);
    (*sqe).opcode = IORING_OP_SENDMSG_ZC as _;
}

/// Zero-copy sendmsg from the registered buffer identified by `buf_index`.
#[inline]
pub unsafe fn io_uring_prep_sendmsg_zc_fixed(sqe: *mut io_uring_sqe, fd: c_int,
                                             msg: *const msghdr, flags: c_uint, buf_index: c_uint)
{
    io_uring_prep_sendmsg_zc(sqe, fd, msg, flags);
    (*sqe).ioprio |= IORING_RECVSEND_FIXED_BUF as u16;
    (*sqe).__liburing_anon_4.buf_index = buf_index as _;
}
799
/// recv(2) equivalent; `len` is truncated to the 32-bit len field.
#[inline]
pub unsafe fn io_uring_prep_recv(sqe: *mut io_uring_sqe, sockfd: c_int, buf: *mut c_void,
                                 len: usize, flags: c_int)
{
    io_uring_prep_rw(IORING_OP_RECV, sqe, sockfd, buf, len as u32, 0);
    (*sqe).__liburing_anon_3.msg_flags = flags as u32;
}

/// Multishot recv: sets IORING_RECV_MULTISHOT so a single SQE can produce
/// multiple completions.
#[inline]
pub unsafe fn io_uring_prep_recv_multishot(sqe: *mut io_uring_sqe, sockfd: c_int,
                                           buf: *mut c_void, len: usize, flags: c_int)
{
    io_uring_prep_recv(sqe, sockfd, buf, len, flags);
    (*sqe).ioprio |= IORING_RECV_MULTISHOT as u16;
}
815
/// Validate that `buf` (of length `buf_len`) is at least large enough for
/// the `io_uring_recvmsg_out` header plus the name and control areas sized
/// by `msgh`; returns the buffer reinterpreted as `io_uring_recvmsg_out`, or
/// NULL when it is too small.
#[inline]
pub unsafe fn io_uring_recvmsg_validate(buf: *mut c_void, buf_len: c_int, msgh: *mut msghdr)
                                        -> *mut io_uring_recvmsg_out
{
    let header = (*msgh).msg_controllen as usize
                 + (*msgh).msg_namelen as usize
                 + mem::size_of::<io_uring_recvmsg_out>();

    if buf_len < 0 || (buf_len as usize) < header {
        return ptr::null_mut();
    }

    buf.cast()
}

/// The name (source-address) area starts immediately after the fixed header.
#[inline]
pub unsafe fn io_uring_recvmsg_name(o: *mut io_uring_recvmsg_out) -> *mut c_void
{
    o.add(1).cast()
}

/// First control-message header, located right after the name area; NULL
/// when the control area cannot hold even one `cmsghdr`.
#[inline]
pub unsafe fn io_uring_recvmsg_cmsg_firsthdr(o: *mut io_uring_recvmsg_out, msgh: *mut msghdr)
                                             -> *mut cmsghdr
{
    if ((*o).controllen as usize) < mem::size_of::<cmsghdr>() {
        return ptr::null_mut();
    }

    io_uring_recvmsg_name(o).cast::<u8>()
                            .add((*msgh).msg_namelen as _)
                            .cast()
}
849
/// Return the control-message header following `cmsg` within the recvmsg
/// payload, or NULL when `cmsg` is malformed or the last entry.
#[inline]
pub unsafe fn io_uring_recvmsg_cmsg_nexthdr(o: *mut io_uring_recvmsg_out, msgh: *mut msghdr,
                                            cmsg: *mut cmsghdr)
                                            -> *mut cmsghdr
{
    // Round `len` up to the platform word size (the C CMSG_ALIGN macro).
    #[allow(non_snake_case)]
    fn CMSG_ALIGN(len: usize) -> usize
    {
        ((len) + mem::size_of::<usize>() - 1) & !(mem::size_of::<usize>() - 1)
    }

    if ((*cmsg).cmsg_len as usize) < mem::size_of::<cmsghdr>() {
        return ptr::null_mut();
    }

    // One-past-the-end of the control data region.
    let end = io_uring_recvmsg_cmsg_firsthdr(o, msgh).cast::<u8>()
                                                     .add((*o).controllen as _);

    // Step past the current (aligned) entry.
    let cmsg = cmsg.cast::<u8>()
                   .add(CMSG_ALIGN((*cmsg).cmsg_len as usize))
                   .cast::<cmsghdr>();

    // Reject if the next header itself would not fit before `end`...
    if cmsg.add(1).cast::<u8>() > end {
        return ptr::null_mut();
    }

    // ...or if its declared (aligned) length overruns the region.
    if cmsg.cast::<u8>().add(CMSG_ALIGN((*cmsg).cmsg_len as usize)) > end {
        return ptr::null_mut();
    }

    cmsg
}
882
/// Payload area: begins after the name and control areas.
#[inline]
pub unsafe fn io_uring_recvmsg_payload(o: *mut io_uring_recvmsg_out, msgh: *mut msghdr)
                                       -> *mut c_void
{
    io_uring_recvmsg_name(o).cast::<u8>()
                            .add((*msgh).msg_namelen as usize + (*msgh).msg_controllen as usize)
                            .cast::<c_void>()
}

/// Number of payload bytes: distance from the payload start to the end of
/// the buffer (`o + buf_len`).
#[inline]
pub unsafe fn io_uring_recvmsg_payload_length(o: *mut io_uring_recvmsg_out, buf_len: c_int,
                                              msgh: *mut msghdr)
                                              -> c_uint
{
    let payload_start = io_uring_recvmsg_payload(o, msgh) as usize;
    let payload_end = o as usize + buf_len as usize;
    (payload_end - payload_start) as _
}
901
/*
 * Prepare an openat2(2) request: `how` travels in the offset field and its
 * size in the len field, per the IORING_OP_OPENAT2 ABI.
 */
#[inline]
pub unsafe fn io_uring_prep_openat2(sqe: *mut io_uring_sqe, dfd: c_int, path: *const c_char,
                                    how: *const open_how)
{
    io_uring_prep_rw(IORING_OP_OPENAT2 as _,
                     sqe,
                     dfd,
                     path.cast(),
                     mem::size_of::<open_how>() as u32,
                     how as usize as u64);
}
913
/* open directly into the fixed file table: like io_uring_prep_openat2(),
 * but the resulting fd is installed at `file_index` of the ring's fixed
 * file table; IORING_FILE_INDEX_ALLOC lets the kernel pick a free slot. */
#[inline]
pub unsafe fn io_uring_prep_openat2_direct(sqe: *mut io_uring_sqe, dfd: c_int,
                                           path: *const c_char, how: *const open_how,
                                           mut file_index: c_uint)
{
    io_uring_prep_openat2(sqe, dfd, path, how);
    /* offset by 1 for allocation */
    if file_index == IORING_FILE_INDEX_ALLOC as _ {
        file_index -= 1;
    }
    __io_uring_set_target_fixed_file(sqe, file_index);
}
927
/*
 * Prepare an epoll_ctl(2) request: `op` is carried in the len field and the
 * target fd in the offset field.
 */
#[inline]
pub unsafe fn io_uring_prep_epoll_ctl(sqe: *mut io_uring_sqe, epfd: c_int, fd: c_int, op: c_int,
                                      ev: *const epoll_event)
{
    io_uring_prep_rw(IORING_OP_EPOLL_CTL, sqe, epfd, ev.cast(), op as u32, u64::from(fd as u32));
}
934
/*
 * Provide `nr` buffers of `len` bytes starting at `addr` to buffer group
 * `bgid`, with buffer IDs beginning at `bid`.
 */
#[inline]
pub unsafe fn io_uring_prep_provide_buffers(sqe: *mut io_uring_sqe, addr: *mut c_void, len: c_int,
                                            nr: c_int, bgid: c_int, bid: c_int)
{
    io_uring_prep_rw(IORING_OP_PROVIDE_BUFFERS, sqe, nr, addr, len as u32, bid as u64);
    (*sqe).__liburing_anon_4.buf_group = bgid as u16;
}
942
/* Remove `nr` previously-provided buffers from buffer group `bgid`. */
#[inline]
pub unsafe fn io_uring_prep_remove_buffers(sqe: *mut io_uring_sqe, nr: c_int, bgid: c_int)
{
    io_uring_prep_rw(IORING_OP_REMOVE_BUFFERS, sqe, nr, ptr::null_mut(), 0, 0);
    (*sqe).__liburing_anon_4.buf_group = bgid as u16;
}
949
/* Prepare a shutdown(2) request; `how` is carried in the len field. */
#[inline]
pub unsafe fn io_uring_prep_shutdown(sqe: *mut io_uring_sqe, fd: c_int, how: c_int)
{
    io_uring_prep_rw(IORING_OP_SHUTDOWN, sqe, fd, ptr::null_mut(), how as u32, 0);
}
955
/* Prepare an unlinkat(2) request relative to directory fd `dfd`. */
#[inline]
pub unsafe fn io_uring_prep_unlinkat(sqe: *mut io_uring_sqe, dfd: c_int, path: *const c_char,
                                     flags: c_int)
{
    io_uring_prep_rw(IORING_OP_UNLINKAT, sqe, dfd, path.cast(), 0, 0);
    (*sqe).__liburing_anon_3.unlink_flags = flags as u32;
}
963
/* Convenience wrapper: unlinkat() relative to the current working directory. */
#[inline]
pub unsafe fn io_uring_prep_unlink(sqe: *mut io_uring_sqe, path: *const c_char, flags: c_int)
{
    io_uring_prep_unlinkat(sqe, AT_FDCWD, path, flags);
}
969
/*
 * Prepare a renameat2(2)-style request. `newdfd` rides in the len field and
 * the new path pointer in the offset field, per the IORING_OP_RENAMEAT ABI.
 */
#[inline]
pub unsafe fn io_uring_prep_renameat(sqe: *mut io_uring_sqe, olddfd: c_int,
                                     oldpath: *const c_char, newdfd: c_int,
                                     newpath: *const c_char, flags: c_uint)
{
    io_uring_prep_rw(IORING_OP_RENAMEAT,
                     sqe,
                     olddfd,
                     oldpath.cast(),
                     newdfd as u32,
                     newpath as usize as u64);
    (*sqe).__liburing_anon_3.rename_flags = flags;
}
983
/* Convenience wrapper: rename with both paths relative to the CWD, no flags. */
#[inline]
pub unsafe fn io_uring_prep_rename(sqe: *mut io_uring_sqe, oldpath: *const c_char,
                                   newpath: *const c_char)
{
    io_uring_prep_renameat(sqe, AT_FDCWD, oldpath, AT_FDCWD, newpath, 0);
}
990
/* Prepare a sync_file_range(2) request over [offset, offset + len). */
#[inline]
pub unsafe fn io_uring_prep_sync_file_range(sqe: *mut io_uring_sqe, fd: c_int, len: c_uint,
                                            offset: u64, flags: c_int)
{
    io_uring_prep_rw(IORING_OP_SYNC_FILE_RANGE, sqe, fd, ptr::null_mut(), len, offset);
    (*sqe).__liburing_anon_3.sync_range_flags = flags as u32;
}
998
/* Prepare a mkdirat(2) request; `mode` is carried in the len field. */
#[inline]
pub unsafe fn io_uring_prep_mkdirat(sqe: *mut io_uring_sqe, dfd: c_int, path: *const c_char,
                                    mode: mode_t)
{
    io_uring_prep_rw(IORING_OP_MKDIRAT, sqe, dfd, path.cast(), mode, 0);
}
1005
/* Convenience wrapper: mkdirat() relative to the current working directory. */
#[inline]
pub unsafe fn io_uring_prep_mkdir(sqe: *mut io_uring_sqe, path: *const c_char, mode: mode_t)
{
    io_uring_prep_mkdirat(sqe, AT_FDCWD, path, mode);
}
1011
/*
 * Prepare a symlinkat(2) request: `target` in the addr field, `linkpath` in
 * the offset field, len unused.
 */
#[inline]
pub unsafe fn io_uring_prep_symlinkat(sqe: *mut io_uring_sqe, target: *const c_char,
                                      newdirfd: c_int, linkpath: *const c_char)
{
    io_uring_prep_rw(IORING_OP_SYMLINKAT,
                     sqe,
                     newdirfd,
                     target.cast(),
                     0,
                     linkpath as usize as u64);
}
/* Convenience wrapper: symlinkat() relative to the current working directory. */
#[inline]
pub unsafe fn io_uring_prep_symlink(sqe: *mut io_uring_sqe, target: *const c_char,
                                    linkpath: *const c_char)
{
    io_uring_prep_symlinkat(sqe, target, AT_FDCWD, linkpath);
}
1029
/*
 * Prepare a linkat(2) request. As with renameat, `newdfd` rides in the len
 * field and the new path pointer in the offset field.
 */
#[inline]
pub unsafe fn io_uring_prep_linkat(sqe: *mut io_uring_sqe, olddfd: c_int, oldpath: *const c_char,
                                   newdfd: c_int, newpath: *const c_char, flags: c_int)
{
    io_uring_prep_rw(IORING_OP_LINKAT,
                     sqe,
                     olddfd,
                     oldpath.cast(),
                     newdfd as u32,
                     newpath as usize as u64);
    (*sqe).__liburing_anon_3.hardlink_flags = flags as u32;
}
1042
/* Convenience wrapper: linkat() with both paths relative to the CWD. */
#[inline]
pub unsafe fn io_uring_prep_link(sqe: *mut io_uring_sqe, oldpath: *const c_char,
                                 newpath: *const c_char, flags: c_int)
{
    io_uring_prep_linkat(sqe, AT_FDCWD, oldpath, AT_FDCWD, newpath, flags);
}
1049
/*
 * Send a message to the ring identified by `fd`, also passing `cqe_flags`
 * through to the target CQE. IORING_MSG_RING_FLAGS_PASS tells the kernel to
 * take the CQE flags from the file_index field.
 */
#[inline]
pub unsafe fn io_uring_prep_msg_ring_cqe_flags(sqe: *mut io_uring_sqe, fd: c_int, len: c_uint,
                                               data: u64, flags: c_uint, cqe_flags: c_uint)
{
    io_uring_prep_rw(IORING_OP_MSG_RING, sqe, fd, ptr::null_mut(), len, data);
    (*sqe).__liburing_anon_3.msg_ring_flags = IORING_MSG_RING_FLAGS_PASS | flags;
    (*sqe).__liburing_anon_5.file_index = cqe_flags;
}
1058
1059#[inline]
1060pub unsafe fn io_uring_prep_msg_ring(sqe: *mut io_uring_sqe, fd: c_int, len: c_uint, data: u64,
1061                                     flags: c_uint)
1062{
1063    io_uring_prep_rw(IORING_OP_MSG_RING, sqe, fd, ptr::null_mut(), len, data);
1064    (*sqe).__liburing_anon_3.msg_ring_flags = IORING_MSG_RING_FLAGS_PASS | flags;
1065}
1066
/*
 * Pass the fixed file `source_fd` from this ring to the ring identified by
 * `fd`, installing it at `target_fd` in the target's fixed file table
 * (IORING_FILE_INDEX_ALLOC lets the target kernel side pick a slot).
 */
#[inline]
pub unsafe fn io_uring_prep_msg_ring_fd(sqe: *mut io_uring_sqe, fd: c_int, source_fd: c_int,
                                        mut target_fd: c_int, data: u64, flags: c_uint)
{
    /* IORING_MSG_SEND_FD is encoded in the addr field as the sub-command. */
    io_uring_prep_rw(IORING_OP_MSG_RING,
                     sqe,
                     fd,
                     IORING_MSG_SEND_FD as usize as *const c_void,
                     0,
                     data);
    (*sqe).__liburing_anon_6.__liburing_anon_1.as_mut().addr3 = source_fd as _;
    /* offset by 1 for allocation */
    if target_fd == IORING_FILE_INDEX_ALLOC as _ {
        target_fd -= 1;
    }
    __io_uring_set_target_fixed_file(sqe, target_fd as _);
    (*sqe).__liburing_anon_3.msg_ring_flags = flags;
}
1085
/* Like io_uring_prep_msg_ring_fd(), letting the kernel allocate the slot. */
#[inline]
pub unsafe fn io_uring_prep_msg_ring_fd_alloc(sqe: *mut io_uring_sqe, fd: c_int, source_fd: c_int,
                                              data: u64, flags: c_uint)
{
    io_uring_prep_msg_ring_fd(sqe, fd, source_fd, IORING_FILE_INDEX_ALLOC, data, flags);
}
1092
/*
 * Prepare a getxattr(2) request: name in addr, value buffer in offset,
 * buffer length in len, and the path pointer in addr3.
 */
#[inline]
pub unsafe fn io_uring_prep_getxattr(sqe: *mut io_uring_sqe, name: *const c_char,
                                     value: *mut c_char, path: *const c_char, len: c_uint)
{
    io_uring_prep_rw(IORING_OP_GETXATTR, sqe, 0, name.cast(), len, value as usize as u64);
    (*sqe).__liburing_anon_6.__liburing_anon_1.as_mut().addr3 = path as usize as u64;

    (*sqe).__liburing_anon_3.xattr_flags = 0;
}
1102
/* Prepare a setxattr(2) request; same field layout as getxattr plus flags. */
#[inline]
pub unsafe fn io_uring_prep_setxattr(sqe: *mut io_uring_sqe, name: *const c_char,
                                     value: *const c_char, path: *const c_char, flags: c_int,
                                     len: c_uint)
{
    io_uring_prep_rw(IORING_OP_SETXATTR, sqe, 0, name.cast(), len, value as usize as u64);
    (*sqe).__liburing_anon_6.__liburing_anon_1.as_mut().addr3 = path as usize as u64;
    (*sqe).__liburing_anon_3.xattr_flags = flags as _;
}
1112
/* fd-based variant of io_uring_prep_getxattr(). */
#[inline]
pub unsafe fn io_uring_prep_fgetxattr(sqe: *mut io_uring_sqe, fd: c_int, name: *const c_char,
                                      value: *mut c_char, len: c_uint)
{
    io_uring_prep_rw(IORING_OP_FGETXATTR, sqe, fd, name.cast(), len, value as usize as u64);
    (*sqe).__liburing_anon_3.xattr_flags = 0;
}
1120
/* fd-based variant of io_uring_prep_setxattr(). */
#[inline]
pub unsafe fn io_uring_prep_fsetxattr(sqe: *mut io_uring_sqe, fd: c_int, name: *const c_char,
                                      value: *mut c_char, flags: c_int, len: c_uint)
{
    io_uring_prep_rw(IORING_OP_FSETXATTR, sqe, fd, name.cast(), len, value as usize as u64);
    (*sqe).__liburing_anon_3.xattr_flags = flags as _;
}
1128
/*
 * Prepare a socket(2) request: domain rides in the fd slot, protocol in len
 * and type in the offset field; `flags` goes into rw_flags.
 */
#[inline]
pub unsafe fn io_uring_prep_socket(sqe: *mut io_uring_sqe, domain: c_int, r#type: c_int,
                                   protocol: c_int, flags: c_uint)
{
    io_uring_prep_rw(IORING_OP_SOCKET,
                     sqe,
                     domain,
                     ptr::null_mut(),
                     protocol as u32,
                     r#type as u64);
    (*sqe).__liburing_anon_3.rw_flags = flags as i32;
}
1141
/*
 * socket(2) directly into the fixed file table at `file_index`
 * (IORING_FILE_INDEX_ALLOC lets the kernel choose a slot).
 */
#[inline]
pub unsafe fn io_uring_prep_socket_direct(sqe: *mut io_uring_sqe, domain: c_int, r#type: c_int,
                                          protocol: c_int, mut file_index: c_uint, flags: c_uint)
{
    io_uring_prep_rw(IORING_OP_SOCKET,
                     sqe,
                     domain,
                     ptr::null_mut(),
                     protocol as u32,
                     r#type as u64);
    (*sqe).__liburing_anon_3.rw_flags = flags as i32;
    /* offset by 1 for allocation */
    if file_index == IORING_FILE_INDEX_ALLOC as _ {
        file_index -= 1;
    }
    __io_uring_set_target_fixed_file(sqe, file_index);
}
1159
/* socket(2) into a kernel-allocated slot of the fixed file table. */
#[inline]
pub unsafe fn io_uring_prep_socket_direct_alloc(sqe: *mut io_uring_sqe, domain: c_int,
                                                r#type: c_int, protocol: c_int, flags: c_uint)
{
    io_uring_prep_rw(IORING_OP_SOCKET,
                     sqe,
                     domain,
                     ptr::null_mut(),
                     protocol as u32,
                     r#type as u64);
    (*sqe).__liburing_anon_3.rw_flags = flags as i32;
    /* ALLOC sentinel is encoded as (IORING_FILE_INDEX_ALLOC - 1), see the
     * "offset by 1" convention in the other *_direct helpers. */
    __io_uring_set_target_fixed_file(sqe, (IORING_FILE_INDEX_ALLOC - 1) as _);
}
1173
/*
 * Common initialization for passthrough commands: fills opcode/fd/cmd_op and
 * zeroes the pad, addr and len fields by hand (it deliberately does not go
 * through io_uring_prep_rw()).
 */
#[inline]
pub unsafe fn __io_uring_prep_uring_cmd(sqe: *mut io_uring_sqe, op: c_int, cmd_op: u32, fd: c_int)
{
    (*sqe).opcode = op as _;
    (*sqe).fd = fd;
    (*sqe).__liburing_anon_1.__liburing_anon_1.cmd_op = cmd_op;
    (*sqe).__liburing_anon_1.__liburing_anon_1.__pad1 = 0;
    (*sqe).__liburing_anon_2.addr = 0;
    (*sqe).len = 0;
}
1184
/* Prepare a normal-size (64B SQE) passthrough command. */
#[inline]
pub unsafe fn io_uring_prep_uring_cmd(sqe: *mut io_uring_sqe, cmd_op: c_int, fd: c_int)
{
    __io_uring_prep_uring_cmd(sqe, IORING_OP_URING_CMD as _, cmd_op as _, fd);
}
1190
/* Prepare a 128B-SQE passthrough command (see io_uring_get_sqe128()). */
#[inline]
pub unsafe fn io_uring_prep_uring_cmd128(sqe: *mut io_uring_sqe, cmd_op: c_int, fd: c_int)
{
    __io_uring_prep_uring_cmd(sqe, IORING_OP_URING_CMD128 as _, cmd_op as _, fd);
}
1196
/*
 * Prepare commands for sockets (SOCKET_URING_OP_* passthrough, e.g.
 * getsockopt/setsockopt style operations).
 */
#[inline]
pub unsafe fn io_uring_prep_cmd_sock(sqe: *mut io_uring_sqe, cmd_op: c_int, fd: c_int,
                                     level: c_int, optname: c_int, optval: *mut c_void,
                                     optlen: c_int)
{
    io_uring_prep_uring_cmd(sqe, cmd_op as _, fd);

    /* level/optname/optval/optlen are scattered over SQE union fields per
     * the socket-command ABI; cmd_op is re-stored after the union writes. */
    *(*sqe).__liburing_anon_6.optval.as_mut() = optval as usize as _;
    (*sqe).__liburing_anon_2.__liburing_anon_1.optname = optname as _;
    (*sqe).__liburing_anon_5.optlen = optlen as _;
    (*sqe).__liburing_anon_1.__liburing_anon_1.cmd_op = cmd_op as _;
    (*sqe).__liburing_anon_2.__liburing_anon_1.level = level as _;
}
1213
/*
 * Prepare a getsockname(2)/getpeername(2) socket command: nonzero `peer`
 * (stored in optlen) selects the peer address.
 */
#[inline]
pub unsafe fn io_uring_prep_cmd_getsockname(sqe: *mut io_uring_sqe, fd: c_int,
                                            sockaddr: *mut sockaddr, sockaddr_len: *mut socklen_t,
                                            peer: c_int)
{
    io_uring_prep_uring_cmd(sqe, SOCKET_URING_OP_GETSOCKNAME as _, fd);

    (*sqe).__liburing_anon_2.addr = sockaddr as _;
    (*sqe).__liburing_anon_6.__liburing_anon_1.as_mut().addr3 = sockaddr_len as _;
    (*sqe).__liburing_anon_5.optlen = peer as _;
}
1225
/*
 * Prepare a waitid(2) request: `idtype` travels in the len field, `options`
 * in file_index and the siginfo output pointer in addr2.
 */
#[inline]
pub unsafe fn io_uring_prep_waitid(sqe: *mut io_uring_sqe, idtype: idtype_t, id: id_t,
                                   infop: *mut siginfo_t, options: c_int, flags: c_uint)
{
    io_uring_prep_rw(IORING_OP_WAITID, sqe, id as _, ptr::null_mut(), idtype, 0);
    (*sqe).__liburing_anon_3.waitid_flags = flags;
    (*sqe).__liburing_anon_5.file_index = options as _;
    (*sqe).__liburing_anon_1.addr2 = infop as usize as u64;
}
1235
/*
 * Prepare a futex wake: wake up to `val` waiters on `futex` matching `mask`
 * (mask is carried in addr3, futex_flags in the fd slot).
 */
#[inline]
pub unsafe fn io_uring_prep_futex_wake(sqe: *mut io_uring_sqe, futex: *const u32, val: u64,
                                       mask: u64, futex_flags: u32, flags: c_uint)
{
    io_uring_prep_rw(IORING_OP_FUTEX_WAKE, sqe, futex_flags as _, futex.cast(), 0, val);
    (*sqe).__liburing_anon_3.futex_flags = flags;
    (*sqe).__liburing_anon_6.__liburing_anon_1.as_mut().addr3 = mask;
}
1244
/* Prepare a futex wait on `futex` for expected value `val`; layout mirrors
 * io_uring_prep_futex_wake(). */
#[inline]
pub unsafe fn io_uring_prep_futex_wait(sqe: *mut io_uring_sqe, futex: *const u32, val: u64,
                                       mask: u64, futex_flags: u32, flags: c_uint)
{
    io_uring_prep_rw(IORING_OP_FUTEX_WAIT, sqe, futex_flags as _, futex.cast(), 0, val);
    (*sqe).__liburing_anon_3.futex_flags = flags;
    (*sqe).__liburing_anon_6.__liburing_anon_1.as_mut().addr3 = mask;
}
1253
/* Prepare a vectored futex wait over `nr_futex` futex_waitv entries. */
#[inline]
pub unsafe fn io_uring_prep_futex_waitv(sqe: *mut io_uring_sqe, futex: *const futex_waitv,
                                        nr_futex: u32, flags: c_uint)
{
    io_uring_prep_rw(IORING_OP_FUTEX_WAITV, sqe, 0, futex.cast(), nr_futex, 0);
    (*sqe).__liburing_anon_3.futex_flags = flags;
}
1261
/*
 * Install the fixed-table file at slot `fd` as a regular process fd. The
 * IOSQE_FIXED_FILE flag is mandatory here since `fd` indexes the fixed table.
 */
#[inline]
pub unsafe fn io_uring_prep_fixed_fd_install(sqe: *mut io_uring_sqe, fd: c_int, flags: c_uint)
{
    io_uring_prep_rw(IORING_OP_FIXED_FD_INSTALL, sqe, fd, ptr::null_mut(), 0, 0);

    (*sqe).flags = IOSQE_FIXED_FILE as _;
    (*sqe).__liburing_anon_3.install_fd_flags = flags;
}
1270
/* Prepare an ftruncate(2) request; the new length rides in the offset field. */
#[inline]
pub unsafe fn io_uring_prep_ftruncate(sqe: *mut io_uring_sqe, fd: c_int, len: c_longlong)
{
    io_uring_prep_rw(IORING_OP_FTRUNCATE, sqe, fd, ptr::null_mut(), 0, len as _);
}
1276
/*
 * Prepare a block-device discard passthrough command over
 * [offset, offset + nbytes).
 */
#[inline]
pub unsafe fn io_uring_prep_cmd_discard(sqe: *mut io_uring_sqe, fd: c_int, offset: u64, nbytes: u64)
{
    // TODO: really someday fix this
    // We need bindgen to actually evaluate this macro's value during generation.
    // No idea if hard-coding this value like this is viable in practice.
    // The constant encodes _IO(0x12, 0), i.e. (type << 8) | nr with nr == 0.
    io_uring_prep_uring_cmd(sqe, ((0x12) << 8) as _ /* BLOCK_URING_CMD_DISCARD */, fd);

    (*sqe).__liburing_anon_2.addr = offset;
    (*sqe).__liburing_anon_6.__liburing_anon_1.as_mut().addr3 = nbytes;
}
1288
/* Prepare a pipe2(2) request; the two new fds are written to `fds`. */
#[inline]
pub unsafe fn io_uring_prep_pipe(sqe: *mut io_uring_sqe, fds: *mut c_int, pipe_flags: c_int)
{
    io_uring_prep_rw(IORING_OP_PIPE, sqe, 0, fds as *const _, 0, 0);
    (*sqe).__liburing_anon_3.pipe_flags = pipe_flags as u32;
}
1295
/* setup pipe directly into the fixed file table, starting at slot
 * `file_index` (IORING_FILE_INDEX_ALLOC lets the kernel pick slots) */
#[inline]
pub unsafe fn io_uring_prep_pipe_direct(sqe: *mut io_uring_sqe, fds: *mut c_int,
                                        pipe_flags: c_int, mut file_index: c_uint)
{
    io_uring_prep_pipe(sqe, fds, pipe_flags);
    /* offset by 1 for allocation */
    if file_index == IORING_FILE_INDEX_ALLOC as u32 {
        file_index -= 1;
    }
    __io_uring_set_target_fixed_file(sqe, file_index);
}
1308
/* Read the kernel's SQ head index with appropriate memory ordering */
#[inline]
pub unsafe fn io_uring_load_sq_head(ring: *mut io_uring) -> c_uint
{
    /*
     * Without acquire ordering, we could overwrite a SQE before the kernel
     * finished reading it. We don't need the acquire ordering for
     * non-SQPOLL since then we drive updates.
     */
    if (*ring).flags & IORING_SETUP_SQPOLL > 0 {
        return io_uring_smp_load_acquire((*ring).sq.khead);
    }

    /* Non-SQPOLL: only this side advances the head, plain read suffices. */
    *(*ring).sq.khead
}
1324
1325/*
1326 * Returns number of unconsumed (if SQPOLL) or unsubmitted entries exist in
1327 * the SQ ring
1328 */
1329#[inline]
1330pub unsafe fn io_uring_sq_ready(ring: *mut io_uring) -> c_uint
1331{
1332    (*ring).sq.sqe_tail - io_uring_load_sq_head(ring)
1333}
1334
/*
 * Returns how much space is left in the SQ ring.
 */
#[inline]
pub unsafe fn io_uring_sq_space_left(ring: *mut io_uring) -> c_uint
{
    /* Capacity minus entries currently pending submission/consumption. */
    (*ring).sq.ring_entries - io_uring_sq_ready(ring)
}
1343
1344/*
1345 * Returns the bit shift needed to index the SQ.
1346 * This shift is 1 for rings with big SQEs, and 0 for rings with normal SQEs.
1347 * SQE `index` can be computed as &sq.sqes[(index & sq.ring_mask) << sqe_shift].
1348 */
1349#[must_use]
1350#[inline]
1351pub fn io_uring_sqe_shift_from_flags(flags: c_uint) -> c_uint
1352{
1353    u32::from(flags & IORING_SETUP_SQE128 != 0)
1354}
1355
/* Ring-based convenience form of io_uring_sqe_shift_from_flags(). */
#[inline]
pub unsafe fn io_uring_sqe_shift(ring: *mut io_uring) -> c_uint
{
    io_uring_sqe_shift_from_flags((*ring).flags)
}
1361
/*
 * Only applicable when using SQPOLL - allows the caller to wait for space
 * to free up in the SQ ring, which happens when the kernel side thread has
 * consumed one or more entries. If the SQ ring is currently non-full, no
 * action is taken. Note: may return -EINVAL if the kernel doesn't support
 * this feature.
 */
#[inline]
pub unsafe fn io_uring_sqring_wait(ring: *mut io_uring) -> c_int
{
    /* Nothing to wait for unless the kernel thread drives consumption. */
    if (*ring).flags & IORING_SETUP_SQPOLL == 0 {
        return 0;
    }
    /* Fast path: ring already has room. */
    if io_uring_sq_space_left(ring) > 0 {
        return 0;
    }

    __io_uring_sqring_wait(ring)
}
1381
1382/*
1383 * Returns how many unconsumed entries are ready in the CQ ring
1384 */
1385#[inline]
1386pub unsafe fn io_uring_cq_ready(ring: *mut io_uring) -> c_uint
1387{
1388    io_uring_smp_load_acquire((*ring).cq.ktail) - *(*ring).cq.khead
1389}
1390
/*
 * Returns true if there are overflow entries waiting to be flushed onto
 * the CQ ring
 */
#[inline]
pub unsafe fn io_uring_cq_has_overflow(ring: *mut io_uring) -> bool
{
    /* The overflow bit lives in the SQ-ring flags word, set by the kernel. */
    IO_URING_READ_ONCE((*ring).sq.kflags) & IORING_SQ_CQ_OVERFLOW > 0
}
1400
/*
 * Returns true if the eventfd notification is currently enabled
 */
#[inline]
pub unsafe fn io_uring_cq_eventfd_enabled(ring: *mut io_uring) -> bool
{
    /* Rings without a CQ flags word cannot disable eventfd notification,
     * so report it as enabled. */
    if (*ring).cq.kflags.is_null() {
        return true;
    }
    (*(*ring).cq.kflags & IORING_CQ_EVENTFD_DISABLED) == 0
}
1412
/*
 * Toggle eventfd notification on or off, if an eventfd is registered with
 * the ring. Returns 0 on success (or no-op), -EOPNOTSUPP when the ring has
 * no CQ flags word to toggle.
 */
#[inline]
pub unsafe fn io_uring_cq_eventfd_toggle(ring: *mut io_uring, enabled: bool) -> c_int
{
    /* Already in the requested state: nothing to do. */
    if enabled == io_uring_cq_eventfd_enabled(ring) {
        return 0;
    }

    if (*ring).cq.kflags.is_null() {
        return -(EOPNOTSUPP as c_int);
    }

    let mut flags = *(*ring).cq.kflags;

    if enabled {
        flags &= !IORING_CQ_EVENTFD_DISABLED;
    } else {
        flags |= IORING_CQ_EVENTFD_DISABLED;
    }

    /* Plain (relaxed) store: the kernel reads this flag opportunistically. */
    IO_URING_WRITE_ONCE((*ring).cq.kflags, flags);

    0
}
1440
/*
 * Return an IO completion, waiting for 'wait_nr' completions if one isn't
 * readily available. Returns 0 with cqe_ptr filled in on success, -errno on
 * failure.
 */
#[inline]
pub unsafe fn io_uring_wait_cqe_nr(ring: *mut io_uring, cqe_ptr: *mut *mut io_uring_cqe,
                                   wait_nr: c_uint)
                                   -> c_int
{
    /* submit == 0: we only wait/reap here, never flush SQ entries. */
    __io_uring_get_cqe(ring, cqe_ptr, 0, wait_nr, ptr::null_mut())
}
1453
/*
 * Decide whether `cqe` is internal bookkeeping that the caller should never
 * see: CQEs flagged IORING_CQE_F_SKIP, and (on kernels without
 * IORING_FEAT_EXT_ARG) the library's own timeout CQEs tagged with
 * LIBURING_UDATA_TIMEOUT. Skipped CQEs are consumed here; a timeout CQE
 * with a negative result records that error through `err`. Returns true
 * when the caller should keep scanning (i.e. no error was recorded).
 */
#[inline]
unsafe fn io_uring_skip_cqe(ring: *mut io_uring, cqe: *mut io_uring_cqe, err: *mut c_int) -> bool
{
    'out: {
        /* Explicitly-skipped CQE: just consume it. */
        if (*cqe).flags & IORING_CQE_F_SKIP != 0 {
            break 'out;
        }

        /* With EXT_ARG the kernel handles timeouts; nothing to hide. */
        if (*ring).features & IORING_FEAT_EXT_ARG != 0 {
            return false;
        }

        /* Ordinary user CQE: hand it to the caller. */
        if (*cqe).user_data != LIBURING_UDATA_TIMEOUT {
            return false;
        }

        /* Internal timeout CQE: surface a failure through `err`. */
        if (*cqe).res < 0 {
            *err = (*cqe).res;
        }
    }

    io_uring_cq_advance(ring, io_uring_cqe_nr(cqe));
    *err == 0
}
1478
/*
 * Internal helper, don't use directly in applications. Use one of the
 * "official" versions of this, io_uring_peek_cqe(), io_uring_wait_cqe(),
 * or io_uring_wait_cqes*().
 *
 * Scans the CQ ring for the first CQE the caller should see, consuming
 * internal/skipped CQEs along the way (see io_uring_skip_cqe()). On return
 * *cqe_ptr is the CQE or NULL if the ring is empty, *nr_available (when
 * non-NULL) the number of ready entries, and the return value 0 or a
 * negative errno recorded from an internal timeout CQE.
 */
#[inline]
unsafe fn __io_uring_peek_cqe(ring: *mut io_uring, cqe_ptr: *mut *mut io_uring_cqe,
                              nr_available: *mut c_uint)
                              -> c_int
{
    let mut cqe;
    let mut err = 0;

    let mut available;
    let mask = (*ring).cq.ring_mask;
    let shift = io_uring_cqe_shift(ring);

    loop {
        let tail = io_uring_smp_load_acquire((*ring).cq.ktail);

        /*
         * A load_acquire on the head prevents reordering with the
         * cqe load below, ensuring that we see the correct cq entry.
         */
        let head = io_uring_smp_load_acquire((*ring).cq.khead);

        cqe = ptr::null_mut();
        /* NOTE(review): tail/head are free-running counters; after a u32
         * wrap this plain `-` would panic in debug builds — consider
         * wrapping_sub to match the C code's modular arithmetic. */
        available = tail - head;
        if available == 0 {
            break;
        }

        cqe = &raw mut *(*ring).cq.cqes.add(((head & mask) << shift) as usize);
        if !io_uring_skip_cqe(ring, cqe, &raw mut err) {
            break;
        }
    }

    *cqe_ptr = cqe;
    if !nr_available.is_null() {
        *nr_available = available;
    }
    err
}
1523
/*
 * Return an IO completion, if one is readily available. Returns 0 with
 * cqe_ptr filled in on success, -errno on failure.
 */
#[inline]
pub unsafe fn io_uring_peek_cqe(ring: *mut io_uring, cqe_ptr: *mut *mut io_uring_cqe) -> c_int
{
    if __io_uring_peek_cqe(ring, cqe_ptr, ptr::null_mut()) == 0 && !(*cqe_ptr).is_null() {
        return 0;
    }

    /* wait_nr == 0: lets the kernel flush overflow without blocking. */
    io_uring_wait_cqe_nr(ring, cqe_ptr, 0)
}
1537
/*
 * Return an IO completion, waiting for it if necessary. Returns 0 with
 * cqe_ptr filled in on success, -errno on failure.
 */
#[inline]
pub unsafe fn io_uring_wait_cqe(ring: *mut io_uring, cqe_ptr: *mut *mut io_uring_cqe) -> c_int
{
    /* Fast path: a completion is already sitting in the ring. */
    if __io_uring_peek_cqe(ring, cqe_ptr, ptr::null_mut()) == 0 && !(*cqe_ptr).is_null() {
        return 0;
    }

    io_uring_wait_cqe_nr(ring, cqe_ptr, 1)
}
1551
/*
 * Return an sqe to fill. Application must later call io_uring_submit()
 * when it's ready to tell the kernel about it. The caller may call this
 * function multiple times before calling io_uring_submit().
 *
 * Returns a vacant sqe, or NULL if we're full.
 */
#[inline]
unsafe fn _io_uring_get_sqe(ring: *mut io_uring) -> *mut io_uring_sqe
{
    let sq = &raw mut (*ring).sq;

    let head = io_uring_load_sq_head(ring);
    let tail = (*sq).sqe_tail;

    /* NOTE(review): free-running counters — plain `-` can panic in debug
     * builds once tail wraps past u32::MAX; C relies on modular math here. */
    if tail - head >= (*sq).ring_entries {
        return ptr::null_mut();
    }

    /* Index by masked tail, shifted left by one extra bit for SQE128 rings. */
    let offset = (tail & (*sq).ring_mask) << io_uring_sqe_shift(ring);
    let sqe = (*sq).sqes.add(offset as usize);
    (*sq).sqe_tail = tail + 1;
    io_uring_initialize_sqe(sqe);
    sqe
}
1577
/*
 * Return the appropriate mask for a buffer ring of size 'ring_entries'
 * (ring_entries must be a power of two, so the mask is simply size - 1).
 */
#[must_use]
#[inline]
pub fn io_uring_buf_ring_mask(ring_entries: u32) -> c_int
{
    let mask = ring_entries - 1;
    mask as c_int
}
1587
/* Reset a buffer ring's tail before (re)populating it. */
#[inline]
pub unsafe fn io_uring_buf_ring_init(br: *mut io_uring_buf_ring)
{
    (*br).__liburing_anon_1.__liburing_anon_1.as_mut().tail = 0;
}
1593
/*
 * Assign 'buf' with the addr/len/buffer ID supplied. `buf_offset` is the
 * slot relative to the current (not yet published) tail; the tail itself
 * only moves when io_uring_buf_ring_advance() is called.
 */
#[inline]
pub unsafe fn io_uring_buf_ring_add(br: *mut io_uring_buf_ring, addr: *mut c_void, len: c_uint,
                                    bid: c_ushort, mask: c_int, buf_offset: c_int)
{
    let tail = (*br).__liburing_anon_1.__liburing_anon_1.as_ref().tail;
    let buf = (*br).__liburing_anon_1
                   .bufs
                   .as_mut()
                   .as_mut_ptr()
                   .add(((i32::from(tail) + buf_offset) & mask) as usize);

    (*buf).addr = addr as usize as u64;
    (*buf).len = len;
    (*buf).bid = bid;
}
1612
/*
 * Make 'count' new buffers visible to the kernel. Called after
 * io_uring_buf_ring_add() has been called 'count' times to fill in new
 * buffers.
 */
#[inline]
pub unsafe fn io_uring_buf_ring_advance(br: *mut io_uring_buf_ring, count: c_int)
{
    let tail = (*br).__liburing_anon_1.__liburing_anon_1.as_ref().tail;
    /* The tail is a free-running u16; wrap arithmetic is intentional. */
    let new_tail = tail.wrapping_add(count as u16);

    /* Release ordering publishes the filled buffer entries before the
     * kernel can observe the new tail. */
    io_uring_smp_store_release(&raw mut (*br).__liburing_anon_1.__liburing_anon_1.as_mut().tail,
                               new_tail);
}
1627
/* Advance the buffer ring and the CQ ring by independent counts; shared
 * implementation for io_uring_buf_ring_cq_advance(). */
#[inline]
unsafe fn __io_uring_buf_ring_cq_advance(ring: *mut io_uring, br: *mut io_uring_buf_ring,
                                         cq_count: i32, buf_count: c_int)
{
    io_uring_buf_ring_advance(br, buf_count);
    io_uring_cq_advance(ring, cq_count as _);
}
1635
/*
 * Make 'count' new buffers visible to the kernel while at the same time
 * advancing the CQ ring seen entries. This can be used when the application
 * is using ring provided buffers and returns buffers while processing CQEs,
 * avoiding an extra atomic when needing to increment both the CQ ring and
 * the ring buffer index at the same time.
 */
#[inline]
pub unsafe fn io_uring_buf_ring_cq_advance(ring: *mut io_uring, br: *mut io_uring_buf_ring,
                                           count: c_int)
{
    __io_uring_buf_ring_cq_advance(ring, br, count, count);
}
1649
1650#[inline]
1651pub unsafe fn io_uring_buf_ring_available(ring: *mut io_uring, br: *mut io_uring_buf_ring,
1652                                          bgid: c_ushort)
1653                                          -> c_int
1654{
1655    let mut head = 0;
1656    let ret = io_uring_buf_ring_head(ring, bgid.into(), &raw mut head);
1657    if ret > 0 {
1658        return ret;
1659    }
1660    c_int::from((*br).__liburing_anon_1.__liburing_anon_1.as_mut().tail - head)
1661}
1662
/* Public entry point for _io_uring_get_sqe(): returns a vacant SQE or NULL. */
#[inline]
pub unsafe fn io_uring_get_sqe(ring: *mut io_uring) -> *mut io_uring_sqe
{
    _io_uring_get_sqe(ring)
}
1668
/*
 * Return a 128B sqe to fill. Applications must later call io_uring_submit()
 * when it's ready to tell the kernel about it. The caller may call this
 * function multiple times before calling io_uring_submit().
 *
 * Returns a vacant 128B sqe, or NULL if we're full. If the current tail is the
 * last entry in the ring, this function will insert a nop + skip complete such
 * that the 128b entry wraps back to the beginning of the queue for a
 * contiguous big sq entry. It's up to the caller to use a 128b opcode in order
 * for the kernel to know how to advance its sq head pointer.
 */
#[inline]
pub unsafe fn io_uring_get_sqe128(ring: *mut io_uring) -> *mut io_uring_sqe
{
    let sq = &raw mut (*ring).sq;

    let head = io_uring_load_sq_head(ring);
    let mut tail = (*sq).sqe_tail;

    /* SQE128 rings: every slot is already 128B, the normal path suffices. */
    if (*ring).flags & IORING_SETUP_SQE128 != 0 {
        return io_uring_get_sqe(ring);
    }

    /* Big SQEs in a 64B ring require mixed-SQE support. */
    if (*ring).flags & IORING_SETUP_SQE_MIXED == 0 {
        return ptr::null_mut();
    }

    let mut sqe: *mut io_uring_sqe;
    if (tail + 1) & (*sq).ring_mask == 0 {
        /* The pair would straddle the ring end: burn the last slot with a
         * skip-complete nop so the 128B entry starts at index 0. Three
         * slots are consumed in total, hence the tail + 2 bound check. */
        if (tail + 2) - head >= (*sq).ring_entries {
            return ptr::null_mut();
        }

        sqe = _io_uring_get_sqe(ring);
        io_uring_prep_nop(sqe);
        (*sqe).flags |= IOSQE_CQE_SKIP_SUCCESS as u8;
        tail = (*sq).sqe_tail;
    } else if (tail + 1) - head >= (*sq).ring_entries {
        /* No wrap needed, but the 128B entry still needs two free slots. */
        return ptr::null_mut();
    }

    /* The big entry spans two consecutive 64B slots. */
    sqe = &raw mut *(*sq).sqes.add((tail & (*sq).ring_mask) as usize);
    (*sq).sqe_tail = tail + 2;
    io_uring_initialize_sqe(sqe);
    sqe
}
1715
1716//-----------------------------------------------------------------------------
1717
/* Convert a Duration into a libc-style timespec. On 32-bit powerpc/arm
 * tv_nsec is narrower than u32, so the nanoseconds need a checked
 * conversion there instead of a lossless From. */
impl From<Duration> for timespec
{
    #[cfg(not(any(target_arch = "powerpc", target_arch = "arm")))]
    #[inline]
    fn from(duration: Duration) -> Self
    {
        // zeroed() keeps any padding/extra fields of the C struct at zero.
        let mut ts = unsafe { zeroed::<timespec>() };
        ts.tv_sec = duration.as_secs() as _;
        ts.tv_nsec = duration.subsec_nanos().into();
        ts
    }

    #[cfg(any(target_arch = "powerpc", target_arch = "arm"))]
    #[inline]
    fn from(duration: Duration) -> Self
    {
        let mut ts = unsafe { zeroed::<timespec>() };
        ts.tv_sec = duration.as_secs() as _;
        // subsec_nanos() < 1_000_000_000 always fits i32, so this never panics.
        ts.tv_nsec = duration.subsec_nanos().try_into().unwrap();
        ts
    }
}
1740
/* Convert a Duration into the kernel's 64-bit __kernel_timespec, used by
 * the io_uring timeout/wait interfaces. */
impl From<Duration> for __kernel_timespec
{
    #[inline]
    fn from(duration: Duration) -> Self
    {
        // zeroed() keeps any padding of the C struct at zero.
        let mut ts = unsafe { zeroed::<__kernel_timespec>() };
        ts.tv_sec = duration.as_secs() as _;
        ts.tv_nsec = duration.subsec_nanos().into();
        ts
    }
}