io_uring/opcode.rs

1//! Operation codes that can be used to construct [`squeue::Entry`](crate::squeue::Entry)s.
2
3#![allow(clippy::new_without_default)]
4
5use std::convert::TryInto;
6use std::mem;
7use std::os::unix::io::RawFd;
8
9use crate::squeue::Entry;
10use crate::squeue::Entry128;
11use crate::sys;
12use crate::types::{self, sealed};
13
14macro_rules! assign_fd {
15    ( $sqe:ident . fd = $opfd:expr ) => {
16        match $opfd {
17            sealed::Target::Fd(fd) => $sqe.fd = fd,
18            sealed::Target::Fixed(idx) => {
19                $sqe.fd = idx as _;
20                $sqe.flags |= crate::squeue::Flags::FIXED_FILE.bits();
21            }
22        }
23    };
24}
25
26macro_rules! opcode {
27    (@type impl sealed::UseFixed ) => {
28        sealed::Target
29    };
30    (@type impl sealed::UseFd ) => {
31        RawFd
32    };
33    (@type $name:ty ) => {
34        $name
35    };
36    (
37        $( #[$outer:meta] )*
38        pub struct $name:ident {
39            $( #[$new_meta:meta] )*
40
41            $( $field:ident : { $( $tnt:tt )+ } ),*
42
43            $(,)?
44
45            ;;
46
47            $(
48                $( #[$opt_meta:meta] )*
49                $opt_field:ident : $opt_tname:ty = $default:expr
50            ),*
51
52            $(,)?
53        }
54
55        pub const CODE = $opcode:expr;
56
57        $( #[$build_meta:meta] )*
58        pub fn build($self:ident) -> $entry:ty $build_block:block
59    ) => {
60        $( #[$outer] )*
61        pub struct $name {
62            $( $field : opcode!(@type $( $tnt )*), )*
63            $( $opt_field : $opt_tname, )*
64        }
65
66        impl $name {
67            $( #[$new_meta] )*
68            #[inline]
69            pub fn new($( $field : $( $tnt )* ),*) -> Self {
70                $name {
71                    $( $field: $field.into(), )*
72                    $( $opt_field: $default, )*
73                }
74            }
75
76            /// The opcode of the operation. This can be passed to
77            /// [`Probe::is_supported`](crate::Probe::is_supported) to check if this operation is
78            /// supported with the current kernel.
79            pub const CODE: u8 = $opcode as _;
80
81            $(
82                $( #[$opt_meta] )*
83                #[inline]
84                pub const fn $opt_field(mut self, $opt_field: $opt_tname) -> Self {
85                    self.$opt_field = $opt_field;
86                    self
87                }
88            )*
89
90            $( #[$build_meta] )*
91            #[inline]
92            pub fn build($self) -> $entry $build_block
93        }
94    }
95}
96
97/// inline zeroed to improve codegen
98#[inline(always)]
99fn sqe_zeroed() -> sys::io_uring_sqe {
100    unsafe { mem::zeroed() }
101}
102
103opcode! {
104    /// Do not perform any I/O.
105    ///
106    /// This is useful for testing the performance of the io_uring implementation itself.
107    #[derive(Debug)]
108    pub struct Nop { ;; }
109
110    pub const CODE = sys::IORING_OP_NOP;
111
112    pub fn build(self) -> Entry {
113        let Nop {} = self;
114
115        let mut sqe = sqe_zeroed();
116        sqe.opcode = Self::CODE;
117        sqe.fd = -1;
118        Entry(sqe)
119    }
120}
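
// A minimal end-to-end sketch of how an opcode from this module is typically used
// (hypothetical downstream code, not part of the crate; assumes the `io_uring` crate
// as a dependency and a kernel with io_uring support):
//
//     use io_uring::{opcode, IoUring};
//
//     let mut ring = IoUring::new(8)?;
//     let nop_e = opcode::Nop::new().build().user_data(0x01);
//     // Safety: the Nop entry borrows no buffers, so nothing can be outlived.
//     unsafe { ring.submission().push(&nop_e).expect("submission queue is full"); }
//     ring.submit_and_wait(1)?;
//     let cqe = ring.completion().next().expect("completion queue is empty");
//     assert_eq!(cqe.user_data(), 0x01);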
121
122opcode! {
123    /// Vectored read, equivalent to `preadv2(2)`.
124    #[derive(Debug)]
125    pub struct Readv {
126        fd: { impl sealed::UseFixed },
127        iovec: { *const libc::iovec },
128        len: { u32 },
129        ;;
130        ioprio: u16 = 0,
131        offset: u64 = 0,
132        /// Specified for read operations, contains a bitwise OR of per-I/O flags,
133        /// as described in the `preadv2(2)` man page.
134        rw_flags: types::RwFlags = 0,
135        buf_group: u16 = 0
136    }
137
138    pub const CODE = sys::IORING_OP_READV;
139
140    pub fn build(self) -> Entry {
141        let Readv {
142            fd,
143            iovec, len, offset,
144            ioprio, rw_flags,
145            buf_group
146        } = self;
147
148        let mut sqe = sqe_zeroed();
149        sqe.opcode = Self::CODE;
150        assign_fd!(sqe.fd = fd);
151        sqe.ioprio = ioprio;
152        sqe.__bindgen_anon_2.addr = iovec as _;
153        sqe.len = len;
154        sqe.__bindgen_anon_1.off = offset;
155        sqe.__bindgen_anon_3.rw_flags = rw_flags;
156        sqe.__bindgen_anon_4.buf_group = buf_group;
157        Entry(sqe)
158    }
159}
160
161opcode! {
162    /// Vectored write, equivalent to `pwritev2(2)`.
163    #[derive(Debug)]
164    pub struct Writev {
165        fd: { impl sealed::UseFixed },
166        iovec: { *const libc::iovec },
167        len: { u32 },
168        ;;
169        ioprio: u16 = 0,
170        offset: u64 = 0,
171        /// Specified for write operations, contains a bitwise OR of per-I/O flags,
172        /// as described in the `pwritev2(2)` man page.
173        rw_flags: types::RwFlags = 0
174    }
175
176    pub const CODE = sys::IORING_OP_WRITEV;
177
178    pub fn build(self) -> Entry {
179        let Writev {
180            fd,
181            iovec, len, offset,
182            ioprio, rw_flags
183        } = self;
184
185        let mut sqe = sqe_zeroed();
186        sqe.opcode = Self::CODE;
187        assign_fd!(sqe.fd = fd);
188        sqe.ioprio = ioprio;
189        sqe.__bindgen_anon_2.addr = iovec as _;
190        sqe.len = len;
191        sqe.__bindgen_anon_1.off = offset;
192        sqe.__bindgen_anon_3.rw_flags = rw_flags;
193        Entry(sqe)
194    }
195}
196
197opcode! {
198    /// File sync, equivalent to `fsync(2)`.
199    ///
200    /// Note that, while I/O is initiated in the order in which it appears in the submission queue,
201    /// completions are unordered. For example, an application which places a write I/O followed by
202    /// an fsync in the submission queue cannot expect the fsync to apply to the write. The two
203    /// operations execute in parallel, so the fsync may complete before the write is issued to the
204    /// storage. The same is also true for previously issued writes that have not completed prior to
205    /// the fsync.
206    #[derive(Debug)]
207    pub struct Fsync {
208        fd: { impl sealed::UseFixed },
209        ;;
210        /// The `flags` bit mask may contain either 0, for a normal file integrity sync,
211        /// or [types::FsyncFlags::DATASYNC] to provide data sync only semantics.
212        /// See the descriptions of `O_SYNC` and `O_DSYNC` in the `open(2)` manual page for more information.
213        flags: types::FsyncFlags = types::FsyncFlags::empty()
214    }
215
216    pub const CODE = sys::IORING_OP_FSYNC;
217
218    pub fn build(self) -> Entry {
219        let Fsync { fd, flags } = self;
220
221        let mut sqe = sqe_zeroed();
222        sqe.opcode = Self::CODE;
223        assign_fd!(sqe.fd = fd);
224        sqe.__bindgen_anon_3.fsync_flags = flags.bits();
225        Entry(sqe)
226    }
227}
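
// Because completions are unordered, an application that needs "write, then fsync"
// semantics can chain the two submissions with `squeue::Flags::IO_LINK`. A hedged
// sketch (hypothetical downstream code; `ring`, `fd` and `buf` are assumed to exist
// and to outlive the requests):
//
//     use io_uring::{opcode, squeue, types};
//
//     let write_e = opcode::Write::new(types::Fd(fd), buf.as_ptr(), buf.len() as _)
//         .build()
//         .flags(squeue::Flags::IO_LINK)   // do not start the next SQE until this one completes
//         .user_data(1);
//     let fsync_e = opcode::Fsync::new(types::Fd(fd)).build().user_data(2);
//     unsafe {
//         ring.submission().push(&write_e).expect("queue full");
//         ring.submission().push(&fsync_e).expect("queue full");
//     }
//     ring.submit_and_wait(2)?;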
228
229opcode! {
230    /// Read from a file into a fixed buffer that has been previously registered with
231    /// [`Submitter::register_buffers`](crate::Submitter::register_buffers).
232    ///
233    /// The return values match those documented in the `preadv2(2)` man pages.
234    #[derive(Debug)]
235    pub struct ReadFixed {
236        fd: { impl sealed::UseFixed },
237        buf: { *mut u8 },
238        len: { u32 },
239        buf_index: { u16 },
240        ;;
241        ioprio: u16 = 0,
242        /// The offset of the file to read from.
243        offset: u64 = 0,
244        /// Specified for read operations, contains a bitwise OR of per-I/O flags, as described in
245        /// the `preadv2(2)` man page.
246        rw_flags: types::RwFlags = 0
247    }
248
249    pub const CODE = sys::IORING_OP_READ_FIXED;
250
251    pub fn build(self) -> Entry {
252        let ReadFixed {
253            fd,
254            buf, len, offset,
255            buf_index,
256            ioprio, rw_flags
257        } = self;
258
259        let mut sqe = sqe_zeroed();
260        sqe.opcode = Self::CODE;
261        assign_fd!(sqe.fd = fd);
262        sqe.ioprio = ioprio;
263        sqe.__bindgen_anon_2.addr = buf as _;
264        sqe.len = len;
265        sqe.__bindgen_anon_1.off = offset;
266        sqe.__bindgen_anon_3.rw_flags = rw_flags;
267        sqe.__bindgen_anon_4.buf_index = buf_index;
268        Entry(sqe)
269    }
270}
271
272opcode! {
273    /// Write to a file from a fixed buffer that has been previously registered with
274    /// [`Submitter::register_buffers`](crate::Submitter::register_buffers).
275    ///
276    /// The return values match those documented in the `pwritev2(2)` man pages.
277    #[derive(Debug)]
278    pub struct WriteFixed {
279        fd: { impl sealed::UseFixed },
280        buf: { *const u8 },
281        len: { u32 },
282        buf_index: { u16 },
283        ;;
284        ioprio: u16 = 0,
285        /// The offset of the file to write to.
286        offset: u64 = 0,
287        /// Specified for write operations, contains a bitwise OR of per-I/O flags, as described in
288        /// the `pwritev2(2)` man page.
289        rw_flags: types::RwFlags = 0
290    }
291
292    pub const CODE = sys::IORING_OP_WRITE_FIXED;
293
294    pub fn build(self) -> Entry {
295        let WriteFixed {
296            fd,
297            buf, len, offset,
298            buf_index,
299            ioprio, rw_flags
300        } = self;
301
302        let mut sqe = sqe_zeroed();
303        sqe.opcode = Self::CODE;
304        assign_fd!(sqe.fd = fd);
305        sqe.ioprio = ioprio;
306        sqe.__bindgen_anon_2.addr = buf as _;
307        sqe.len = len;
308        sqe.__bindgen_anon_1.off = offset;
309        sqe.__bindgen_anon_3.rw_flags = rw_flags;
310        sqe.__bindgen_anon_4.buf_index = buf_index;
311        Entry(sqe)
312    }
313}
314
315opcode! {
316    /// Poll the specified fd.
317    ///
318    /// Unlike poll or epoll without `EPOLLONESHOT`, this interface defaults to one-shot mode.
319    /// That is, once the poll operation has completed, it has to be resubmitted.
320    ///
321    /// If `multi` is set, the poll will work in multishot mode instead. That means it will
322    /// repeatedly trigger when the requested event becomes true, and hence multiple CQEs can be
323    /// generated from this single submission. The CQE flags field will have IORING_CQE_F_MORE set
324    /// on completion if the application should expect further CQE entries from the original
325    /// request. If this flag isn't set on completion, then the poll request has been terminated
326    /// and no further events will be generated. This mode is available since 5.13.
327    #[derive(Debug)]
328    pub struct PollAdd {
329        /// The bits that may be set in `flags` are defined in `<poll.h>`,
330        /// and documented in `poll(2)`.
331        fd: { impl sealed::UseFixed },
332        flags: { u32 },
333        ;;
334        multi: bool = false
335    }
336
337    pub const CODE = sys::IORING_OP_POLL_ADD;
338
339    pub fn build(self) -> Entry {
340        let PollAdd { fd, flags, multi } = self;
341
342        let mut sqe = sqe_zeroed();
343        sqe.opcode = Self::CODE;
344        assign_fd!(sqe.fd = fd);
345        if multi {
346            sqe.len = sys::IORING_POLL_ADD_MULTI;
347        }
348
349        #[cfg(target_endian = "little")] {
350            sqe.__bindgen_anon_3.poll32_events = flags;
351        }
352
353        #[cfg(target_endian = "big")] {
354            let x = flags << 16;
355            let y = flags >> 16;
356            let flags = x | y;
357            sqe.__bindgen_anon_3.poll32_events = flags;
358        }
359
360        Entry(sqe)
361    }
362}
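
// A hedged sketch of the multishot mode described above (hypothetical downstream code;
// assumes a 5.13+ kernel, an existing `ring`, a pollable `fd`, and the `libc` crate):
//
//     use io_uring::{cqueue, opcode, types};
//
//     let poll_e = opcode::PollAdd::new(types::Fd(fd), libc::POLLIN as u32)
//         .multi(true)
//         .build()
//         .user_data(7);
//     unsafe { ring.submission().push(&poll_e).expect("queue full"); }
//     loop {
//         ring.submit_and_wait(1)?;
//         let cqe = ring.completion().next().expect("completion queue is empty");
//         // Each readiness event posts a CQE. When IORING_CQE_F_MORE is no longer set,
//         // the multishot poll has terminated and must be re-armed with a new submission.
//         if !cqueue::more(cqe.flags()) {
//             break;
//         }
//     }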
363
364opcode! {
365    /// Remove an existing [poll](PollAdd) request.
366    ///
367    /// If found, the `result` method of the `cqueue::Entry` will return 0.
368    /// If not found, `result` will return `-libc::ENOENT`.
369    #[derive(Debug)]
370    pub struct PollRemove {
371        user_data: { u64 }
372        ;;
373    }
374
375    pub const CODE = sys::IORING_OP_POLL_REMOVE;
376
377    pub fn build(self) -> Entry {
378        let PollRemove { user_data } = self;
379
380        let mut sqe = sqe_zeroed();
381        sqe.opcode = Self::CODE;
382        sqe.fd = -1;
383        sqe.__bindgen_anon_2.addr = user_data;
384        Entry(sqe)
385    }
386}
387
388opcode! {
389    /// Sync a file segment with disk, equivalent to `sync_file_range(2)`.
390    #[derive(Debug)]
391    pub struct SyncFileRange {
392        fd: { impl sealed::UseFixed },
393        len: { u32 },
394        ;;
395        /// The `offset` method sets the offset in bytes.
396        offset: u64 = 0,
397        /// The `flags` method sets the flags for the command.
398        flags: u32 = 0
399    }
400
401    pub const CODE = sys::IORING_OP_SYNC_FILE_RANGE;
402
403    pub fn build(self) -> Entry {
404        let SyncFileRange {
405            fd,
406            len, offset,
407            flags
408        } = self;
409
410        let mut sqe = sqe_zeroed();
411        sqe.opcode = Self::CODE;
412        assign_fd!(sqe.fd = fd);
413        sqe.len = len;
414        sqe.__bindgen_anon_1.off = offset;
415        sqe.__bindgen_anon_3.sync_range_flags = flags;
416        Entry(sqe)
417    }
418}
419
420opcode! {
421    /// Send a message on a socket, equivalent to `send(2)`.
422    ///
423    /// `fd` must be set to the socket file descriptor, `msg` must contain a pointer to the msghdr
424    /// structure, and `flags` holds the flags associated with the system call.
425    #[derive(Debug)]
426    pub struct SendMsg {
427        fd: { impl sealed::UseFixed },
428        msg: { *const libc::msghdr },
429        ;;
430        ioprio: u16 = 0,
431        flags: u32 = 0
432    }
433
434    pub const CODE = sys::IORING_OP_SENDMSG;
435
436    pub fn build(self) -> Entry {
437        let SendMsg { fd, msg, ioprio, flags } = self;
438
439        let mut sqe = sqe_zeroed();
440        sqe.opcode = Self::CODE;
441        assign_fd!(sqe.fd = fd);
442        sqe.ioprio = ioprio;
443        sqe.__bindgen_anon_2.addr = msg as _;
444        sqe.len = 1;
445        sqe.__bindgen_anon_3.msg_flags = flags;
446        Entry(sqe)
447    }
448}
449
450opcode! {
451    /// Receive a message on a socket, equivalent to `recvmsg(2)`.
452    ///
453    /// See also the description of [`SendMsg`].
454    #[derive(Debug)]
455    pub struct RecvMsg {
456        fd: { impl sealed::UseFixed },
457        msg: { *mut libc::msghdr },
458        ;;
459        ioprio: u16 = 0,
460        flags: u32 = 0,
461        buf_group: u16 = 0
462    }
463
464    pub const CODE = sys::IORING_OP_RECVMSG;
465
466    pub fn build(self) -> Entry {
467        let RecvMsg { fd, msg, ioprio, flags, buf_group } = self;
468
469        let mut sqe = sqe_zeroed();
470        sqe.opcode = Self::CODE;
471        assign_fd!(sqe.fd = fd);
472        sqe.ioprio = ioprio;
473        sqe.__bindgen_anon_2.addr = msg as _;
474        sqe.len = 1;
475        sqe.__bindgen_anon_3.msg_flags = flags;
476        sqe.__bindgen_anon_4.buf_group = buf_group;
477        Entry(sqe)
478    }
479}
480
481opcode! {
482    /// Receive multiple messages on a socket, equivalent to `recvmsg(2)`.
483    ///
484    /// Parameters:
485    ///     msg:       For this multishot variant of RecvMsg, only the msg_namelen and msg_controllen
486    ///                fields are relevant.
487    ///     buf_group: The id of the provided buffer pool to use for each received message.
488    ///
489    /// See also the description of [`SendMsg`] and [`types::RecvMsgOut`].
490    ///
491    /// The multishot version allows the application to issue a single receive request, which
492    /// repeatedly posts a CQE when data is available. It requires that the MSG_WAITALL flag is not set.
493    /// Each CQE will take a buffer out of a provided buffer pool for receiving. The application
494    /// should check the flags of each CQE, regardless of its result. If a posted CQE does not have
495    /// the IORING_CQE_F_MORE flag set then the multishot receive will be done and the application
496    /// should issue a new request.
497    ///
498    /// Unlike [`RecvMsg`], this multishot recvmsg will prepend a struct which describes the layout
499    /// of the rest of the buffer in combination with the initial msghdr structure submitted with
500    /// the request. Use [`types::RecvMsgOut`] to parse the data received and access its
501    /// components.
502    ///
503    /// The recvmsg multishot variant is available since kernel 6.0.
504    #[derive(Debug)]
505    pub struct RecvMsgMulti {
506        fd: { impl sealed::UseFixed },
507        msg: { *const libc::msghdr },
508        buf_group: { u16 },
509        ;;
510        ioprio: u16 = 0,
511        flags: u32 = 0
512    }
513
514    pub const CODE = sys::IORING_OP_RECVMSG;
515
516    pub fn build(self) -> Entry {
517        let RecvMsgMulti { fd, msg, buf_group, ioprio, flags } = self;
518
519        let mut sqe = sqe_zeroed();
520        sqe.opcode = Self::CODE;
521        assign_fd!(sqe.fd = fd);
522        sqe.__bindgen_anon_2.addr = msg as _;
523        sqe.len = 1;
524        sqe.__bindgen_anon_3.msg_flags = flags;
525        sqe.__bindgen_anon_4.buf_group = buf_group;
526        sqe.flags |= crate::squeue::Flags::BUFFER_SELECT.bits();
527        sqe.ioprio = ioprio | (sys::IORING_RECV_MULTISHOT as u16);
528        Entry(sqe)
529    }
530}
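
// A hedged, build-only sketch of the multishot recvmsg described above (hypothetical
// downstream code; assumes a 6.0+ kernel, a provided-buffer group registered under an
// assumed id `BGID`, a socket `fd`, and a zeroed `libc::msghdr` named `msghdr` that
// stays alive until the request terminates):
//
//     use io_uring::{opcode, types};
//
//     let recv_e = opcode::RecvMsgMulti::new(types::Fd(fd), &msghdr, BGID)
//         .build()
//         .user_data(0x10);
//     // Each resulting CQE takes a buffer from group BGID; the payload can be decoded
//     // with `types::RecvMsgOut::parse`, and the request stays armed while
//     // IORING_CQE_F_MORE is set on the CQE flags.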
531
532opcode! {
533    /// Register a timeout operation.
534    ///
535    /// A timeout will trigger a wakeup event on the completion ring for anyone waiting for events.
536    /// A timeout condition is met when either the specified timeout expires, or the specified number of events have completed.
537    /// Either condition will trigger the event.
538    /// The request will complete with `-ETIME` if the timeout got completed through expiration of the timer,
539    /// or 0 if the timeout got completed through requests completing on their own.
540    /// If the timeout was cancelled before it expired, the request will complete with `-ECANCELED`.
541    #[derive(Debug)]
542    pub struct Timeout {
543        timespec: { *const types::Timespec },
544        ;;
545        /// `count` may contain a completion event count.
546        /// If [`TimeoutFlags::MULTISHOT`](types::TimeoutFlags::MULTISHOT) is set in `flags`, this is the number of repeats.
547        /// A value of 0 means the timeout is indefinite and can only be stopped by a removal request.
548        count: u32 = 0,
549
550        flags: types::TimeoutFlags = types::TimeoutFlags::empty()
551    }
552
553    pub const CODE = sys::IORING_OP_TIMEOUT;
554
555    pub fn build(self) -> Entry {
556        let Timeout { timespec, count, flags } = self;
557
558        let mut sqe = sqe_zeroed();
559        sqe.opcode = Self::CODE;
560        sqe.fd = -1;
561        sqe.__bindgen_anon_2.addr = timespec as _;
562        sqe.len = 1;
563        sqe.__bindgen_anon_1.off = count as _;
564        sqe.__bindgen_anon_3.timeout_flags = flags.bits();
565        Entry(sqe)
566    }
567}
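
// A hedged sketch of a one-second timeout (hypothetical downstream code; assumes an
// existing `ring` and that the `Timespec` value outlives the request):
//
//     use io_uring::{opcode, types};
//
//     let ts = types::Timespec::new().sec(1);
//     let timeout_e = opcode::Timeout::new(&ts).build().user_data(0xdead);
//     unsafe { ring.submission().push(&timeout_e).expect("queue full"); }
//     ring.submit_and_wait(1)?;
//     // The CQE result is -ETIME when the timer expired, or 0 when the requested
//     // number of completions arrived first.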
568
569// === 5.5 ===
570
571opcode! {
572    /// Attempt to remove an existing [timeout operation](Timeout).
573    pub struct TimeoutRemove {
574        user_data: { u64 },
575        ;;
576    }
577
578    pub const CODE = sys::IORING_OP_TIMEOUT_REMOVE;
579
580    pub fn build(self) -> Entry {
581        let TimeoutRemove { user_data } = self;
582
583        let mut sqe = sqe_zeroed();
584        sqe.opcode = Self::CODE;
585        sqe.fd = -1;
586        sqe.__bindgen_anon_2.addr = user_data;
587        Entry(sqe)
588    }
589}
590
591opcode! {
592    /// Attempt to update an existing [timeout operation](Timeout) with a new timespec.
593    /// The optional `count` value of the original timeout cannot be updated.
594    pub struct TimeoutUpdate {
595        user_data: { u64 },
596        timespec: { *const types::Timespec },
597        ;;
598        flags: types::TimeoutFlags = types::TimeoutFlags::empty()
599    }
600
601    pub const CODE = sys::IORING_OP_TIMEOUT_REMOVE;
602
603    pub fn build(self) -> Entry {
604        let TimeoutUpdate { user_data, timespec, flags } = self;
605
606        let mut sqe = sqe_zeroed();
607        sqe.opcode = Self::CODE;
608        sqe.fd = -1;
609        sqe.__bindgen_anon_1.off = timespec as _;
610        sqe.__bindgen_anon_2.addr = user_data;
611        sqe.__bindgen_anon_3.timeout_flags = flags.bits() | sys::IORING_TIMEOUT_UPDATE;
612        Entry(sqe)
613    }
614}
615
616opcode! {
617    /// Accept a new connection on a socket, equivalent to `accept4(2)`.
618    pub struct Accept {
619        fd: { impl sealed::UseFixed },
620        addr: { *mut libc::sockaddr },
621        addrlen: { *mut libc::socklen_t },
622        ;;
623        file_index: Option<types::DestinationSlot> = None,
624        flags: i32 = 0
625    }
626
627    pub const CODE = sys::IORING_OP_ACCEPT;
628
629    pub fn build(self) -> Entry {
630        let Accept { fd, addr, addrlen, file_index, flags } = self;
631
632        let mut sqe = sqe_zeroed();
633        sqe.opcode = Self::CODE;
634        assign_fd!(sqe.fd = fd);
635        sqe.__bindgen_anon_2.addr = addr as _;
636        sqe.__bindgen_anon_1.addr2 = addrlen as _;
637        sqe.__bindgen_anon_3.accept_flags = flags as _;
638        if let Some(dest) = file_index {
639            sqe.__bindgen_anon_5.file_index = dest.kernel_index_arg();
640        }
641        Entry(sqe)
642    }
643}
644
645opcode! {
646    /// Set a socket option.
647    pub struct SetSockOpt {
648        fd: { impl sealed::UseFixed },
649        level: { u32 },
650        optname: { u32 },
651        optval: { *const libc::c_void },
652        optlen: { u32 },
653        ;;
654        flags: u32 = 0
655    }
656
657    pub const CODE = sys::IORING_OP_URING_CMD;
658
659    pub fn build(self) -> Entry {
660        let SetSockOpt { fd, level, optname, optval, optlen, flags } = self;
661        let mut sqe = sqe_zeroed();
662        sqe.opcode = Self::CODE;
663        assign_fd!(sqe.fd = fd);
664        sqe.__bindgen_anon_1.__bindgen_anon_1.cmd_op = sys::SOCKET_URING_OP_SETSOCKOPT;
665
666        sqe.__bindgen_anon_2.__bindgen_anon_1.level = level;
667        sqe.__bindgen_anon_2.__bindgen_anon_1.optname = optname;
668        sqe.__bindgen_anon_3.uring_cmd_flags = flags;
669        sqe.__bindgen_anon_5.optlen = optlen;
670        unsafe { *sqe.__bindgen_anon_6.optval.as_mut() = optval as u64 };
671        Entry(sqe)
672    }
673}
674
675opcode! {
676    /// Attempt to cancel an already issued request.
677    pub struct AsyncCancel {
678        user_data: { u64 }
679        ;;
680
681        // TODO flags
682    }
683
684    pub const CODE = sys::IORING_OP_ASYNC_CANCEL;
685
686    pub fn build(self) -> Entry {
687        let AsyncCancel { user_data } = self;
688
689        let mut sqe = sqe_zeroed();
690        sqe.opcode = Self::CODE;
691        sqe.fd = -1;
692        sqe.__bindgen_anon_2.addr = user_data;
693        Entry(sqe)
694    }
695}
696
697opcode! {
698    /// This request must be linked with another request through
699    /// [`Flags::IO_LINK`](crate::squeue::Flags::IO_LINK).
700    /// Unlike [`Timeout`], [`LinkTimeout`] acts on the linked request, not the completion queue.
701    pub struct LinkTimeout {
702        timespec: { *const types::Timespec },
703        ;;
704        flags: types::TimeoutFlags = types::TimeoutFlags::empty()
705    }
706
707    pub const CODE = sys::IORING_OP_LINK_TIMEOUT;
708
709    pub fn build(self) -> Entry {
710        let LinkTimeout { timespec, flags } = self;
711
712        let mut sqe = sqe_zeroed();
713        sqe.opcode = Self::CODE;
714        sqe.fd = -1;
715        sqe.__bindgen_anon_2.addr = timespec as _;
716        sqe.len = 1;
717        sqe.__bindgen_anon_3.timeout_flags = flags.bits();
718        Entry(sqe)
719    }
720}
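
// A hedged sketch of bounding a read with a linked timeout (hypothetical downstream
// code; assumes `ring`, `fd`, `buf`, and a `types::Timespec` named `ts` that outlives
// the request):
//
//     use io_uring::{opcode, squeue, types};
//
//     let read_e = opcode::Read::new(types::Fd(fd), buf.as_mut_ptr(), buf.len() as _)
//         .build()
//         .flags(squeue::Flags::IO_LINK)   // the next SQE is the timeout for this one
//         .user_data(1);
//     let timeout_e = opcode::LinkTimeout::new(&ts).build().user_data(2);
//     unsafe {
//         ring.submission().push(&read_e).expect("queue full");
//         ring.submission().push(&timeout_e).expect("queue full");
//     }
//     // If the timeout fires first, the read is cancelled (-ECANCELED); if the read
//     // finishes in time, the link timeout itself completes with -ECANCELED.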
721
722opcode! {
723    /// Connect a socket, equivalent to `connect(2)`.
724    pub struct Connect {
725        fd: { impl sealed::UseFixed },
726        addr: { *const libc::sockaddr },
727        addrlen: { libc::socklen_t }
728        ;;
729    }
730
731    pub const CODE = sys::IORING_OP_CONNECT;
732
733    pub fn build(self) -> Entry {
734        let Connect { fd, addr, addrlen } = self;
735
736        let mut sqe = sqe_zeroed();
737        sqe.opcode = Self::CODE;
738        assign_fd!(sqe.fd = fd);
739        sqe.__bindgen_anon_2.addr = addr as _;
740        sqe.__bindgen_anon_1.off = addrlen as _;
741        Entry(sqe)
742    }
743}
744
745// === 5.6 ===
746
747opcode! {
748    /// Preallocate or deallocate space to a file, equivalent to `fallocate(2)`.
749    pub struct Fallocate {
750        fd: { impl sealed::UseFixed },
751        len: { u64 },
752        ;;
753        offset: u64 = 0,
754        mode: i32 = 0
755    }
756
757    pub const CODE = sys::IORING_OP_FALLOCATE;
758
759    pub fn build(self) -> Entry {
760        let Fallocate { fd, len, offset, mode } = self;
761
762        let mut sqe = sqe_zeroed();
763        sqe.opcode = Self::CODE;
764        assign_fd!(sqe.fd = fd);
765        sqe.__bindgen_anon_2.addr = len;
766        sqe.len = mode as _;
767        sqe.__bindgen_anon_1.off = offset;
768        Entry(sqe)
769    }
770}
771
772opcode! {
773    /// Open a file, equivalent to `openat(2)`.
774    pub struct OpenAt {
775        dirfd: { impl sealed::UseFd },
776        pathname: { *const libc::c_char },
777        ;;
778        file_index: Option<types::DestinationSlot> = None,
779        flags: i32 = 0,
780        mode: libc::mode_t = 0
781    }
782
783    pub const CODE = sys::IORING_OP_OPENAT;
784
785    pub fn build(self) -> Entry {
786        let OpenAt { dirfd, pathname, file_index, flags, mode } = self;
787
788        let mut sqe = sqe_zeroed();
789        sqe.opcode = Self::CODE;
790        sqe.fd = dirfd;
791        sqe.__bindgen_anon_2.addr = pathname as _;
792        sqe.len = mode;
793        sqe.__bindgen_anon_3.open_flags = flags as _;
794        if let Some(dest) = file_index {
795            sqe.__bindgen_anon_5.file_index = dest.kernel_index_arg();
796        }
797        Entry(sqe)
798    }
799}
800
801opcode! {
802    /// Close a file descriptor, equivalent to `close(2)`.
803    ///
804    /// Use a `types::Fixed(fd)` argument to close an io_uring direct descriptor.
805    pub struct Close {
806        fd: { impl sealed::UseFixed },
807        ;;
808    }
809
810    pub const CODE = sys::IORING_OP_CLOSE;
811
812    pub fn build(self) -> Entry {
813        let Close { fd } = self;
814
815        let mut sqe = sqe_zeroed();
816        sqe.opcode = Self::CODE;
817        match fd {
818            sealed::Target::Fd(fd) => sqe.fd = fd,
819            sealed::Target::Fixed(idx) => {
820                sqe.fd = 0;
821                sqe.__bindgen_anon_5.file_index = idx + 1;
822            }
823        }
824        Entry(sqe)
825    }
826}
827
828opcode! {
829    /// This command is an alternative to using
830    /// [`Submitter::register_files_update`](crate::Submitter::register_files_update), but it
831    /// works in an async fashion, like the rest of the io_uring commands.
832    pub struct FilesUpdate {
833        fds: { *const RawFd },
834        len: { u32 },
835        ;;
836        offset: i32 = 0
837    }
838
839    pub const CODE = sys::IORING_OP_FILES_UPDATE;
840
841    pub fn build(self) -> Entry {
842        let FilesUpdate { fds, len, offset } = self;
843
844        let mut sqe = sqe_zeroed();
845        sqe.opcode = Self::CODE;
846        sqe.fd = -1;
847        sqe.__bindgen_anon_2.addr = fds as _;
848        sqe.len = len;
849        sqe.__bindgen_anon_1.off = offset as _;
850        Entry(sqe)
851    }
852}
853
854opcode! {
855    /// Get file status, equivalent to `statx(2)`.
856    pub struct Statx {
857        dirfd: { impl sealed::UseFd },
858        pathname: { *const libc::c_char },
859        statxbuf: { *mut types::statx },
860        ;;
861        flags: i32 = 0,
862        mask: u32 = 0
863    }
864
865    pub const CODE = sys::IORING_OP_STATX;
866
867    pub fn build(self) -> Entry {
868        let Statx {
869            dirfd, pathname, statxbuf,
870            flags, mask
871        } = self;
872
873        let mut sqe = sqe_zeroed();
874        sqe.opcode = Self::CODE;
875        sqe.fd = dirfd;
876        sqe.__bindgen_anon_2.addr = pathname as _;
877        sqe.len = mask;
878        sqe.__bindgen_anon_1.off = statxbuf as _;
879        sqe.__bindgen_anon_3.statx_flags = flags as _;
880        Entry(sqe)
881    }
882}
883
884opcode! {
885    /// Issue the equivalent of a `pread(2)` or `pwrite(2)` system call
886    ///
887    /// * `fd` is the file descriptor to be operated on,
888    /// * `addr` contains the buffer in question,
889    /// * `len` contains the length of the IO operation,
890    ///
891    /// These are non-vectored versions of the `IORING_OP_READV` and `IORING_OP_WRITEV` opcodes.
892    /// See also `read(2)` and `write(2)` for the general description of the related system call.
893    ///
894    /// Available since 5.6.
895    pub struct Read {
896        fd: { impl sealed::UseFixed },
897        buf: { *mut u8 },
898        len: { u32 },
899        ;;
900        /// `offset` contains the read or write offset.
901        ///
902        /// If `fd` does not refer to a seekable file, `offset` must be set to zero.
903        /// If `offset` is set to `-1`, the offset will use (and advance) the file position,
904        /// like the `read(2)` and `write(2)` system calls.
905        offset: u64 = 0,
906        ioprio: u16 = 0,
907        rw_flags: types::RwFlags = 0,
908        buf_group: u16 = 0
909    }
910
911    pub const CODE = sys::IORING_OP_READ;
912
913    pub fn build(self) -> Entry {
914        let Read {
915            fd,
916            buf, len, offset,
917            ioprio, rw_flags,
918            buf_group
919        } = self;
920
921        let mut sqe = sqe_zeroed();
922        sqe.opcode = Self::CODE;
923        assign_fd!(sqe.fd = fd);
924        sqe.ioprio = ioprio;
925        sqe.__bindgen_anon_2.addr = buf as _;
926        sqe.len = len;
927        sqe.__bindgen_anon_1.off = offset;
928        sqe.__bindgen_anon_3.rw_flags = rw_flags;
929        sqe.__bindgen_anon_4.buf_group = buf_group;
930        Entry(sqe)
931    }
932}
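
// A hedged sketch of the non-vectored read described above (hypothetical downstream
// code, similar to the crate's top-level example; assumes a readable file at
// "README.md" and a 5.6+ kernel):
//
//     use io_uring::{opcode, types, IoUring};
//     use std::os::unix::io::AsRawFd;
//
//     let mut ring = IoUring::new(8)?;
//     let file = std::fs::File::open("README.md")?;
//     let mut buf = vec![0u8; 1024];
//     let read_e = opcode::Read::new(types::Fd(file.as_raw_fd()), buf.as_mut_ptr(), buf.len() as _)
//         .build()
//         .user_data(0x42);
//     unsafe { ring.submission().push(&read_e).expect("queue full"); }
//     ring.submit_and_wait(1)?;
//     let cqe = ring.completion().next().expect("completion queue is empty");
//     assert!(cqe.result() >= 0, "read error: {}", cqe.result());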
933
934opcode! {
935    /// Issue the equivalent of a `pread(2)` or `pwrite(2)` system call
936    ///
937    /// * `fd` is the file descriptor to be operated on,
938    /// * `addr` contains the buffer in question,
939    /// * `len` contains the length of the IO operation,
940    ///
941    /// These are non-vectored versions of the `IORING_OP_READV` and `IORING_OP_WRITEV` opcodes.
942    /// See also `read(2)` and `write(2)` for the general description of the related system call.
943    ///
944    /// Available since 5.6.
945    pub struct Write {
946        fd: { impl sealed::UseFixed },
947        buf: { *const u8 },
948        len: { u32 },
949        ;;
950        /// `offset` contains the read or write offset.
951        ///
952        /// If `fd` does not refer to a seekable file, `offset` must be set to zero.
953        /// If `offset` is set to `-1`, the offset will use (and advance) the file position,
954        /// like the `read(2)` and `write(2)` system calls.
955        offset: u64 = 0,
956        ioprio: u16 = 0,
957        rw_flags: types::RwFlags = 0
958    }
959
960    pub const CODE = sys::IORING_OP_WRITE;
961
962    pub fn build(self) -> Entry {
963        let Write {
964            fd,
965            buf, len, offset,
966            ioprio, rw_flags
967        } = self;
968
969        let mut sqe = sqe_zeroed();
970        sqe.opcode = Self::CODE;
971        assign_fd!(sqe.fd = fd);
972        sqe.ioprio = ioprio;
973        sqe.__bindgen_anon_2.addr = buf as _;
974        sqe.len = len;
975        sqe.__bindgen_anon_1.off = offset;
976        sqe.__bindgen_anon_3.rw_flags = rw_flags;
977        Entry(sqe)
978    }
979}
980
981opcode! {
982    /// Predeclare an access pattern for file data, equivalent to `posix_fadvise(2)`.
983    pub struct Fadvise {
984        fd: { impl sealed::UseFixed },
985        len: { libc::off_t },
986        advice: { i32 },
987        ;;
988        offset: u64 = 0,
989    }
990
991    pub const CODE = sys::IORING_OP_FADVISE;
992
993    pub fn build(self) -> Entry {
994        let Fadvise { fd, len, advice, offset } = self;
995
996        let mut sqe = sqe_zeroed();
997        sqe.opcode = Self::CODE;
998        assign_fd!(sqe.fd = fd);
999        sqe.len = len as _;
1000        sqe.__bindgen_anon_1.off = offset;
1001        sqe.__bindgen_anon_3.fadvise_advice = advice as _;
1002        Entry(sqe)
1003    }
1004}
1005
1006opcode! {
1007    /// Give advice about use of memory, equivalent to `madvise(2)`.
1008    pub struct Madvise {
1009        addr: { *const libc::c_void },
1010        len: { libc::off_t },
1011        advice: { i32 },
1012        ;;
1013    }
1014
1015    pub const CODE = sys::IORING_OP_MADVISE;
1016
1017    pub fn build(self) -> Entry {
1018        let Madvise { addr, len, advice } = self;
1019
1020        let mut sqe = sqe_zeroed();
1021        sqe.opcode = Self::CODE;
1022        sqe.fd = -1;
1023        sqe.__bindgen_anon_2.addr = addr as _;
1024        sqe.len = len as _;
1025        sqe.__bindgen_anon_3.fadvise_advice = advice as _;
1026        Entry(sqe)
1027    }
1028}
1029
1030opcode! {
1031    /// Send a message on a socket, equivalent to `send(2)`.
1032    pub struct Send {
1033        fd: { impl sealed::UseFixed },
1034        buf: { *const u8 },
1035        len: { u32 },
1036        ;;
1037        flags: i32 = 0,
1038
1039        /// Set the destination address, for sending from an unconnected socket.
1040        ///
1041        /// When set, `dest_addr_len` must be set as well.
1042        /// See also `man 3 io_uring_prep_send_set_addr`.
1043        dest_addr: *const libc::sockaddr = core::ptr::null(),
1044        dest_addr_len: libc::socklen_t = 0,
1045    }
1046
1047    pub const CODE = sys::IORING_OP_SEND;
1048
1049    pub fn build(self) -> Entry {
1050        let Send { fd, buf, len, flags, dest_addr, dest_addr_len } = self;
1051
1052        let mut sqe = sqe_zeroed();
1053        sqe.opcode = Self::CODE;
1054        assign_fd!(sqe.fd = fd);
1055        sqe.__bindgen_anon_2.addr = buf as _;
1056        sqe.__bindgen_anon_1.addr2 = dest_addr as _;
1057        sqe.__bindgen_anon_5.__bindgen_anon_1.addr_len = dest_addr_len as _;
1058        sqe.len = len;
1059        sqe.__bindgen_anon_3.msg_flags = flags as _;
1060        Entry(sqe)
1061    }
1062}
1063
1064opcode! {
1065    /// Receive a message from a socket, equivalent to `recv(2)`.
1066    pub struct Recv {
1067        fd: { impl sealed::UseFixed },
1068        buf: { *mut u8 },
1069        len: { u32 },
1070        ;;
1071        flags: i32 = 0,
1072        buf_group: u16 = 0
1073    }
1074
1075    pub const CODE = sys::IORING_OP_RECV;
1076
1077    pub fn build(self) -> Entry {
1078        let Recv { fd, buf, len, flags, buf_group } = self;
1079
1080        let mut sqe = sqe_zeroed();
1081        sqe.opcode = Self::CODE;
1082        assign_fd!(sqe.fd = fd);
1083        sqe.__bindgen_anon_2.addr = buf as _;
1084        sqe.len = len;
1085        sqe.__bindgen_anon_3.msg_flags = flags as _;
1086        sqe.__bindgen_anon_4.buf_group = buf_group;
1087        Entry(sqe)
1088    }
1089}
1090
1091opcode! {
1092    /// Receive multiple messages from a socket, equivalent to `recv(2)`.
1093    ///
1094    /// Parameter:
1095    ///     buf_group: The id of the provided buffer pool to use for each received message.
1096    ///
1097    /// MSG_WAITALL should not be set in flags.
1098    ///
1099    /// The multishot version allows the application to issue a single receive request, which
1100    /// repeatedly posts a CQE when data is available. Each CQE will take a buffer out of a
1101    /// provided buffer pool for receiving. The application should check the flags of each CQE,
1102    /// regardless of its result. If a posted CQE does not have the IORING_CQE_F_MORE flag set then
1103    /// the multishot receive will be done and the application should issue a new request.
1104    ///
1105    /// Multishot variants are available since kernel 6.0.
1106
1107    pub struct RecvMulti {
1108        fd: { impl sealed::UseFixed },
1109        buf_group: { u16 },
1110        ;;
1111        flags: i32 = 0,
1112    }
1113
1114    pub const CODE = sys::IORING_OP_RECV;
1115
1116    pub fn build(self) -> Entry {
1117        let RecvMulti { fd, buf_group, flags } = self;
1118
1119        let mut sqe = sqe_zeroed();
1120        sqe.opcode = Self::CODE;
1121        assign_fd!(sqe.fd = fd);
1122        sqe.__bindgen_anon_3.msg_flags = flags as _;
1123        sqe.__bindgen_anon_4.buf_group = buf_group;
1124        sqe.flags |= crate::squeue::Flags::BUFFER_SELECT.bits();
1125        sqe.ioprio = sys::IORING_RECV_MULTISHOT as _;
1126        Entry(sqe)
1127    }
1128}
1129
1130opcode! {
1131    /// Open a file, equivalent to `openat2(2)`.
1132    pub struct OpenAt2 {
1133        dirfd: { impl sealed::UseFd },
1134        pathname: { *const libc::c_char },
1135        how: { *const types::OpenHow }
1136        ;;
1137        file_index: Option<types::DestinationSlot> = None,
1138    }
1139
1140    pub const CODE = sys::IORING_OP_OPENAT2;
1141
1142    pub fn build(self) -> Entry {
1143        let OpenAt2 { dirfd, pathname, how, file_index } = self;
1144
1145        let mut sqe = sqe_zeroed();
1146        sqe.opcode = Self::CODE;
1147        sqe.fd = dirfd;
1148        sqe.__bindgen_anon_2.addr = pathname as _;
1149        sqe.len = mem::size_of::<sys::open_how>() as _;
1150        sqe.__bindgen_anon_1.off = how as _;
1151        if let Some(dest) = file_index {
1152            sqe.__bindgen_anon_5.file_index = dest.kernel_index_arg();
1153        }
1154        Entry(sqe)
1155    }
1156}
1157
1158opcode! {
1159    /// Modify an epoll file descriptor, equivalent to `epoll_ctl(2)`.
1160    pub struct EpollCtl {
1161        epfd: { impl sealed::UseFixed },
1162        fd: { impl sealed::UseFd },
1163        op: { i32 },
1164        ev: { *const types::epoll_event },
1165        ;;
1166    }
1167
1168    pub const CODE = sys::IORING_OP_EPOLL_CTL;
1169
1170    pub fn build(self) -> Entry {
1171        let EpollCtl { epfd, fd, op, ev } = self;
1172
1173        let mut sqe = sqe_zeroed();
1174        sqe.opcode = Self::CODE;
1175        assign_fd!(sqe.fd = epfd);
1176        sqe.__bindgen_anon_2.addr = ev as _;
1177        sqe.len = op as _;
1178        sqe.__bindgen_anon_1.off = fd as _;
1179        Entry(sqe)
1180    }
1181}
1182
1183// === 5.7 ===
1184
1185opcode! {
1186    /// Splice data to/from a pipe, equivalent to `splice(2)`.
1187    ///
1188    /// If `fd_in` refers to a pipe, `off_in` must be `-1`.
1189    /// The description of `off_in` also applies to `off_out`.
1190    pub struct Splice {
1191        fd_in: { impl sealed::UseFixed },
1192        off_in: { i64 },
1193        fd_out: { impl sealed::UseFixed },
1194        off_out: { i64 },
1195        len: { u32 },
1196        ;;
1197        /// see man `splice(2)` for description of flags.
1198        flags: u32 = 0
1199    }
1200
1201    pub const CODE = sys::IORING_OP_SPLICE;
1202
1203    pub fn build(self) -> Entry {
1204        let Splice { fd_in, off_in, fd_out, off_out, len, mut flags } = self;
1205
1206        let mut sqe = sqe_zeroed();
1207        sqe.opcode = Self::CODE;
1208        assign_fd!(sqe.fd = fd_out);
1209        sqe.len = len;
1210        sqe.__bindgen_anon_1.off = off_out as _;
1211
1212        sqe.__bindgen_anon_5.splice_fd_in = match fd_in {
1213            sealed::Target::Fd(fd) => fd,
1214            sealed::Target::Fixed(idx) => {
1215                flags |= sys::SPLICE_F_FD_IN_FIXED;
1216                idx as _
1217            }
1218        };
1219
1220        sqe.__bindgen_anon_2.splice_off_in = off_in as _;
1221        sqe.__bindgen_anon_3.splice_flags = flags;
1222        Entry(sqe)
1223    }
1224}
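
// A hedged sketch of splicing from a pipe into a file (hypothetical downstream code;
// assumes `ring`, a pipe read end `pipe_rd`, and a regular file `file_fd`):
//
//     use io_uring::{opcode, types};
//
//     // `off_in` is -1 because fd_in is a pipe; `off_out` is the file offset to write at.
//     let splice_e = opcode::Splice::new(types::Fd(pipe_rd), -1, types::Fd(file_fd), 0, 4096)
//         .build()
//         .user_data(9);
//     unsafe { ring.submission().push(&splice_e).expect("queue full"); }
//     ring.submit_and_wait(1)?;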
1225
1226opcode! {
1227    /// Register `nbufs` buffers that each have the length `len` with ids starting from `bid` in the
1228    /// group `bgid` that can be used for any request. See
1229    /// [`BUFFER_SELECT`](crate::squeue::Flags::BUFFER_SELECT) for more info.
1230    pub struct ProvideBuffers {
1231        addr: { *mut u8 },
1232        len: { i32 },
1233        nbufs: { u16 },
1234        bgid: { u16 },
1235        bid: { u16 }
1236        ;;
1237    }
1238
1239    pub const CODE = sys::IORING_OP_PROVIDE_BUFFERS;
1240
1241    pub fn build(self) -> Entry {
1242        let ProvideBuffers { addr, len, nbufs, bgid, bid } = self;
1243
1244        let mut sqe = sqe_zeroed();
1245        sqe.opcode = Self::CODE;
1246        sqe.fd = nbufs as _;
1247        sqe.__bindgen_anon_2.addr = addr as _;
1248        sqe.len = len as _;
1249        sqe.__bindgen_anon_1.off = bid as _;
1250        sqe.__bindgen_anon_4.buf_group = bgid;
1251        Entry(sqe)
1252    }
1253}
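
// A hedged sketch of pairing provided buffers with a buffer-selecting receive
// (hypothetical downstream code; assumes `ring`, a socket `fd`, assumed constants
// `NBUFS: u16` and `BUF_LEN: i32`, and a backing allocation `pool` of
// NBUFS * BUF_LEN bytes that outlives the requests):
//
//     use io_uring::{cqueue, opcode, squeue, types};
//
//     const BGID: u16 = 0;
//     let provide_e = opcode::ProvideBuffers::new(pool.as_mut_ptr(), BUF_LEN, NBUFS, BGID, 0)
//         .build()
//         .user_data(1);
//     unsafe { ring.submission().push(&provide_e).expect("queue full"); }
//     ring.submit_and_wait(1)?;   // the buffers must exist before the recv runs
//
//     let recv_e = opcode::Recv::new(types::Fd(fd), std::ptr::null_mut(), BUF_LEN as u32)
//         .buf_group(BGID)
//         .build()
//         .flags(squeue::Flags::BUFFER_SELECT)
//         .user_data(2);
//     unsafe { ring.submission().push(&recv_e).expect("queue full"); }
//     ring.submit()?;
//     // Once the recv completes, `cqueue::buffer_select(cqe.flags())` reports which
//     // buffer id in group BGID holds the received data.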
1254
1255opcode! {
1256    /// Remove some number of buffers from a buffer group. See
1257    /// [`BUFFER_SELECT`](crate::squeue::Flags::BUFFER_SELECT) for more info.
1258    pub struct RemoveBuffers {
1259        nbufs: { u16 },
1260        bgid: { u16 }
1261        ;;
1262    }
1263
1264    pub const CODE = sys::IORING_OP_REMOVE_BUFFERS;
1265
1266    pub fn build(self) -> Entry {
1267        let RemoveBuffers { nbufs, bgid } = self;
1268
1269        let mut sqe = sqe_zeroed();
1270        sqe.opcode = Self::CODE;
1271        sqe.fd = nbufs as _;
1272        sqe.__bindgen_anon_4.buf_group = bgid;
1273        Entry(sqe)
1274    }
1275}
1276
1277// === 5.8 ===
1278
1279opcode! {
1280    /// Duplicate pipe content, equivalent to `tee(2)`.
1281    pub struct Tee {
1282        fd_in: { impl sealed::UseFixed },
1283        fd_out: { impl sealed::UseFixed },
1284        len: { u32 }
1285        ;;
1286        flags: u32 = 0
1287    }
1288
1289    pub const CODE = sys::IORING_OP_TEE;
1290
1291    pub fn build(self) -> Entry {
1292        let Tee { fd_in, fd_out, len, mut flags } = self;
1293
1294        let mut sqe = sqe_zeroed();
1295        sqe.opcode = Self::CODE;
1296
1297        assign_fd!(sqe.fd = fd_out);
1298        sqe.len = len;
1299
1300        sqe.__bindgen_anon_5.splice_fd_in = match fd_in {
1301            sealed::Target::Fd(fd) => fd,
1302            sealed::Target::Fixed(idx) => {
1303                flags |= sys::SPLICE_F_FD_IN_FIXED;
1304                idx as _
1305            }
1306        };
1307
1308        sqe.__bindgen_anon_3.splice_flags = flags;
1309
1310        Entry(sqe)
1311    }
1312}
1313
1314// === 5.11 ===
1315
1316opcode! {
1317    /// Shut down all or part of a full duplex connection on a socket, equivalent to `shutdown(2)`.
1318    /// Available since kernel 5.11.
1319    pub struct Shutdown {
1320        fd: { impl sealed::UseFixed },
1321        how: { i32 },
1322        ;;
1323    }
1324
1325    pub const CODE = sys::IORING_OP_SHUTDOWN;
1326
1327    pub fn build(self) -> Entry {
1328        let Shutdown { fd, how } = self;
1329
1330        let mut sqe = sqe_zeroed();
1331        sqe.opcode = Self::CODE;
1332        assign_fd!(sqe.fd = fd);
1333        sqe.len = how as _;
1334        Entry(sqe)
1335    }
1336}
1337
1338opcode! {
1339    /// Change the name or location of a file, equivalent to `renameat2(2)`.
1340    /// Available since kernel 5.11.
1341    pub struct RenameAt {
1342        olddirfd: { impl sealed::UseFd },
1343        oldpath: { *const libc::c_char },
1344        newdirfd: { impl sealed::UseFd },
1345        newpath: { *const libc::c_char },
1346        ;;
1347        flags: u32 = 0
1348    }
1349
1350    pub const CODE = sys::IORING_OP_RENAMEAT;
1351
1352    pub fn build(self) -> Entry {
1353        let RenameAt {
1354            olddirfd, oldpath,
1355            newdirfd, newpath,
1356            flags
1357        } = self;
1358
1359        let mut sqe = sqe_zeroed();
1360        sqe.opcode = Self::CODE;
1361        sqe.fd = olddirfd;
1362        sqe.__bindgen_anon_2.addr = oldpath as _;
1363        sqe.len = newdirfd as _;
1364        sqe.__bindgen_anon_1.off = newpath as _;
1365        sqe.__bindgen_anon_3.rename_flags = flags;
1366        Entry(sqe)
1367    }
1368}
1369
1370opcode! {
1371    /// Delete a name and possibly the file it refers to, equivalent to `unlinkat(2)`.
1372    /// Available since kernel 5.11.
1373    pub struct UnlinkAt {
1374        dirfd: { impl sealed::UseFd },
1375        pathname: { *const libc::c_char },
1376        ;;
1377        flags: i32 = 0
1378    }
1379
1380    pub const CODE = sys::IORING_OP_UNLINKAT;
1381
1382    pub fn build(self) -> Entry {
1383        let UnlinkAt { dirfd, pathname, flags } = self;
1384
1385        let mut sqe = sqe_zeroed();
1386        sqe.opcode = Self::CODE;
1387        sqe.fd = dirfd;
1388        sqe.__bindgen_anon_2.addr = pathname as _;
1389        sqe.__bindgen_anon_3.unlink_flags = flags as _;
1390        Entry(sqe)
1391    }
1392}
1393
1394// === 5.15 ===
1395
1396opcode! {
1397    /// Make a directory, equivalent to `mkdirat(2)`.
1398    pub struct MkDirAt {
1399        dirfd: { impl sealed::UseFd },
1400        pathname: { *const libc::c_char },
1401        ;;
1402        mode: libc::mode_t = 0
1403    }
1404
1405    pub const CODE = sys::IORING_OP_MKDIRAT;
1406
1407    pub fn build(self) -> Entry {
1408        let MkDirAt { dirfd, pathname, mode } = self;
1409
1410        let mut sqe = sqe_zeroed();
1411        sqe.opcode = Self::CODE;
1412        sqe.fd = dirfd;
1413        sqe.__bindgen_anon_2.addr = pathname as _;
1414        sqe.len = mode;
1415        Entry(sqe)
1416    }
1417}
1418
1419opcode! {
1420    /// Create a symlink, equivalent to `symlinkat(2)`.
1421    pub struct SymlinkAt {
1422        newdirfd: { impl sealed::UseFd },
1423        target: { *const libc::c_char },
1424        linkpath: { *const libc::c_char },
1425        ;;
1426    }
1427
1428    pub const CODE = sys::IORING_OP_SYMLINKAT;
1429
1430    pub fn build(self) -> Entry {
1431        let SymlinkAt { newdirfd, target, linkpath } = self;
1432
1433        let mut sqe = sqe_zeroed();
1434        sqe.opcode = Self::CODE;
1435        sqe.fd = newdirfd;
1436        sqe.__bindgen_anon_2.addr = target as _;
1437        sqe.__bindgen_anon_1.addr2 = linkpath as _;
1438        Entry(sqe)
1439    }
1440}
1441
1442opcode! {
1443    /// Create a hard link, equivalent to `linkat(2)`.
1444    pub struct LinkAt {
1445        olddirfd: { impl sealed::UseFd },
1446        oldpath: { *const libc::c_char },
1447        newdirfd: { impl sealed::UseFd },
1448        newpath: { *const libc::c_char },
1449        ;;
1450        flags: i32 = 0
1451    }
1452
1453    pub const CODE = sys::IORING_OP_LINKAT;
1454
1455    pub fn build(self) -> Entry {
1456        let LinkAt { olddirfd, oldpath, newdirfd, newpath, flags } = self;
1457
1458        let mut sqe = sqe_zeroed();
1459        sqe.opcode = Self::CODE;
1460        sqe.fd = olddirfd as _;
1461        sqe.__bindgen_anon_2.addr = oldpath as _;
1462        sqe.len = newdirfd as _;
1463        sqe.__bindgen_anon_1.addr2 = newpath as _;
1464        sqe.__bindgen_anon_3.hardlink_flags = flags as _;
1465        Entry(sqe)
1466    }
1467}
1468
1469// === 5.17 ===
1470
1471opcode! {
1472    /// Get extended attribute, equivalent to `getxattr(2)`.
1473    pub struct GetXattr {
1474        name: { *const libc::c_char },
1475        value: { *mut libc::c_void },
1476        path: { *const libc::c_char },
1477        len: { u32 },
1478        ;;
1479    }
1480
1481    pub const CODE = sys::IORING_OP_GETXATTR;
1482
1483    pub fn build(self) -> Entry {
1484        let GetXattr { name, value, path, len } = self;
1485
1486        let mut sqe = sqe_zeroed();
1487        sqe.opcode = Self::CODE;
1488        sqe.__bindgen_anon_2.addr = name as _;
1489        sqe.len = len;
1490        sqe.__bindgen_anon_1.off = value as _;
1491        unsafe { sqe.__bindgen_anon_6.__bindgen_anon_1.as_mut().addr3 = path as _ };
1492        sqe.__bindgen_anon_3.xattr_flags = 0;
1493        Entry(sqe)
1494    }
1495}
1496
1497opcode! {
1498    /// Set extended attribute, equivalent to `setxattr(2)`.
1499    pub struct SetXattr {
1500        name: { *const libc::c_char },
1501        value: { *const libc::c_void },
1502        path: { *const libc::c_char },
1503        len: { u32 },
1504        ;;
1505        flags: i32 = 0
1506    }
1507
1508    pub const CODE = sys::IORING_OP_SETXATTR;
1509
1510    pub fn build(self) -> Entry {
1511        let SetXattr { name, value, path, flags, len } = self;
1512
1513        let mut sqe = sqe_zeroed();
1514        sqe.opcode = Self::CODE;
1515        sqe.__bindgen_anon_2.addr = name as _;
1516        sqe.len = len;
1517        sqe.__bindgen_anon_1.off = value as _;
1518        unsafe { sqe.__bindgen_anon_6.__bindgen_anon_1.as_mut().addr3 = path as _ };
1519        sqe.__bindgen_anon_3.xattr_flags = flags as _;
1520        Entry(sqe)
1521    }
1522}
1523
1524opcode! {
1525    /// Get extended attribute from a file descriptor, equivalent to `fgetxattr(2)`.
1526    pub struct FGetXattr {
1527        fd: { impl sealed::UseFixed },
1528        name: { *const libc::c_char },
1529        value: { *mut libc::c_void },
1530        len: { u32 },
1531        ;;
1532    }
1533
1534    pub const CODE = sys::IORING_OP_FGETXATTR;
1535
1536    pub fn build(self) -> Entry {
1537        let FGetXattr { fd, name, value, len } = self;
1538
1539        let mut sqe = sqe_zeroed();
1540        sqe.opcode = Self::CODE;
1541        assign_fd!(sqe.fd = fd);
1542        sqe.__bindgen_anon_2.addr = name as _;
1543        sqe.len = len;
1544        sqe.__bindgen_anon_1.off = value as _;
1545        sqe.__bindgen_anon_3.xattr_flags = 0;
1546        Entry(sqe)
1547    }
1548}
1549
1550opcode! {
1551    /// Set extended attribute on a file descriptor, equivalent to `fsetxattr(2)`.
1552    pub struct FSetXattr {
1553        fd: { impl sealed::UseFixed },
1554        name: { *const libc::c_char },
1555        value: { *const libc::c_void },
1556        len: { u32 },
1557        ;;
1558        flags: i32 = 0
1559    }
1560
1561    pub const CODE = sys::IORING_OP_FSETXATTR;
1562
1563    pub fn build(self) -> Entry {
1564        let FSetXattr { fd, name, value, flags, len } = self;
1565
1566        let mut sqe = sqe_zeroed();
1567        sqe.opcode = Self::CODE;
1568        assign_fd!(sqe.fd = fd);
1569        sqe.__bindgen_anon_2.addr = name as _;
1570        sqe.len = len;
1571        sqe.__bindgen_anon_1.off = value as _;
1572        sqe.__bindgen_anon_3.xattr_flags = flags as _;
1573        Entry(sqe)
1574    }
1575}
1576
1577// === 5.18 ===
1578
1579opcode! {
1580    /// Send a message (with data) to a target ring.
1581    pub struct MsgRingData {
1582        ring_fd: { impl sealed::UseFd },
1583        result: { i32 },
1584        user_data: { u64 },
1585        user_flags: { Option<u32> },
1586        ;;
1587        opcode_flags: u32 = 0
1588    }
1589
1590    pub const CODE = sys::IORING_OP_MSG_RING;
1591
1592    pub fn build(self) -> Entry {
1593        let MsgRingData { ring_fd, result, user_data, user_flags, opcode_flags } = self;
1594
1595        let mut sqe = sqe_zeroed();
1596        sqe.opcode = Self::CODE;
1597        sqe.__bindgen_anon_2.addr = sys::IORING_MSG_DATA.into();
1598        sqe.fd = ring_fd;
1599        sqe.len = result as u32;
1600        sqe.__bindgen_anon_1.off = user_data;
1601        sqe.__bindgen_anon_3.msg_ring_flags = opcode_flags;
1602        if let Some(flags) = user_flags {
1603            sqe.__bindgen_anon_5.file_index = flags;
1604            unsafe {sqe.__bindgen_anon_3.msg_ring_flags |= sys::IORING_MSG_RING_FLAGS_PASS};
1605        }
1606        Entry(sqe)
1607    }
1608}
1609
1610// === 5.19 ===
1611
1612opcode! {
1613    /// Attempt to cancel an already issued request, receiving a cancellation
1614    /// builder, which allows for the new cancellation criteria introduced in
1615    /// 5.19.
1616    pub struct AsyncCancel2 {
1617        builder: { types::CancelBuilder }
1618        ;;
1619    }
1620
1621    pub const CODE = sys::IORING_OP_ASYNC_CANCEL;
1622
1623    pub fn build(self) -> Entry {
1624        let AsyncCancel2 { builder } = self;
1625
1626        let mut sqe = sqe_zeroed();
1627        sqe.opcode = Self::CODE;
1628        sqe.fd = builder.to_fd();
1629        sqe.__bindgen_anon_2.addr = builder.user_data.unwrap_or(0);
1630        sqe.__bindgen_anon_3.cancel_flags = builder.flags.bits();
1631        Entry(sqe)
1632    }
1633}
1634
1635opcode! {
1636    /// A file/device-specific 16-byte command, akin (but not equivalent) to `ioctl(2)`.
1637    pub struct UringCmd16 {
1638        fd: { impl sealed::UseFixed },
1639        cmd_op: { u32 },
1640        ;;
1641        /// The `buf_index` is an index into an array of fixed buffers,
1642        /// and is only valid if fixed buffers were registered.
1643        buf_index: Option<u16> = None,
1644        /// Arbitrary command data.
1645        cmd: [u8; 16] = [0u8; 16]
1646    }
1647
1648    pub const CODE = sys::IORING_OP_URING_CMD;
1649
1650    pub fn build(self) -> Entry {
1651        let UringCmd16 { fd, cmd_op, cmd, buf_index } = self;
1652
1653        let mut sqe = sqe_zeroed();
1654        sqe.opcode = Self::CODE;
1655        assign_fd!(sqe.fd = fd);
1656        sqe.__bindgen_anon_1.__bindgen_anon_1.cmd_op = cmd_op;
1657        unsafe { *sqe.__bindgen_anon_6.cmd.as_mut().as_mut_ptr().cast::<[u8; 16]>() = cmd };
1658        if let Some(buf_index) = buf_index {
1659            sqe.__bindgen_anon_4.buf_index = buf_index;
1660            unsafe {
1661                sqe.__bindgen_anon_3.uring_cmd_flags |= sys::IORING_URING_CMD_FIXED;
1662            }
1663        }
1664        Entry(sqe)
1665    }
1666}
1667
1668opcode! {
1669    /// A file/device-specific 80-byte command, akin (but not equivalent) to `ioctl(2)`.
1670    pub struct UringCmd80 {
1671        fd: { impl sealed::UseFixed },
1672        cmd_op: { u32 },
1673        ;;
1674        /// The `buf_index` is an index into an array of fixed buffers,
1675        /// and is only valid if fixed buffers were registered.
1676        buf_index: Option<u16> = None,
1677        /// Arbitrary command data.
1678        cmd: [u8; 80] = [0u8; 80]
1679    }
1680
1681    pub const CODE = sys::IORING_OP_URING_CMD;
1682
1683    pub fn build(self) -> Entry128 {
1684        let UringCmd80 { fd, cmd_op, cmd, buf_index } = self;
1685
1686        let cmd1 = cmd[..16].try_into().unwrap();
1687        let cmd2 = cmd[16..].try_into().unwrap();
1688
1689        let mut sqe = sqe_zeroed();
1690        sqe.opcode = Self::CODE;
1691        assign_fd!(sqe.fd = fd);
1692        sqe.__bindgen_anon_1.__bindgen_anon_1.cmd_op = cmd_op;
1693        unsafe { *sqe.__bindgen_anon_6.cmd.as_mut().as_mut_ptr().cast::<[u8; 16]>() = cmd1 };
1694        if let Some(buf_index) = buf_index {
1695            sqe.__bindgen_anon_4.buf_index = buf_index;
1696            unsafe {
1697                sqe.__bindgen_anon_3.uring_cmd_flags |= sys::IORING_URING_CMD_FIXED;
1698            }
1699        }
1700        Entry128(Entry(sqe), cmd2)
1701    }
1702}
1703
1704opcode! {
1705    /// Create an endpoint for communication, equivalent to `socket(2)`.
1706    ///
1707    /// If the `file_index` argument is set, the resulting socket is
1708    /// directly mapped to the given fixed-file slot instead of being
1709    /// returned as a normal file descriptor. The application must first
1710    /// have registered a file table, and the target slot should fit into
1711    /// it.
1712    ///
1713    /// Available since 5.19.
1714    pub struct Socket {
1715        domain: { i32 },
1716        socket_type: { i32 },
1717        protocol: { i32 },
1718        ;;
1719        file_index: Option<types::DestinationSlot> = None,
1720        flags: types::RwFlags = 0,
1721    }
1722
1723    pub const CODE = sys::IORING_OP_SOCKET;
1724
1725    pub fn build(self) -> Entry {
1726        let Socket { domain, socket_type, protocol, file_index, flags } = self;
1727
1728        let mut sqe = sqe_zeroed();
1729        sqe.opcode = Self::CODE;
1730        sqe.fd = domain as _;
1731        sqe.__bindgen_anon_1.off = socket_type as _;
1732        sqe.len = protocol as _;
1733        sqe.__bindgen_anon_3.rw_flags = flags;
1734        if let Some(dest) = file_index {
1735            sqe.__bindgen_anon_5.file_index = dest.kernel_index_arg();
1736        }
1737        Entry(sqe)
1738    }
1739}
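
// A hedged sketch of creating a TCP socket through the ring (hypothetical downstream
// code; assumes a 5.19+ kernel, an existing `ring`, and the `libc` crate):
//
//     use io_uring::{opcode, types};
//
//     let socket_e = opcode::Socket::new(libc::AF_INET, libc::SOCK_STREAM, 0)
//         .build()
//         .user_data(5);
//     unsafe { ring.submission().push(&socket_e).expect("queue full"); }
//     ring.submit_and_wait(1)?;
//     // On success the CQE result is the new socket's file descriptor; with
//     // `file_index` set, the socket is installed into the requested fixed slot instead.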
1740
1741opcode! {
1742    /// Accept multiple new connections on a socket.
1743    ///
1744    /// Set the `allocate_file_index` property if fixed file table entries should be used.
1745    ///
1746    /// Available since 5.19.
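    ///
    /// # Examples
    ///
    /// A sketch of a multishot accept; `listen_fd` is assumed to be a socket that is already
    /// listening, and a real application would keep reaping completions as they arrive.
    ///
    /// ```no_run
    /// use io_uring::{cqueue, opcode, types, IoUring};
    ///
    /// let listen_fd: std::os::unix::io::RawFd = 3; // placeholder listening socket
    /// let mut ring = IoUring::new(8)?;
    ///
    /// let sqe = opcode::AcceptMulti::new(types::Fd(listen_fd)).build().user_data(0x10);
    /// unsafe { ring.submission().push(&sqe).expect("submission queue is full") };
    /// ring.submit_and_wait(1)?;
    ///
    /// for cqe in ring.completion() {
    ///     println!("accepted fd: {}", cqe.result()); // connection fd on success, -errno on failure
    ///     if !cqueue::more(cqe.flags()) {
    ///         // IORING_CQE_F_MORE is clear: the multishot accept has terminated and a new
    ///         // request has to be submitted.
    ///     }
    /// }
    /// # Ok::<(), std::io::Error>(())
    /// ```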
1747    pub struct AcceptMulti {
1748        fd: { impl sealed::UseFixed },
1749        ;;
1750        allocate_file_index: bool = false,
1751        flags: i32 = 0
1752    }
1753
1754    pub const CODE = sys::IORING_OP_ACCEPT;
1755
1756    pub fn build(self) -> Entry {
1757        let AcceptMulti { fd, allocate_file_index, flags } = self;
1758
1759        let mut sqe = sqe_zeroed();
1760        sqe.opcode = Self::CODE;
1761        assign_fd!(sqe.fd = fd);
1762        sqe.ioprio = sys::IORING_ACCEPT_MULTISHOT as u16;
1763        // No out SockAddr is passed for the multishot accept case.
1764        // The user should perform a syscall to get any resulting connection's remote address.
1765        sqe.__bindgen_anon_3.accept_flags = flags as _;
1766        if allocate_file_index {
1767            sqe.__bindgen_anon_5.file_index = sys::IORING_FILE_INDEX_ALLOC as u32;
1768        }
1769        Entry(sqe)
1770    }
1771}
1772
1773// === 6.0 ===
1774
1775opcode! {
    /// Send a fixed file descriptor to a target ring.
    ///
    /// The descriptor held in this ring's fixed-file slot `fixed_slot_src` is installed into the
    /// target ring's file table at `dest_slot_index`, and the target ring receives a completion
    /// carrying `user_data`.
1777    pub struct MsgRingSendFd {
1778        ring_fd: { impl sealed::UseFd },
1779        fixed_slot_src: { types::Fixed },
1780        dest_slot_index: { types::DestinationSlot },
1781        user_data: { u64 },
1782        ;;
1783        opcode_flags: u32 = 0
1784    }
1785
1786    pub const CODE = sys::IORING_OP_MSG_RING;
1787
1788    pub fn build(self) -> Entry {
1789        let MsgRingSendFd { ring_fd, fixed_slot_src, dest_slot_index, user_data, opcode_flags } = self;
1790
1791        let mut sqe = sqe_zeroed();
1792        sqe.opcode = Self::CODE;
1793        sqe.__bindgen_anon_2.addr = sys::IORING_MSG_SEND_FD.into();
1794        sqe.fd = ring_fd;
1795        sqe.__bindgen_anon_1.off = user_data;
1796        unsafe { sqe.__bindgen_anon_6.__bindgen_anon_1.as_mut().addr3 = fixed_slot_src.0 as u64 };
1797        sqe.__bindgen_anon_5.file_index = dest_slot_index.kernel_index_arg();
1798        sqe.__bindgen_anon_3.msg_ring_flags = opcode_flags;
1799        Entry(sqe)
1800    }
1801}
1802
1803// === 6.0 ===
1804
1805opcode! {
1806    /// Send a zerocopy message on a socket, equivalent to `send(2)`.
1807    ///
    /// When `dest_addr` is non-null it points to the address of the target, with `dest_addr_len`
    /// specifying its size, turning the request into a `sendto(2)`.
    ///
    /// A fixed (pre-mapped) buffer can optionally be used, provided it has been previously
    /// registered with [`Submitter::register_buffers`](crate::Submitter::register_buffers).
1813    ///
1814    /// This operation might result in two completion queue entries.
1815    /// See the `IORING_OP_SEND_ZC` section at [io_uring_enter][] for the exact semantics.
1816    /// Notifications posted by this operation can be checked with [notif](crate::cqueue::notif).
1817    ///
1818    /// [io_uring_enter]: https://man7.org/linux/man-pages/man2/io_uring_enter.2.html
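    ///
    /// # Examples
    ///
    /// A sketch of one zerocopy send and of telling the data completion apart from the buffer
    /// notification; `sock_fd` and the buffer are placeholders and must stay valid until the
    /// notification arrives.
    ///
    /// ```no_run
    /// use io_uring::{cqueue, opcode, types, IoUring};
    ///
    /// let sock_fd: std::os::unix::io::RawFd = 3; // placeholder connected socket
    /// let buf = [0u8; 1024];
    /// let mut ring = IoUring::new(8)?;
    ///
    /// let sqe = opcode::SendZc::new(types::Fd(sock_fd), buf.as_ptr(), buf.len() as u32)
    ///     .build()
    ///     .user_data(0x20);
    /// unsafe { ring.submission().push(&sqe).expect("submission queue is full") };
    /// ring.submit_and_wait(1)?;
    ///
    /// for cqe in ring.completion() {
    ///     if cqueue::notif(cqe.flags()) {
    ///         // Notification CQE: the kernel is done with `buf`, which may now be reused.
    ///     } else {
    ///         println!("sent {} bytes", cqe.result());
    ///     }
    /// }
    /// # Ok::<(), std::io::Error>(())
    /// ```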
1819    pub struct SendZc {
1820        fd: { impl sealed::UseFixed },
1821        buf: { *const u8 },
1822        len: { u32 },
1823        ;;
1824        /// The `buf_index` is an index into an array of fixed buffers, and is only valid if fixed
1825        /// buffers were registered.
1826        ///
        /// The `buf` and `len` arguments must fall within the registered buffer identified by
        /// `buf_index`. The region need not be aligned with the start of the registered buffer.
1830        buf_index: Option<u16> = None,
1831        dest_addr: *const libc::sockaddr = core::ptr::null(),
1832        dest_addr_len: libc::socklen_t = 0,
1833        flags: i32 = 0,
1834        zc_flags: u16 = 0,
1835    }
1836
1837    pub const CODE = sys::IORING_OP_SEND_ZC;
1838
1839    pub fn build(self) -> Entry {
1840        let SendZc { fd, buf, len, buf_index, dest_addr, dest_addr_len, flags, zc_flags } = self;
1841
1842        let mut sqe = sqe_zeroed();
1843        sqe.opcode = Self::CODE;
1844        assign_fd!(sqe.fd = fd);
1845        sqe.__bindgen_anon_2.addr = buf as _;
1846        sqe.len = len;
1847        sqe.__bindgen_anon_3.msg_flags = flags as _;
1848        sqe.ioprio = zc_flags;
1849        if let Some(buf_index) = buf_index {
1850            sqe.__bindgen_anon_4.buf_index = buf_index;
1851            sqe.ioprio |= sys::IORING_RECVSEND_FIXED_BUF as u16;
1852        }
1853        sqe.__bindgen_anon_1.addr2 = dest_addr as _;
1854        sqe.__bindgen_anon_5.__bindgen_anon_1.addr_len = dest_addr_len as _;
1855        Entry(sqe)
1856    }
1857}
1858
1859// === 6.1 ===
1860
1861opcode! {
    /// Send a zerocopy message on a socket, equivalent to `sendmsg(2)`.
    ///
    /// `fd` must be set to the socket file descriptor, `msg` must point to the `msghdr`
    /// structure, and `flags` holds the flags associated with the system call.
1866    #[derive(Debug)]
1867    pub struct SendMsgZc {
1868        fd: { impl sealed::UseFixed },
1869        msg: { *const libc::msghdr },
1870        ;;
1871        ioprio: u16 = 0,
1872        flags: u32 = 0
1873    }
1874
1875    pub const CODE = sys::IORING_OP_SENDMSG_ZC;
1876
1877    pub fn build(self) -> Entry {
1878        let SendMsgZc { fd, msg, ioprio, flags } = self;
1879
1880        let mut sqe = sqe_zeroed();
1881        sqe.opcode = Self::CODE;
1882        assign_fd!(sqe.fd = fd);
1883        sqe.ioprio = ioprio;
1884        sqe.__bindgen_anon_2.addr = msg as _;
1885        sqe.len = 1;
1886        sqe.__bindgen_anon_3.msg_flags = flags;
1887        Entry(sqe)
1888    }
1889}
1890
1891// === 6.7 ===
1892
1893opcode! {
1894    /// Issue the equivalent of `pread(2)` with multi-shot semantics.
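    ///
    /// # Examples
    ///
    /// A sketch of a multishot read; it assumes a provided buffer group with id `BUF_GROUP` has
    /// already been registered (for example with the `ProvideBuffers` opcode), since each
    /// completion selects one of those buffers.
    ///
    /// ```no_run
    /// use io_uring::{opcode, types};
    ///
    /// const BUF_GROUP: u16 = 0;             // placeholder buffer group id
    /// let fd: std::os::unix::io::RawFd = 3; // placeholder file descriptor
    ///
    /// let sqe = opcode::ReadMulti::new(types::Fd(fd), 4096, BUF_GROUP)
    ///     .offset(0)
    ///     .build()
    ///     .user_data(0x30);
    /// ```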
1895    pub struct ReadMulti {
1896        fd: { impl sealed::UseFixed },
1897        len: { u32 },
1898        buf_group: { u16 },
1899        ;;
1900        offset: u64 = 0,
1901    }
1902
1903    pub const CODE = sys::IORING_OP_READ_MULTISHOT;
1904
1905    pub fn build(self) -> Entry {
1906        let Self { fd, len, buf_group, offset } = self;
1907
1908        let mut sqe = sqe_zeroed();
1909        sqe.opcode = Self::CODE;
1910        assign_fd!(sqe.fd = fd);
1911        sqe.__bindgen_anon_1.off = offset;
1912        sqe.len = len;
1913        sqe.__bindgen_anon_4.buf_group = buf_group;
1914        sqe.flags = crate::squeue::Flags::BUFFER_SELECT.bits();
1915        Entry(sqe)
1916    }
1917}
1918
1919opcode! {
    /// Wait on a futex, like but not equivalent to `futex(2)`'s `FUTEX_WAIT_BITSET`.
    ///
    /// Wait on the futex at address `futex`, provided it still holds the value `val`, using the
    /// `futex2(2)` flags given in `futex_flags`. `mask` can be set to a specific bitset mask,
    /// which will be matched by the waking side to decide whom to wake up. To always get woken,
    /// an application may use `FUTEX_BITSET_MATCH_ANY` (truncated to the futex bits).
    /// `futex_flags` follows the `futex2(2)` flags, not the `futex(2)` v1 interface flags.
    /// `flags` is currently unused and hence `0` must be passed.
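    ///
    /// # Examples
    ///
    /// A sketch of waiting until a futex word is woken, assuming it still holds `0` when the
    /// request is issued; `FUTEX2_SIZE_U32` is the value from `<linux/futex.h>`, restated here
    /// rather than imported.
    ///
    /// ```no_run
    /// use std::sync::atomic::AtomicU32;
    /// use io_uring::opcode;
    ///
    /// const FUTEX2_SIZE_U32: u32 = 0x02;               // from <linux/futex.h>
    /// const FUTEX_BITSET_MATCH_ANY: u64 = 0xffff_ffff;
    ///
    /// let futex = AtomicU32::new(0);
    ///
    /// // Completes when the futex is woken; fails with EAGAIN if the value is no longer 0.
    /// let sqe = opcode::FutexWait::new(futex.as_ptr(), 0, FUTEX_BITSET_MATCH_ANY, FUTEX2_SIZE_U32)
    ///     .build()
    ///     .user_data(0x40);
    /// ```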
1928    #[derive(Debug)]
1929    pub struct FutexWait {
1930        futex: { *const u32 },
1931        val: { u64 },
1932        mask: { u64 },
1933        futex_flags: { u32 },
1934        ;;
1935        flags: u32 = 0
1936    }
1937
1938    pub const CODE = sys::IORING_OP_FUTEX_WAIT;
1939
1940    pub fn build(self) -> Entry {
1941        let FutexWait { futex, val, mask, futex_flags, flags } = self;
1942
1943        let mut sqe = sqe_zeroed();
1944        sqe.opcode = Self::CODE;
1945        sqe.fd = futex_flags as _;
1946        sqe.__bindgen_anon_2.addr = futex as usize as _;
1947        sqe.__bindgen_anon_1.off = val;
1948        unsafe { sqe.__bindgen_anon_6.__bindgen_anon_1.as_mut().addr3 = mask };
1949        sqe.__bindgen_anon_3.futex_flags = flags;
1950        Entry(sqe)
1951    }
1952}
1953
1954opcode! {
    /// Wake up waiters on a futex, like but not equivalent to `futex(2)`'s `FUTEX_WAKE_BITSET`.
    ///
    /// Wake at most `val` waiters on the futex at address `futex`. `futex_flags` indicates the
    /// `futex2(2)` modifier flags. If a specific bitset for whom to wake is desired, it must be
    /// set in `mask`. Use `FUTEX_BITSET_MATCH_ANY` (truncated to the futex bits) to match any
    /// waiter on the given futex. `flags` is currently unused and hence `0` must be passed.
1962    #[derive(Debug)]
1963    pub struct FutexWake {
1964        futex: { *const u32 },
1965        val: { u64 },
1966        mask: { u64 },
1967        futex_flags: { u32 },
1968        ;;
1969        flags: u32 = 0
1970    }
1971
1972    pub const CODE = sys::IORING_OP_FUTEX_WAKE;
1973
1974    pub fn build(self) -> Entry {
1975        let FutexWake { futex, val, mask, futex_flags, flags } = self;
1976
1977        let mut sqe = sqe_zeroed();
1978        sqe.opcode = Self::CODE;
1979        sqe.fd = futex_flags as _;
1980        sqe.__bindgen_anon_2.addr = futex as usize as _;
1981        sqe.__bindgen_anon_1.off = val;
1982        unsafe { sqe.__bindgen_anon_6.__bindgen_anon_1.as_mut().addr3 = mask };
1983        sqe.__bindgen_anon_3.futex_flags = flags;
1984        Entry(sqe)
1985    }
1986}
1987
1988opcode! {
1989    /// Wait on multiple futexes.
1990    ///
1991    /// Wait on multiple futexes at the same time. Futexes are given by `futexv` and `nr_futex` is
1992    /// the number of futexes in that array. Unlike `FutexWait`, the desired bitset mask and values
1993    /// are passed in `futexv`. `flags` are currently unused and hence `0` must be passed.
1994    #[derive(Debug)]
1995    pub struct FutexWaitV {
1996        futexv: { *const types::FutexWaitV },
1997        nr_futex: { u32 },
1998        ;;
1999        flags: u32 = 0
2000    }
2001
2002    pub const CODE = sys::IORING_OP_FUTEX_WAITV;
2003
2004    pub fn build(self) -> Entry {
2005        let FutexWaitV { futexv, nr_futex, flags } = self;
2006
2007        let mut sqe = sqe_zeroed();
2008        sqe.opcode = Self::CODE;
2009        sqe.__bindgen_anon_2.addr = futexv as usize as _;
2010        sqe.len = nr_futex;
2011        sqe.__bindgen_anon_3.futex_flags = flags;
2012        Entry(sqe)
2013    }
2014}
2015
2016opcode! {
2017    /// Issue the equivalent of a `waitid(2)` system call.
2018    ///
2019    /// Available since kernel 6.7.
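    ///
    /// # Examples
    ///
    /// A sketch of waiting for a child process to exit; `child_pid` is a placeholder and the
    /// `siginfo_t` buffer must stay alive until the request completes.
    ///
    /// ```no_run
    /// use io_uring::opcode;
    ///
    /// let child_pid: libc::id_t = 1234; // placeholder child pid
    /// let mut info: libc::siginfo_t = unsafe { std::mem::zeroed() };
    /// let infop = &mut info as *mut libc::siginfo_t;
    ///
    /// let sqe = opcode::WaitId::new(libc::P_PID as libc::idtype_t, child_pid, libc::WEXITED)
    ///     .infop(infop as *const libc::siginfo_t)
    ///     .build()
    ///     .user_data(0x50);
    /// ```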
2020    #[derive(Debug)]
2021    pub struct WaitId {
2022        idtype: { libc::idtype_t },
2023        id: { libc::id_t },
2024        options: { libc::c_int },
2025        ;;
2026        infop: *const libc::siginfo_t = std::ptr::null(),
2027        flags: libc::c_uint = 0,
2028    }
2029
2030    pub const CODE = sys::IORING_OP_WAITID;
2031
2032    pub fn build(self) -> Entry {
2033        let mut sqe = sqe_zeroed();
2034        sqe.opcode = Self::CODE;
2035        sqe.fd = self.id as _;
2036        sqe.len = self.idtype as _;
2037        sqe.__bindgen_anon_3.waitid_flags = self.flags;
2038        sqe.__bindgen_anon_5.file_index = self.options as _;
2039        sqe.__bindgen_anon_1.addr2 = self.infop as _;
2040        Entry(sqe)
2041    }
2042}
2043
2044// === 6.8 ===
2045
2046opcode! {
    /// Install a fixed file descriptor.
    ///
    /// Turns a direct descriptor into a regular file descriptor that can later be used by
    /// system calls that take a normal raw file descriptor.
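    ///
    /// # Examples
    ///
    /// A sketch of turning the direct descriptor in fixed slot `0` back into a regular file
    /// descriptor; on completion, `result()` of the CQE holds the newly installed fd.
    ///
    /// ```no_run
    /// use io_uring::{opcode, types};
    ///
    /// let sqe = opcode::FixedFdInstall::new(types::Fixed(0), 0)
    ///     .build()
    ///     .user_data(0x60);
    /// ```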
2051    #[derive(Debug)]
2052    pub struct FixedFdInstall {
2053        fd: { types::Fixed },
2054        file_flags: { u32 },
2055        ;;
2056    }
2057
2058    pub const CODE = sys::IORING_OP_FIXED_FD_INSTALL;
2059
2060    pub fn build(self) -> Entry {
2061        let FixedFdInstall { fd, file_flags } = self;
2062
2063        let mut sqe = sqe_zeroed();
2064        sqe.opcode = Self::CODE;
2065        sqe.fd = fd.0 as _;
2066        sqe.flags = crate::squeue::Flags::FIXED_FILE.bits();
2067        sqe.__bindgen_anon_3.install_fd_flags = file_flags;
2068        Entry(sqe)
2069    }
2070}
2071
2072// === 6.9 ===
2073
2074opcode! {
2075    /// Perform file truncation, equivalent to `ftruncate(2)`.
2076    #[derive(Debug)]
2077    pub struct Ftruncate {
2078        fd: { impl sealed::UseFixed },
2079        len: { u64 },
2080        ;;
2081    }
2082
2083    pub const CODE = sys::IORING_OP_FTRUNCATE;
2084
2085    pub fn build(self) -> Entry {
2086        let Ftruncate { fd, len } = self;
2087
2088        let mut sqe = sqe_zeroed();
2089        sqe.opcode = Self::CODE;
2090        assign_fd!(sqe.fd = fd);
2091        sqe.__bindgen_anon_1.off = len;
2092        Entry(sqe)
2093    }
2094}
2095
2096// === 6.10 ===
2097
2098opcode! {
2099    /// Send a bundle of messages on a socket in a single request.
2100    pub struct SendBundle {
2101        fd: { impl sealed::UseFixed },
2102        buf_group: { u16 },
2103        ;;
2104        flags: i32 = 0,
2105        len: u32 = 0
2106    }
2107
2108    pub const CODE = sys::IORING_OP_SEND;
2109
2110    pub fn build(self) -> Entry {
2111        let SendBundle { fd, len, flags, buf_group } = self;
2112
2113        let mut sqe = sqe_zeroed();
2114        sqe.opcode = Self::CODE;
2115        assign_fd!(sqe.fd = fd);
2116        sqe.len = len;
2117        sqe.__bindgen_anon_3.msg_flags = flags as _;
2118        sqe.ioprio |= sys::IORING_RECVSEND_BUNDLE as u16;
2119        sqe.flags |= crate::squeue::Flags::BUFFER_SELECT.bits();
2120        sqe.__bindgen_anon_4.buf_group = buf_group;
2121        Entry(sqe)
2122    }
2123}
2124
2125opcode! {
    /// Receive a bundle of buffers from a socket.
    ///
    /// `buf_group` is the id of the provided buffer pool to use for the bundle.
    ///
    /// Note that as of kernel 6.10 the first recv always gets a single buffer, while the second
    /// obtains the bundle of remaining buffers. This behavior may change in the future.
    ///
    /// The bundle variant is available since kernel 6.10.
2135    pub struct RecvBundle {
2136        fd: { impl sealed::UseFixed },
2137        buf_group: { u16 },
2138        ;;
2139        flags: i32 = 0
2140    }
2141
2142    pub const CODE = sys::IORING_OP_RECV;
2143
2144    pub fn build(self) -> Entry {
2145        let RecvBundle { fd, buf_group, flags } = self;
2146
2147        let mut sqe = sqe_zeroed();
2148        sqe.opcode = Self::CODE;
2149        assign_fd!(sqe.fd = fd);
2150        sqe.__bindgen_anon_3.msg_flags = flags as _;
2151        sqe.__bindgen_anon_4.buf_group = buf_group;
2152        sqe.flags |= crate::squeue::Flags::BUFFER_SELECT.bits();
2153        sqe.ioprio |= sys::IORING_RECVSEND_BUNDLE as u16;
2154        Entry(sqe)
2155    }
2156}
2157
2158opcode! {
    /// Receive multiple messages from a socket as a bundle.
    ///
    /// `buf_group` is the id of the provided buffer pool to use for each received message.
    ///
    /// `MSG_WAITALL` should not be set in `flags`.
    ///
    /// The multishot version allows the application to issue a single receive request, which
    /// repeatedly posts a CQE when data is available. Each CQE takes a bundle of buffers out of
    /// the provided buffer pool for receiving. The application should check the flags of each
    /// CQE, regardless of its result. If a posted CQE does not have the `IORING_CQE_F_MORE` flag
    /// set, then the multishot receive is done and the application should issue a new request.
    ///
    /// Note that as of kernel 6.10 the first CQE always gets a single buffer, while the second
    /// obtains the bundle of remaining buffers. This behavior may change in the future.
    ///
    /// The multishot bundle variant is available since kernel 6.10.
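    ///
    /// # Examples
    ///
    /// A sketch of a multishot bundle receive; it assumes a provided buffer group with id
    /// `BUF_GROUP` has already been registered and a connected socket in `sock_fd`.
    ///
    /// ```no_run
    /// use io_uring::{cqueue, opcode, types, IoUring};
    ///
    /// const BUF_GROUP: u16 = 0;                  // placeholder buffer group id
    /// let sock_fd: std::os::unix::io::RawFd = 3; // placeholder connected socket
    /// let mut ring = IoUring::new(8)?;
    ///
    /// let sqe = opcode::RecvMultiBundle::new(types::Fd(sock_fd), BUF_GROUP)
    ///     .build()
    ///     .user_data(0x70);
    /// unsafe { ring.submission().push(&sqe).expect("submission queue is full") };
    /// ring.submit_and_wait(1)?;
    ///
    /// for cqe in ring.completion() {
    ///     println!("received {} bytes", cqe.result());
    ///     if !cqueue::more(cqe.flags()) {
    ///         // Multishot terminated; submit a new request to keep receiving.
    ///     }
    /// }
    /// # Ok::<(), std::io::Error>(())
    /// ```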
2176    pub struct RecvMultiBundle {
2177        fd: { impl sealed::UseFixed },
2178        buf_group: { u16 },
2179        ;;
2180        flags: i32 = 0
2181    }
2182
2183    pub const CODE = sys::IORING_OP_RECV;
2184
2185    pub fn build(self) -> Entry {
2186        let RecvMultiBundle { fd, buf_group, flags } = self;
2187
2188        let mut sqe = sqe_zeroed();
2189        sqe.opcode = Self::CODE;
2190        assign_fd!(sqe.fd = fd);
2191        sqe.__bindgen_anon_3.msg_flags = flags as _;
2192        sqe.__bindgen_anon_4.buf_group = buf_group;
2193        sqe.flags |= crate::squeue::Flags::BUFFER_SELECT.bits();
2194        sqe.ioprio = sys::IORING_RECV_MULTISHOT as _;
2195        sqe.ioprio |= sys::IORING_RECVSEND_BUNDLE as u16;
2196        Entry(sqe)
2197    }
2198}
2199
2200// === 6.11 ===
2201
2202opcode! {
2203    /// Bind a socket, equivalent to `bind(2)`.
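    ///
    /// # Examples
    ///
    /// A sketch of binding a socket to `0.0.0.0:8080`; `sock_fd` is a placeholder and the
    /// `sockaddr_in` must stay alive until the request completes.
    ///
    /// ```no_run
    /// use io_uring::{opcode, types};
    ///
    /// let sock_fd: std::os::unix::io::RawFd = 3; // placeholder socket
    ///
    /// let mut addr: libc::sockaddr_in = unsafe { std::mem::zeroed() };
    /// addr.sin_family = libc::AF_INET as libc::sa_family_t;
    /// addr.sin_port = 8080u16.to_be();
    /// addr.sin_addr.s_addr = libc::INADDR_ANY.to_be();
    ///
    /// let sqe = opcode::Bind::new(
    ///     types::Fd(sock_fd),
    ///     &addr as *const libc::sockaddr_in as *const libc::sockaddr,
    ///     std::mem::size_of::<libc::sockaddr_in>() as libc::socklen_t,
    /// )
    /// .build()
    /// .user_data(0x80);
    /// ```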
2204    pub struct Bind {
2205        fd: { impl sealed::UseFixed },
2206        addr: { *const libc::sockaddr },
2207        addrlen: { libc::socklen_t }
2208        ;;
2209    }
2210
2211    pub const CODE = sys::IORING_OP_BIND;
2212
2213    pub fn build(self) -> Entry {
2214        let Bind { fd, addr, addrlen } = self;
2215
2216        let mut sqe = sqe_zeroed();
2217        sqe.opcode = Self::CODE;
2218        assign_fd!(sqe.fd = fd);
2219        sqe.__bindgen_anon_2.addr = addr as _;
2220        sqe.__bindgen_anon_1.off = addrlen as _;
2221        Entry(sqe)
2222    }
2223}
2224
2225opcode! {
2226    /// Listen on a socket, equivalent to `listen(2)`.
2227    pub struct Listen {
2228        fd: { impl sealed::UseFixed },
2229        backlog: { i32 },
2230        ;;
2231    }
2232
2233    pub const CODE = sys::IORING_OP_LISTEN;
2234
2235    pub fn build(self) -> Entry {
2236        let Listen { fd, backlog } = self;
2237
2238        let mut sqe = sqe_zeroed();
2239        sqe.opcode = Self::CODE;
2240        assign_fd!(sqe.fd = fd);
2241        sqe.len = backlog as _;
2242        Entry(sqe)
2243    }
2244}
2245
2246// === 6.15 ===
2247
2248opcode! {
2249    /// Issue the zerocopy equivalent of a `recv(2)` system call.
2250    pub struct RecvZc {
2251        fd: { impl sealed::UseFixed },
2252        len: { u32 },
2253        ;;
2254        ifq: u32 = 0,
2255        ioprio: u16 = 0,
2256    }
2257
2258    pub const CODE = sys::IORING_OP_RECV_ZC;
2259
2260    pub fn build(self) -> Entry {
2261        let Self { fd, len, ifq, ioprio } = self;
2262
2263        let mut sqe = sqe_zeroed();
2264        sqe.opcode = Self::CODE;
2265        assign_fd!(sqe.fd = fd);
2266        sqe.len = len;
2267        sqe.ioprio = ioprio | sys::IORING_RECV_MULTISHOT as u16;
2268        sqe.__bindgen_anon_5.zcrx_ifq_idx = ifq;
2269        Entry(sqe)
2270    }
2271}
2272
2273opcode! {
    /// Issue the equivalent of an `epoll_wait(2)` system call.
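    ///
    /// # Examples
    ///
    /// A sketch of collecting up to 16 ready events from an existing epoll instance; `epoll_fd`
    /// is a placeholder and the events buffer must stay alive until the request completes.
    ///
    /// ```no_run
    /// use io_uring::{opcode, types};
    ///
    /// let epoll_fd: std::os::unix::io::RawFd = 3; // placeholder epoll instance
    /// let mut events: Vec<types::epoll_event> = Vec::with_capacity(16);
    ///
    /// // On completion, `result()` of the CQE is the number of events written to the buffer.
    /// let sqe = opcode::EpollWait::new(types::Fd(epoll_fd), events.as_mut_ptr(), 16)
    ///     .build()
    ///     .user_data(0x90);
    /// ```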
2275    pub struct EpollWait {
2276        fd: { impl sealed::UseFixed },
2277        events: { *mut types::epoll_event },
2278        max_events: { u32 },
2279        ;;
2280        flags: u32 = 0,
2281    }
2282
2283    pub const CODE = sys::IORING_OP_EPOLL_WAIT;
2284
2285    pub fn build(self) -> Entry {
2286        let Self { fd, events, max_events, flags } = self;
2287
2288        let mut sqe = sqe_zeroed();
2289        sqe.opcode = Self::CODE;
2290        assign_fd!(sqe.fd = fd);
2291        sqe.__bindgen_anon_2.addr = events as u64;
2292        sqe.len = max_events;
2293        sqe.__bindgen_anon_3.poll32_events = flags;
2294        Entry(sqe)
2295    }
2296}
2297
2298opcode! {
2299    /// Vectored read into a fixed buffer, equivalent to `preadv2(2)`.
2300    pub struct ReadvFixed {
2301        fd: { impl sealed::UseFixed },
        iovec: { *const libc::iovec },
2303        len: { u32 },
2304        buf_index: { u16 },
2305        ;;
2306        ioprio: u16 = 0,
2307        offset: u64 = 0,
2308        rw_flags: i32 = 0,
2309    }
2310
2311    pub const CODE = sys::IORING_OP_READV_FIXED;
2312
2313    pub fn build(self) -> Entry {
2314        let Self { fd, iovec, len, buf_index, offset, ioprio, rw_flags } = self;
2315
2316        let mut sqe = sqe_zeroed();
2317        sqe.opcode = Self::CODE;
2318        assign_fd!(sqe.fd = fd);
2319        sqe.__bindgen_anon_1.off = offset as _;
2320        sqe.__bindgen_anon_2.addr = iovec as _;
2321        sqe.len = len;
2322        sqe.__bindgen_anon_4.buf_index = buf_index;
2323        sqe.ioprio = ioprio;
2324        sqe.__bindgen_anon_3.rw_flags = rw_flags;
2325        Entry(sqe)
2326    }
2327}
2328
2329opcode! {
2330    /// Vectored write from a fixed buffer, equivalent to `pwritev2(2)`.
2331    pub struct WritevFixed {
2332        fd: { impl sealed::UseFixed },
        iovec: { *const libc::iovec },
2334        len: { u32 },
2335        buf_index: { u16 },
2336        ;;
2337        ioprio: u16 = 0,
2338        offset: u64 = 0,
2339        rw_flags: i32 = 0,
2340    }
2341
2342    pub const CODE = sys::IORING_OP_WRITEV_FIXED;
2343
2344    pub fn build(self) -> Entry {
2345        let Self { fd, iovec, len, buf_index, offset, ioprio, rw_flags } = self;
2346
2347        let mut sqe = sqe_zeroed();
2348        sqe.opcode = Self::CODE;
2349        assign_fd!(sqe.fd = fd);
2350        sqe.__bindgen_anon_1.off = offset as _;
2351        sqe.__bindgen_anon_2.addr = iovec as _;
2352        sqe.len = len;
2353        sqe.__bindgen_anon_4.buf_index = buf_index;
2354        sqe.ioprio = ioprio;
2355        sqe.__bindgen_anon_3.rw_flags = rw_flags;
2356        Entry(sqe)
2357    }
2358}