io_uring/
opcode.rs

1//! Operation codes that can be used to construct [`squeue::Entry`](crate::squeue::Entry)s.
2
3#![allow(clippy::new_without_default)]
4
5use std::convert::TryInto;
6use std::mem;
7use std::os::unix::io::RawFd;
8
9use crate::squeue::Entry;
10use crate::squeue::Entry128;
11use crate::sys;
12use crate::types::{self, sealed};
13
14macro_rules! assign_fd {
15    ( $sqe:ident . fd = $opfd:expr ) => {
16        match $opfd {
17            sealed::Target::Fd(fd) => $sqe.fd = fd,
18            sealed::Target::Fixed(idx) => {
19                $sqe.fd = idx as _;
20                $sqe.flags |= crate::squeue::Flags::FIXED_FILE.bits();
21            }
22        }
23    };
24}
25
26macro_rules! opcode {
27    (@type impl sealed::UseFixed ) => {
28        sealed::Target
29    };
30    (@type impl sealed::UseFd ) => {
31        RawFd
32    };
33    (@type $name:ty ) => {
34        $name
35    };
36    (
37        $( #[$outer:meta] )*
38        pub struct $name:ident {
39            $( #[$new_meta:meta] )*
40
41            $( $field:ident : { $( $tnt:tt )+ } ),*
42
43            $(,)?
44
45            ;;
46
47            $(
48                $( #[$opt_meta:meta] )*
49                $opt_field:ident : $opt_tname:ty = $default:expr
50            ),*
51
52            $(,)?
53        }
54
55        pub const CODE = $opcode:expr;
56
57        $( #[$build_meta:meta] )*
58        pub fn build($self:ident) -> $entry:ty $build_block:block
59    ) => {
60        $( #[$outer] )*
61        pub struct $name {
62            $( $field : opcode!(@type $( $tnt )*), )*
63            $( $opt_field : $opt_tname, )*
64        }
65
66        impl $name {
67            $( #[$new_meta] )*
68            #[inline]
69            pub fn new($( $field : $( $tnt )* ),*) -> Self {
70                $name {
71                    $( $field: $field.into(), )*
72                    $( $opt_field: $default, )*
73                }
74            }
75
76            /// The opcode of the operation. This can be passed to
77            /// [`Probe::is_supported`](crate::Probe::is_supported) to check if this operation is
78            /// supported with the current kernel.
79            pub const CODE: u8 = $opcode as _;
80
81            $(
82                $( #[$opt_meta] )*
83                #[inline]
84                pub const fn $opt_field(mut self, $opt_field: $opt_tname) -> Self {
85                    self.$opt_field = $opt_field;
86                    self
87                }
88            )*
89
90            $( #[$build_meta] )*
91            #[inline]
92            pub fn build($self) -> $entry $build_block
93        }
94    }
95}
96
97/// inline zeroed to improve codegen
98#[inline(always)]
99fn sqe_zeroed() -> sys::io_uring_sqe {
100    unsafe { mem::zeroed() }
101}
102
103opcode! {
104    /// Do not perform any I/O.
105    ///
106    /// This is useful for testing the performance of the io_uring implementation itself.
107    #[derive(Debug)]
108    pub struct Nop { ;; }
109
110    pub const CODE = sys::IORING_OP_NOP;
111
112    pub fn build(self) -> Entry {
113        let Nop {} = self;
114
115        let mut sqe = sqe_zeroed();
116        sqe.opcode = Self::CODE;
117        sqe.fd = -1;
118        Entry(sqe)
119    }
120}
121
122opcode! {
123    /// Vectored read, equivalent to `preadv2(2)`.
124    #[derive(Debug)]
125    pub struct Readv {
126        fd: { impl sealed::UseFixed },
127        iovec: { *const libc::iovec },
128        len: { u32 },
129        ;;
130        ioprio: u16 = 0,
131        offset: u64 = 0,
132        /// specified for read operations, contains a bitwise OR of per-I/O flags,
133        /// as described in the `preadv2(2)` man page.
134        rw_flags: types::RwFlags = 0,
135        buf_group: u16 = 0
136    }
137
138    pub const CODE = sys::IORING_OP_READV;
139
140    pub fn build(self) -> Entry {
141        let Readv {
142            fd,
143            iovec, len, offset,
144            ioprio, rw_flags,
145            buf_group
146        } = self;
147
148        let mut sqe = sqe_zeroed();
149        sqe.opcode = Self::CODE;
150        assign_fd!(sqe.fd = fd);
151        sqe.ioprio = ioprio;
152        sqe.__bindgen_anon_2.addr = iovec as _;
153        sqe.len = len;
154        sqe.__bindgen_anon_1.off = offset;
155        sqe.__bindgen_anon_3.rw_flags = rw_flags;
156        sqe.__bindgen_anon_4.buf_group = buf_group;
157        Entry(sqe)
158    }
159}
160
161opcode! {
162    /// Vectored write, equivalent to `pwritev2(2)`.
163    #[derive(Debug)]
164    pub struct Writev {
165        fd: { impl sealed::UseFixed },
166        iovec: { *const libc::iovec },
167        len: { u32 },
168        ;;
169        ioprio: u16 = 0,
170        offset: u64 = 0,
171        /// specified for write operations, contains a bitwise OR of per-I/O flags,
172        /// as described in the `preadv2(2)` man page.
173        rw_flags: types::RwFlags = 0
174    }
175
176    pub const CODE = sys::IORING_OP_WRITEV;
177
178    pub fn build(self) -> Entry {
179        let Writev {
180            fd,
181            iovec, len, offset,
182            ioprio, rw_flags
183        } = self;
184
185        let mut sqe = sqe_zeroed();
186        sqe.opcode = Self::CODE;
187        assign_fd!(sqe.fd = fd);
188        sqe.ioprio = ioprio;
189        sqe.__bindgen_anon_2.addr = iovec as _;
190        sqe.len = len;
191        sqe.__bindgen_anon_1.off = offset;
192        sqe.__bindgen_anon_3.rw_flags = rw_flags;
193        Entry(sqe)
194    }
195}
196
197opcode! {
198    /// File sync, equivalent to `fsync(2)`.
199    ///
200    /// Note that, while I/O is initiated in the order in which it appears in the submission queue,
201    /// completions are unordered. For example, an application which places a write I/O followed by
202    /// an fsync in the submission queue cannot expect the fsync to apply to the write. The two
203    /// operations execute in parallel, so the fsync may complete before the write is issued to the
204    /// storage. The same is also true for previously issued writes that have not completed prior to
205    /// the fsync.
206    #[derive(Debug)]
207    pub struct Fsync {
208        fd: { impl sealed::UseFixed },
209        ;;
210        /// The `flags` bit mask may contain either 0, for a normal file integrity sync,
211        /// or [types::FsyncFlags::DATASYNC] to provide data sync only semantics.
212        /// See the descriptions of `O_SYNC` and `O_DSYNC` in the `open(2)` manual page for more information.
213        flags: types::FsyncFlags = types::FsyncFlags::empty()
214    }
215
216    pub const CODE = sys::IORING_OP_FSYNC;
217
218    pub fn build(self) -> Entry {
219        let Fsync { fd, flags } = self;
220
221        let mut sqe = sqe_zeroed();
222        sqe.opcode = Self::CODE;
223        assign_fd!(sqe.fd = fd);
224        sqe.__bindgen_anon_3.fsync_flags = flags.bits();
225        Entry(sqe)
226    }
227}
228
229opcode! {
230    /// Read from a file into a fixed buffer that has been previously registered with
231    /// [`Submitter::register_buffers`](crate::Submitter::register_buffers).
232    ///
233    /// The return values match those documented in the `preadv2(2)` man pages.
234    #[derive(Debug)]
235    pub struct ReadFixed {
236        fd: { impl sealed::UseFixed },
237        buf: { *mut u8 },
238        len: { u32 },
239        buf_index: { u16 },
240        ;;
241        ioprio: u16 = 0,
242        /// The offset of the file to read from.
243        offset: u64 = 0,
244        /// Specified for read operations, contains a bitwise OR of per-I/O flags, as described in
245        /// the `preadv2(2)` man page.
246        rw_flags: types::RwFlags = 0
247    }
248
249    pub const CODE = sys::IORING_OP_READ_FIXED;
250
251    pub fn build(self) -> Entry {
252        let ReadFixed {
253            fd,
254            buf, len, offset,
255            buf_index,
256            ioprio, rw_flags
257        } = self;
258
259        let mut sqe = sqe_zeroed();
260        sqe.opcode = Self::CODE;
261        assign_fd!(sqe.fd = fd);
262        sqe.ioprio = ioprio;
263        sqe.__bindgen_anon_2.addr = buf as _;
264        sqe.len = len;
265        sqe.__bindgen_anon_1.off = offset;
266        sqe.__bindgen_anon_3.rw_flags = rw_flags;
267        sqe.__bindgen_anon_4.buf_index = buf_index;
268        Entry(sqe)
269    }
270}
271
272opcode! {
273    /// Write to a file from a fixed buffer that have been previously registered with
274    /// [`Submitter::register_buffers`](crate::Submitter::register_buffers).
275    ///
276    /// The return values match those documented in the `pwritev2(2)` man pages.
277    #[derive(Debug)]
278    pub struct WriteFixed {
279        fd: { impl sealed::UseFixed },
280        buf: { *const u8 },
281        len: { u32 },
282        buf_index: { u16 },
283        ;;
284        ioprio: u16 = 0,
285        /// The offset of the file to write to.
286        offset: u64 = 0,
287        /// Specified for write operations, contains a bitwise OR of per-I/O flags, as described in
288        /// the `pwritev2(2)` man page.
289        rw_flags: types::RwFlags = 0
290    }
291
292    pub const CODE = sys::IORING_OP_WRITE_FIXED;
293
294    pub fn build(self) -> Entry {
295        let WriteFixed {
296            fd,
297            buf, len, offset,
298            buf_index,
299            ioprio, rw_flags
300        } = self;
301
302        let mut sqe = sqe_zeroed();
303        sqe.opcode = Self::CODE;
304        assign_fd!(sqe.fd = fd);
305        sqe.ioprio = ioprio;
306        sqe.__bindgen_anon_2.addr = buf as _;
307        sqe.len = len;
308        sqe.__bindgen_anon_1.off = offset;
309        sqe.__bindgen_anon_3.rw_flags = rw_flags;
310        sqe.__bindgen_anon_4.buf_index = buf_index;
311        Entry(sqe)
312    }
313}
314
315opcode! {
316    /// Poll the specified fd.
317    ///
318    /// Unlike poll or epoll without `EPOLLONESHOT`, this interface defaults to work in one shot mode.
319    /// That is, once the poll operation is completed, it will have to be resubmitted.
320    ///
321    /// If multi is set, the poll will work in multi shot mode instead. That means it will
322    /// repeatedly trigger when the requested event becomes true, and hence multiple CQEs can be
323    /// generated from this single submission. The CQE flags field will have IORING_CQE_F_MORE set
324    /// on completion if the application should expect further CQE entries from the original
325    /// request. If this flag isn't set on completion, then the poll request has been terminated
326    /// and no further events will be generated. This mode is available since 5.13.
327    #[derive(Debug)]
328    pub struct PollAdd {
329        /// The bits that may be set in `flags` are defined in `<poll.h>`,
330        /// and documented in `poll(2)`.
331        fd: { impl sealed::UseFixed },
332        flags: { u32 },
333        ;;
334        multi: bool = false
335    }
336
337    pub const CODE = sys::IORING_OP_POLL_ADD;
338
339    pub fn build(self) -> Entry {
340        let PollAdd { fd, flags, multi } = self;
341
342        let mut sqe = sqe_zeroed();
343        sqe.opcode = Self::CODE;
344        assign_fd!(sqe.fd = fd);
345        if multi {
346            sqe.len = sys::IORING_POLL_ADD_MULTI;
347        }
348
349        #[cfg(target_endian = "little")] {
350            sqe.__bindgen_anon_3.poll32_events = flags;
351        }
352
353        #[cfg(target_endian = "big")] {
354            let x = flags << 16;
355            let y = flags >> 16;
356            let flags = x | y;
357            sqe.__bindgen_anon_3.poll32_events = flags;
358        }
359
360        Entry(sqe)
361    }
362}
363
364opcode! {
365    /// Remove an existing [poll](PollAdd) request.
366    ///
367    /// If found, the `result` method of the `cqueue::Entry` will return 0.
368    /// If not found, `result` will return `-libc::ENOENT`.
369    #[derive(Debug)]
370    pub struct PollRemove {
371        user_data: { u64 }
372        ;;
373    }
374
375    pub const CODE = sys::IORING_OP_POLL_REMOVE;
376
377    pub fn build(self) -> Entry {
378        let PollRemove { user_data } = self;
379
380        let mut sqe = sqe_zeroed();
381        sqe.opcode = Self::CODE;
382        sqe.fd = -1;
383        sqe.__bindgen_anon_2.addr = user_data;
384        Entry(sqe)
385    }
386}
387
388opcode! {
389    /// Sync a file segment with disk, equivalent to `sync_file_range(2)`.
390    #[derive(Debug)]
391    pub struct SyncFileRange {
392        fd: { impl sealed::UseFixed },
393        len: { u32 },
394        ;;
395        /// the offset method holds the offset in bytes
396        offset: u64 = 0,
397        /// the flags method holds the flags for the command
398        flags: u32 = 0
399    }
400
401    pub const CODE = sys::IORING_OP_SYNC_FILE_RANGE;
402
403    pub fn build(self) -> Entry {
404        let SyncFileRange {
405            fd,
406            len, offset,
407            flags
408        } = self;
409
410        let mut sqe = sqe_zeroed();
411        sqe.opcode = Self::CODE;
412        assign_fd!(sqe.fd = fd);
413        sqe.len = len;
414        sqe.__bindgen_anon_1.off = offset;
415        sqe.__bindgen_anon_3.sync_range_flags = flags;
416        Entry(sqe)
417    }
418}
419
420opcode! {
421    /// Send a message on a socket, equivalent to `send(2)`.
422    ///
423    /// fd must be set to the socket file descriptor, addr must contains a pointer to the msghdr
424    /// structure, and flags holds the flags associated with the system call.
425    #[derive(Debug)]
426    pub struct SendMsg {
427        fd: { impl sealed::UseFixed },
428        msg: { *const libc::msghdr },
429        ;;
430        ioprio: u16 = 0,
431        flags: u32 = 0
432    }
433
434    pub const CODE = sys::IORING_OP_SENDMSG;
435
436    pub fn build(self) -> Entry {
437        let SendMsg { fd, msg, ioprio, flags } = self;
438
439        let mut sqe = sqe_zeroed();
440        sqe.opcode = Self::CODE;
441        assign_fd!(sqe.fd = fd);
442        sqe.ioprio = ioprio;
443        sqe.__bindgen_anon_2.addr = msg as _;
444        sqe.len = 1;
445        sqe.__bindgen_anon_3.msg_flags = flags;
446        Entry(sqe)
447    }
448}
449
450opcode! {
451    /// Receive a message on a socket, equivalent to `recvmsg(2)`.
452    ///
453    /// See also the description of [`SendMsg`].
454    #[derive(Debug)]
455    pub struct RecvMsg {
456        fd: { impl sealed::UseFixed },
457        msg: { *mut libc::msghdr },
458        ;;
459        ioprio: u16 = 0,
460        flags: u32 = 0,
461        buf_group: u16 = 0
462    }
463
464    pub const CODE = sys::IORING_OP_RECVMSG;
465
466    pub fn build(self) -> Entry {
467        let RecvMsg { fd, msg, ioprio, flags, buf_group } = self;
468
469        let mut sqe = sqe_zeroed();
470        sqe.opcode = Self::CODE;
471        assign_fd!(sqe.fd = fd);
472        sqe.ioprio = ioprio;
473        sqe.__bindgen_anon_2.addr = msg as _;
474        sqe.len = 1;
475        sqe.__bindgen_anon_3.msg_flags = flags;
476        sqe.__bindgen_anon_4.buf_group = buf_group;
477        Entry(sqe)
478    }
479}
480
481opcode! {
482    /// Receive multiple messages on a socket, equivalent to `recvmsg(2)`.
483    ///
484    /// Parameters:
485    ///     msg:       For this multishot variant of ResvMsg, only the msg_namelen and msg_controllen
486    ///                fields are relevant.
487    ///     buf_group: The id of the provided buffer pool to use for each received message.
488    ///
489    /// See also the description of [`SendMsg`] and [`types::RecvMsgOut`].
490    ///
491    /// The multishot version allows the application to issue a single receive request, which
492    /// repeatedly posts a CQE when data is available. It requires the MSG_WAITALL flag is not set.
493    /// Each CQE will take a buffer out of a provided buffer pool for receiving. The application
494    /// should check the flags of each CQE, regardless of its result. If a posted CQE does not have
495    /// the IORING_CQE_F_MORE flag set then the multishot receive will be done and the application
496    /// should issue a new request.
497    ///
498    /// Unlike [`RecvMsg`], this multishot recvmsg will prepend a struct which describes the layout
499    /// of the rest of the buffer in combination with the initial msghdr structure submitted with
500    /// the request. Use [`types::RecvMsgOut`] to parse the data received and access its
501    /// components.
502    ///
503    /// The recvmsg multishot variant is available since kernel 6.0.
504    #[derive(Debug)]
505    pub struct RecvMsgMulti {
506        fd: { impl sealed::UseFixed },
507        msg: { *const libc::msghdr },
508        buf_group: { u16 },
509        ;;
510        ioprio: u16 = 0,
511        flags: u32 = 0
512    }
513
514    pub const CODE = sys::IORING_OP_RECVMSG;
515
516    pub fn build(self) -> Entry {
517        let RecvMsgMulti { fd, msg, buf_group, ioprio, flags } = self;
518
519        let mut sqe = sqe_zeroed();
520        sqe.opcode = Self::CODE;
521        assign_fd!(sqe.fd = fd);
522        sqe.__bindgen_anon_2.addr = msg as _;
523        sqe.len = 1;
524        sqe.__bindgen_anon_3.msg_flags = flags;
525        sqe.__bindgen_anon_4.buf_group = buf_group;
526        sqe.flags |= crate::squeue::Flags::BUFFER_SELECT.bits();
527        sqe.ioprio = ioprio | (sys::IORING_RECV_MULTISHOT as u16);
528        Entry(sqe)
529    }
530}
531
532opcode! {
533    /// Register a timeout operation.
534    ///
535    /// A timeout will trigger a wakeup event on the completion ring for anyone waiting for events.
536    /// A timeout condition is met when either the specified timeout expires, or the specified number of events have completed.
537    /// Either condition will trigger the event.
538    /// The request will complete with `-ETIME` if the timeout got completed through expiration of the timer,
539    /// or 0 if the timeout got completed through requests completing on their own.
540    /// If the timeout was cancelled before it expired, the request will complete with `-ECANCELED`.
541    #[derive(Debug)]
542    pub struct Timeout {
543        timespec: { *const types::Timespec },
544        ;;
545        /// `count` may contain a completion event count.
546        /// If [`TimeoutFlags::TIMEOUT`] is set in `flags`, this is the number of repeats. A value of 0 means the timeout is
547        /// indefinite and can only be stopped by a removal request.
548        count: u32 = 0,
549
550        flags: types::TimeoutFlags = types::TimeoutFlags::empty()
551    }
552
553    pub const CODE = sys::IORING_OP_TIMEOUT;
554
555    pub fn build(self) -> Entry {
556        let Timeout { timespec, count, flags } = self;
557
558        let mut sqe = sqe_zeroed();
559        sqe.opcode = Self::CODE;
560        sqe.fd = -1;
561        sqe.__bindgen_anon_2.addr = timespec as _;
562        sqe.len = 1;
563        sqe.__bindgen_anon_1.off = count as _;
564        sqe.__bindgen_anon_3.timeout_flags = flags.bits();
565        Entry(sqe)
566    }
567}
568
569// === 5.5 ===
570
571opcode! {
572    /// Attempt to remove an existing [timeout operation](Timeout).
573    pub struct TimeoutRemove {
574        user_data: { u64 },
575        ;;
576    }
577
578    pub const CODE = sys::IORING_OP_TIMEOUT_REMOVE;
579
580    pub fn build(self) -> Entry {
581        let TimeoutRemove { user_data } = self;
582
583        let mut sqe = sqe_zeroed();
584        sqe.opcode = Self::CODE;
585        sqe.fd = -1;
586        sqe.__bindgen_anon_2.addr = user_data;
587        Entry(sqe)
588    }
589}
590
591opcode! {
592    /// Attempt to update an existing [timeout operation](Timeout) with a new timespec.
593    /// The optional `count` value of the original timeout value cannot be updated.
594    pub struct TimeoutUpdate {
595        user_data: { u64 },
596        timespec: { *const types::Timespec },
597        ;;
598        flags: types::TimeoutFlags = types::TimeoutFlags::empty()
599    }
600
601    pub const CODE = sys::IORING_OP_TIMEOUT_REMOVE;
602
603    pub fn build(self) -> Entry {
604        let TimeoutUpdate { user_data, timespec, flags } = self;
605
606        let mut sqe = sqe_zeroed();
607        sqe.opcode = Self::CODE;
608        sqe.fd = -1;
609        sqe.__bindgen_anon_1.off = timespec as _;
610        sqe.__bindgen_anon_2.addr = user_data;
611        sqe.__bindgen_anon_3.timeout_flags = flags.bits() | sys::IORING_TIMEOUT_UPDATE;
612        Entry(sqe)
613    }
614}
615
616opcode! {
617    /// Accept a new connection on a socket, equivalent to `accept4(2)`.
618    pub struct Accept {
619        fd: { impl sealed::UseFixed },
620        addr: { *mut libc::sockaddr },
621        addrlen: { *mut libc::socklen_t },
622        ;;
623        file_index: Option<types::DestinationSlot> = None,
624        flags: i32 = 0
625    }
626
627    pub const CODE = sys::IORING_OP_ACCEPT;
628
629    pub fn build(self) -> Entry {
630        let Accept { fd, addr, addrlen, file_index, flags } = self;
631
632        let mut sqe = sqe_zeroed();
633        sqe.opcode = Self::CODE;
634        assign_fd!(sqe.fd = fd);
635        sqe.__bindgen_anon_2.addr = addr as _;
636        sqe.__bindgen_anon_1.addr2 = addrlen as _;
637        sqe.__bindgen_anon_3.accept_flags = flags as _;
638        if let Some(dest) = file_index {
639            sqe.__bindgen_anon_5.file_index = dest.kernel_index_arg();
640        }
641        Entry(sqe)
642    }
643}
644
645opcode! {
646    /// Set a socket option.
647    pub struct SetSockOpt {
648        fd: { impl sealed::UseFixed },
649        level: { u32 },
650        optname: { u32 },
651        optval: { *const libc::c_void },
652        optlen: { u32 },
653        ;;
654        flags: u32 = 0
655    }
656
657    pub const CODE = sys::IORING_OP_URING_CMD;
658
659    pub fn build(self) -> Entry {
660        let SetSockOpt { fd, level, optname, optval, optlen, flags } = self;
661        let mut sqe = sqe_zeroed();
662        sqe.opcode = Self::CODE;
663        assign_fd!(sqe.fd = fd);
664        sqe.__bindgen_anon_1.__bindgen_anon_1.cmd_op = sys::SOCKET_URING_OP_SETSOCKOPT;
665
666        sqe.__bindgen_anon_2.__bindgen_anon_1.level = level;
667        sqe.__bindgen_anon_2.__bindgen_anon_1.optname = optname;
668        sqe.__bindgen_anon_3.uring_cmd_flags = flags;
669        sqe.__bindgen_anon_5.optlen = optlen;
670        unsafe { *sqe.__bindgen_anon_6.optval.as_mut() = optval as u64 };
671        Entry(sqe)
672    }
673}
674
675opcode! {
676    /// Attempt to cancel an already issued request.
677    pub struct AsyncCancel {
678        user_data: { u64 }
679        ;;
680
681        // TODO flags
682    }
683
684    pub const CODE = sys::IORING_OP_ASYNC_CANCEL;
685
686    pub fn build(self) -> Entry {
687        let AsyncCancel { user_data } = self;
688
689        let mut sqe = sqe_zeroed();
690        sqe.opcode = Self::CODE;
691        sqe.fd = -1;
692        sqe.__bindgen_anon_2.addr = user_data;
693        Entry(sqe)
694    }
695}
696
697opcode! {
698    /// This request must be linked with another request through
699    /// [`Flags::IO_LINK`](crate::squeue::Flags::IO_LINK) which is described below.
700    /// Unlike [`Timeout`], [`LinkTimeout`] acts on the linked request, not the completion queue.
701    pub struct LinkTimeout {
702        timespec: { *const types::Timespec },
703        ;;
704        flags: types::TimeoutFlags = types::TimeoutFlags::empty()
705    }
706
707    pub const CODE = sys::IORING_OP_LINK_TIMEOUT;
708
709    pub fn build(self) -> Entry {
710        let LinkTimeout { timespec, flags } = self;
711
712        let mut sqe = sqe_zeroed();
713        sqe.opcode = Self::CODE;
714        sqe.fd = -1;
715        sqe.__bindgen_anon_2.addr = timespec as _;
716        sqe.len = 1;
717        sqe.__bindgen_anon_3.timeout_flags = flags.bits();
718        Entry(sqe)
719    }
720}
721
722opcode! {
723    /// Connect a socket, equivalent to `connect(2)`.
724    pub struct Connect {
725        fd: { impl sealed::UseFixed },
726        addr: { *const libc::sockaddr },
727        addrlen: { libc::socklen_t }
728        ;;
729    }
730
731    pub const CODE = sys::IORING_OP_CONNECT;
732
733    pub fn build(self) -> Entry {
734        let Connect { fd, addr, addrlen } = self;
735
736        let mut sqe = sqe_zeroed();
737        sqe.opcode = Self::CODE;
738        assign_fd!(sqe.fd = fd);
739        sqe.__bindgen_anon_2.addr = addr as _;
740        sqe.__bindgen_anon_1.off = addrlen as _;
741        Entry(sqe)
742    }
743}
744
745// === 5.6 ===
746
747opcode! {
748    /// Preallocate or deallocate space to a file, equivalent to `fallocate(2)`.
749    pub struct Fallocate {
750        fd: { impl sealed::UseFixed },
751        len: { u64 },
752        ;;
753        offset: u64 = 0,
754        mode: i32 = 0
755    }
756
757    pub const CODE = sys::IORING_OP_FALLOCATE;
758
759    pub fn build(self) -> Entry {
760        let Fallocate { fd, len, offset, mode } = self;
761
762        let mut sqe = sqe_zeroed();
763        sqe.opcode = Self::CODE;
764        assign_fd!(sqe.fd = fd);
765        sqe.__bindgen_anon_2.addr = len;
766        sqe.len = mode as _;
767        sqe.__bindgen_anon_1.off = offset;
768        Entry(sqe)
769    }
770}
771
772opcode! {
773    /// Open a file, equivalent to `openat(2)`.
774    pub struct OpenAt {
775        dirfd: { impl sealed::UseFd },
776        pathname: { *const libc::c_char },
777        ;;
778        file_index: Option<types::DestinationSlot> = None,
779        flags: i32 = 0,
780        mode: libc::mode_t = 0
781    }
782
783    pub const CODE = sys::IORING_OP_OPENAT;
784
785    pub fn build(self) -> Entry {
786        let OpenAt { dirfd, pathname, file_index, flags, mode } = self;
787
788        let mut sqe = sqe_zeroed();
789        sqe.opcode = Self::CODE;
790        sqe.fd = dirfd;
791        sqe.__bindgen_anon_2.addr = pathname as _;
792        sqe.len = mode;
793        sqe.__bindgen_anon_3.open_flags = flags as _;
794        if let Some(dest) = file_index {
795            sqe.__bindgen_anon_5.file_index = dest.kernel_index_arg();
796        }
797        Entry(sqe)
798    }
799}
800
801opcode! {
802    /// Close a file descriptor, equivalent to `close(2)`.
803    ///
804    /// Use a types::Fixed(fd) argument to close an io_uring direct descriptor.
805    pub struct Close {
806        fd: { impl sealed::UseFixed },
807        ;;
808    }
809
810    pub const CODE = sys::IORING_OP_CLOSE;
811
812    pub fn build(self) -> Entry {
813        let Close { fd } = self;
814
815        let mut sqe = sqe_zeroed();
816        sqe.opcode = Self::CODE;
817        match fd {
818            sealed::Target::Fd(fd) => sqe.fd = fd,
819            sealed::Target::Fixed(idx) => {
820                sqe.fd = 0;
821                sqe.__bindgen_anon_5.file_index = idx + 1;
822            }
823        }
824        Entry(sqe)
825    }
826}
827
828opcode! {
829    /// This command is an alternative to using
830    /// [`Submitter::register_files_update`](crate::Submitter::register_files_update) which then
831    /// works in an async fashion, like the rest of the io_uring commands.
832    pub struct FilesUpdate {
833        fds: { *const RawFd },
834        len: { u32 },
835        ;;
836        offset: i32 = 0
837    }
838
839    pub const CODE = sys::IORING_OP_FILES_UPDATE;
840
841    pub fn build(self) -> Entry {
842        let FilesUpdate { fds, len, offset } = self;
843
844        let mut sqe = sqe_zeroed();
845        sqe.opcode = Self::CODE;
846        sqe.fd = -1;
847        sqe.__bindgen_anon_2.addr = fds as _;
848        sqe.len = len;
849        sqe.__bindgen_anon_1.off = offset as _;
850        Entry(sqe)
851    }
852}
853
854opcode! {
855    /// Get file status, equivalent to `statx(2)`.
856    pub struct Statx {
857        dirfd: { impl sealed::UseFd },
858        pathname: { *const libc::c_char },
859        statxbuf: { *mut types::statx },
860        ;;
861        flags: i32 = 0,
862        mask: u32 = 0
863    }
864
865    pub const CODE = sys::IORING_OP_STATX;
866
867    pub fn build(self) -> Entry {
868        let Statx {
869            dirfd, pathname, statxbuf,
870            flags, mask
871        } = self;
872
873        let mut sqe = sqe_zeroed();
874        sqe.opcode = Self::CODE;
875        sqe.fd = dirfd;
876        sqe.__bindgen_anon_2.addr = pathname as _;
877        sqe.len = mask;
878        sqe.__bindgen_anon_1.off = statxbuf as _;
879        sqe.__bindgen_anon_3.statx_flags = flags as _;
880        Entry(sqe)
881    }
882}
883
884opcode! {
885    /// Issue the equivalent of a `pread(2)` or `pwrite(2)` system call
886    ///
887    /// * `fd` is the file descriptor to be operated on,
888    /// * `addr` contains the buffer in question,
889    /// * `len` contains the length of the IO operation,
890    ///
891    /// These are non-vectored versions of the `IORING_OP_READV` and `IORING_OP_WRITEV` opcodes.
892    /// See also `read(2)` and `write(2)` for the general description of the related system call.
893    ///
894    /// Available since 5.6.
895    pub struct Read {
896        fd: { impl sealed::UseFixed },
897        buf: { *mut u8 },
898        len: { u32 },
899        ;;
900        /// `offset` contains the read or write offset.
901        ///
902        /// If `fd` does not refer to a seekable file, `offset` must be set to zero.
903        /// If `offset` is set to `-1`, the offset will use (and advance) the file position,
904        /// like the `read(2)` and `write(2)` system calls.
905        offset: u64 = 0,
906        ioprio: u16 = 0,
907        rw_flags: types::RwFlags = 0,
908        buf_group: u16 = 0
909    }
910
911    pub const CODE = sys::IORING_OP_READ;
912
913    pub fn build(self) -> Entry {
914        let Read {
915            fd,
916            buf, len, offset,
917            ioprio, rw_flags,
918            buf_group
919        } = self;
920
921        let mut sqe = sqe_zeroed();
922        sqe.opcode = Self::CODE;
923        assign_fd!(sqe.fd = fd);
924        sqe.ioprio = ioprio;
925        sqe.__bindgen_anon_2.addr = buf as _;
926        sqe.len = len;
927        sqe.__bindgen_anon_1.off = offset;
928        sqe.__bindgen_anon_3.rw_flags = rw_flags;
929        sqe.__bindgen_anon_4.buf_group = buf_group;
930        Entry(sqe)
931    }
932}
933
934opcode! {
935    /// Issue the equivalent of a `pread(2)` or `pwrite(2)` system call
936    ///
937    /// * `fd` is the file descriptor to be operated on,
938    /// * `addr` contains the buffer in question,
939    /// * `len` contains the length of the IO operation,
940    ///
941    /// These are non-vectored versions of the `IORING_OP_READV` and `IORING_OP_WRITEV` opcodes.
942    /// See also `read(2)` and `write(2)` for the general description of the related system call.
943    ///
944    /// Available since 5.6.
945    pub struct Write {
946        fd: { impl sealed::UseFixed },
947        buf: { *const u8 },
948        len: { u32 },
949        ;;
950        /// `offset` contains the read or write offset.
951        ///
952        /// If `fd` does not refer to a seekable file, `offset` must be set to zero.
953        /// If `offsett` is set to `-1`, the offset will use (and advance) the file position,
954        /// like the `read(2)` and `write(2)` system calls.
955        offset: u64 = 0,
956        ioprio: u16 = 0,
957        rw_flags: types::RwFlags = 0
958    }
959
960    pub const CODE = sys::IORING_OP_WRITE;
961
962    pub fn build(self) -> Entry {
963        let Write {
964            fd,
965            buf, len, offset,
966            ioprio, rw_flags
967        } = self;
968
969        let mut sqe = sqe_zeroed();
970        sqe.opcode = Self::CODE;
971        assign_fd!(sqe.fd = fd);
972        sqe.ioprio = ioprio;
973        sqe.__bindgen_anon_2.addr = buf as _;
974        sqe.len = len;
975        sqe.__bindgen_anon_1.off = offset;
976        sqe.__bindgen_anon_3.rw_flags = rw_flags;
977        Entry(sqe)
978    }
979}
980
981opcode! {
982    /// Predeclare an access pattern for file data, equivalent to `posix_fadvise(2)`.
983    pub struct Fadvise {
984        fd: { impl sealed::UseFixed },
985        len: { libc::off_t },
986        advice: { i32 },
987        ;;
988        offset: u64 = 0,
989    }
990
991    pub const CODE = sys::IORING_OP_FADVISE;
992
993    pub fn build(self) -> Entry {
994        let Fadvise { fd, len, advice, offset } = self;
995
996        let mut sqe = sqe_zeroed();
997        sqe.opcode = Self::CODE;
998        assign_fd!(sqe.fd = fd);
999        sqe.len = len as _;
1000        sqe.__bindgen_anon_1.off = offset;
1001        sqe.__bindgen_anon_3.fadvise_advice = advice as _;
1002        Entry(sqe)
1003    }
1004}
1005
1006opcode! {
1007    /// Give advice about use of memory, equivalent to `madvise(2)`.
1008    pub struct Madvise {
1009        addr: { *const libc::c_void },
1010        len: { libc::off_t },
1011        advice: { i32 },
1012        ;;
1013    }
1014
1015    pub const CODE = sys::IORING_OP_MADVISE;
1016
1017    pub fn build(self) -> Entry {
1018        let Madvise { addr, len, advice } = self;
1019
1020        let mut sqe = sqe_zeroed();
1021        sqe.opcode = Self::CODE;
1022        sqe.fd = -1;
1023        sqe.__bindgen_anon_2.addr = addr as _;
1024        sqe.len = len as _;
1025        sqe.__bindgen_anon_3.fadvise_advice = advice as _;
1026        Entry(sqe)
1027    }
1028}
1029
1030opcode! {
1031    /// Send a message on a socket, equivalent to `send(2)`.
1032    pub struct Send {
1033        fd: { impl sealed::UseFixed },
1034        buf: { *const u8 },
1035        len: { u32 },
1036        ;;
1037        flags: i32 = 0,
1038
1039        /// Set the destination address, for sending from an unconnected socket.
1040        ///
1041        /// When set, `dest_addr_len` must be set as well.
1042        /// See also `man 3 io_uring_prep_send_set_addr`.
1043        dest_addr: *const libc::sockaddr = core::ptr::null(),
1044        dest_addr_len: libc::socklen_t = 0,
1045    }
1046
1047    pub const CODE = sys::IORING_OP_SEND;
1048
1049    pub fn build(self) -> Entry {
1050        let Send { fd, buf, len, flags, dest_addr, dest_addr_len } = self;
1051
1052        let mut sqe = sqe_zeroed();
1053        sqe.opcode = Self::CODE;
1054        assign_fd!(sqe.fd = fd);
1055        sqe.__bindgen_anon_2.addr = buf as _;
1056        sqe.__bindgen_anon_1.addr2 = dest_addr as _;
1057        sqe.__bindgen_anon_5.__bindgen_anon_1.addr_len = dest_addr_len as _;
1058        sqe.len = len;
1059        sqe.__bindgen_anon_3.msg_flags = flags as _;
1060        Entry(sqe)
1061    }
1062}
1063
1064opcode! {
1065    /// Receive a message from a socket, equivalent to `recv(2)`.
1066    pub struct Recv {
1067        fd: { impl sealed::UseFixed },
1068        buf: { *mut u8 },
1069        len: { u32 },
1070        ;;
1071        flags: i32 = 0,
1072        buf_group: u16 = 0
1073    }
1074
1075    pub const CODE = sys::IORING_OP_RECV;
1076
1077    pub fn build(self) -> Entry {
1078        let Recv { fd, buf, len, flags, buf_group } = self;
1079
1080        let mut sqe = sqe_zeroed();
1081        sqe.opcode = Self::CODE;
1082        assign_fd!(sqe.fd = fd);
1083        sqe.__bindgen_anon_2.addr = buf as _;
1084        sqe.len = len;
1085        sqe.__bindgen_anon_3.msg_flags = flags as _;
1086        sqe.__bindgen_anon_4.buf_group = buf_group;
1087        Entry(sqe)
1088    }
1089}
1090
1091opcode! {
1092    /// Receive multiple messages from a socket, equivalent to `recv(2)`.
1093    ///
1094    /// Parameter:
1095    ///     buf_group: The id of the provided buffer pool to use for each received message.
1096    ///
1097    /// MSG_WAITALL should not be set in flags.
1098    ///
1099    /// The multishot version allows the application to issue a single receive request, which
1100    /// repeatedly posts a CQE when data is available. Each CQE will take a buffer out of a
1101    /// provided buffer pool for receiving. The application should check the flags of each CQE,
1102    /// regardless of its result. If a posted CQE does not have the IORING_CQE_F_MORE flag set then
1103    /// the multishot receive will be done and the application should issue a new request.
1104    ///
1105    /// Multishot variants are available since kernel 6.0.
1106
1107    pub struct RecvMulti {
1108        fd: { impl sealed::UseFixed },
1109        buf_group: { u16 },
1110        ;;
1111        flags: i32 = 0,
1112    }
1113
1114    pub const CODE = sys::IORING_OP_RECV;
1115
1116    pub fn build(self) -> Entry {
1117        let RecvMulti { fd, buf_group, flags } = self;
1118
1119        let mut sqe = sqe_zeroed();
1120        sqe.opcode = Self::CODE;
1121        assign_fd!(sqe.fd = fd);
1122        sqe.__bindgen_anon_3.msg_flags = flags as _;
1123        sqe.__bindgen_anon_4.buf_group = buf_group;
1124        sqe.flags |= crate::squeue::Flags::BUFFER_SELECT.bits();
1125        sqe.ioprio = sys::IORING_RECV_MULTISHOT as _;
1126        Entry(sqe)
1127    }
1128}
1129
1130opcode! {
1131    /// Open a file, equivalent to `openat2(2)`.
1132    pub struct OpenAt2 {
1133        dirfd: { impl sealed::UseFd },
1134        pathname: { *const libc::c_char },
1135        how: { *const types::OpenHow }
1136        ;;
1137        file_index: Option<types::DestinationSlot> = None,
1138    }
1139
1140    pub const CODE = sys::IORING_OP_OPENAT2;
1141
1142    pub fn build(self) -> Entry {
1143        let OpenAt2 { dirfd, pathname, how, file_index } = self;
1144
1145        let mut sqe = sqe_zeroed();
1146        sqe.opcode = Self::CODE;
1147        sqe.fd = dirfd;
1148        sqe.__bindgen_anon_2.addr = pathname as _;
1149        sqe.len = mem::size_of::<sys::open_how>() as _;
1150        sqe.__bindgen_anon_1.off = how as _;
1151        if let Some(dest) = file_index {
1152            sqe.__bindgen_anon_5.file_index = dest.kernel_index_arg();
1153        }
1154        Entry(sqe)
1155    }
1156}
1157
1158opcode! {
1159    /// Modify an epoll file descriptor, equivalent to `epoll_ctl(2)`.
1160    pub struct EpollCtl {
1161        epfd: { impl sealed::UseFixed },
1162        fd: { impl sealed::UseFd },
1163        op: { i32 },
1164        ev: { *const types::epoll_event },
1165        ;;
1166    }
1167
1168    pub const CODE = sys::IORING_OP_EPOLL_CTL;
1169
1170    pub fn build(self) -> Entry {
1171        let EpollCtl { epfd, fd, op, ev } = self;
1172
1173        let mut sqe = sqe_zeroed();
1174        sqe.opcode = Self::CODE;
1175        assign_fd!(sqe.fd = epfd);
1176        sqe.__bindgen_anon_2.addr = ev as _;
1177        sqe.len = op as _;
1178        sqe.__bindgen_anon_1.off = fd as _;
1179        Entry(sqe)
1180    }
1181}
1182
1183// === 5.7 ===
1184
1185opcode! {
1186    /// Splice data to/from a pipe, equivalent to `splice(2)`.
1187    ///
1188    /// if `fd_in` refers to a pipe, `off_in` must be `-1`;
1189    /// The description of `off_in` also applied to `off_out`.
1190    pub struct Splice {
1191        fd_in: { impl sealed::UseFixed },
1192        off_in: { i64 },
1193        fd_out: { impl sealed::UseFixed },
1194        off_out: { i64 },
1195        len: { u32 },
1196        ;;
1197        /// see man `splice(2)` for description of flags.
1198        flags: u32 = 0
1199    }
1200
1201    pub const CODE = sys::IORING_OP_SPLICE;
1202
1203    pub fn build(self) -> Entry {
1204        let Splice { fd_in, off_in, fd_out, off_out, len, mut flags } = self;
1205
1206        let mut sqe = sqe_zeroed();
1207        sqe.opcode = Self::CODE;
1208        assign_fd!(sqe.fd = fd_out);
1209        sqe.len = len;
1210        sqe.__bindgen_anon_1.off = off_out as _;
1211
1212        sqe.__bindgen_anon_5.splice_fd_in = match fd_in {
1213            sealed::Target::Fd(fd) => fd,
1214            sealed::Target::Fixed(idx) => {
1215                flags |= sys::SPLICE_F_FD_IN_FIXED;
1216                idx as _
1217            }
1218        };
1219
1220        sqe.__bindgen_anon_2.splice_off_in = off_in as _;
1221        sqe.__bindgen_anon_3.splice_flags = flags;
1222        Entry(sqe)
1223    }
1224}
1225
1226opcode! {
1227    /// Register `nbufs` buffers that each have the length `len` with ids starting from `bid` in the
1228    /// group `bgid` that can be used for any request. See
1229    /// [`BUFFER_SELECT`](crate::squeue::Flags::BUFFER_SELECT) for more info.
1230    pub struct ProvideBuffers {
1231        addr: { *mut u8 },
1232        len: { i32 },
1233        nbufs: { u16 },
1234        bgid: { u16 },
1235        bid: { u16 }
1236        ;;
1237    }
1238
1239    pub const CODE = sys::IORING_OP_PROVIDE_BUFFERS;
1240
1241    pub fn build(self) -> Entry {
1242        let ProvideBuffers { addr, len, nbufs, bgid, bid } = self;
1243
1244        let mut sqe = sqe_zeroed();
1245        sqe.opcode = Self::CODE;
1246        sqe.fd = nbufs as _;
1247        sqe.__bindgen_anon_2.addr = addr as _;
1248        sqe.len = len as _;
1249        sqe.__bindgen_anon_1.off = bid as _;
1250        sqe.__bindgen_anon_4.buf_group = bgid;
1251        Entry(sqe)
1252    }
1253}
1254
1255opcode! {
1256    /// Remove some number of buffers from a buffer group. See
1257    /// [`BUFFER_SELECT`](crate::squeue::Flags::BUFFER_SELECT) for more info.
1258    pub struct RemoveBuffers {
1259        nbufs: { u16 },
1260        bgid: { u16 }
1261        ;;
1262    }
1263
1264    pub const CODE = sys::IORING_OP_REMOVE_BUFFERS;
1265
1266    pub fn build(self) -> Entry {
1267        let RemoveBuffers { nbufs, bgid } = self;
1268
1269        let mut sqe = sqe_zeroed();
1270        sqe.opcode = Self::CODE;
1271        sqe.fd = nbufs as _;
1272        sqe.__bindgen_anon_4.buf_group = bgid;
1273        Entry(sqe)
1274    }
1275}
1276
1277// === 5.8 ===
1278
1279opcode! {
1280    /// Duplicate pipe content, equivalent to `tee(2)`.
1281    pub struct Tee {
1282        fd_in: { impl sealed::UseFixed },
1283        fd_out: { impl sealed::UseFixed },
1284        len: { u32 }
1285        ;;
1286        flags: u32 = 0
1287    }
1288
1289    pub const CODE = sys::IORING_OP_TEE;
1290
1291    pub fn build(self) -> Entry {
1292        let Tee { fd_in, fd_out, len, mut flags } = self;
1293
1294        let mut sqe = sqe_zeroed();
1295        sqe.opcode = Self::CODE;
1296
1297        assign_fd!(sqe.fd = fd_out);
1298        sqe.len = len;
1299
1300        sqe.__bindgen_anon_5.splice_fd_in = match fd_in {
1301            sealed::Target::Fd(fd) => fd,
1302            sealed::Target::Fixed(idx) => {
1303                flags |= sys::SPLICE_F_FD_IN_FIXED;
1304                idx as _
1305            }
1306        };
1307
1308        sqe.__bindgen_anon_3.splice_flags = flags;
1309
1310        Entry(sqe)
1311    }
1312}
1313
1314// === 5.11 ===
1315
1316opcode! {
1317    /// Shut down all or part of a full duplex connection on a socket, equivalent to `shutdown(2)`.
1318    /// Available since kernel 5.11.
1319    pub struct Shutdown {
1320        fd: { impl sealed::UseFixed },
1321        how: { i32 },
1322        ;;
1323    }
1324
1325    pub const CODE = sys::IORING_OP_SHUTDOWN;
1326
1327    pub fn build(self) -> Entry {
1328        let Shutdown { fd, how } = self;
1329
1330        let mut sqe = sqe_zeroed();
1331        sqe.opcode = Self::CODE;
1332        assign_fd!(sqe.fd = fd);
1333        sqe.len = how as _;
1334        Entry(sqe)
1335    }
1336}
1337
1338opcode! {
1339    // Change the name or location of a file, equivalent to `renameat2(2)`.
1340    // Available since kernel 5.11.
1341    pub struct RenameAt {
1342        olddirfd: { impl sealed::UseFd },
1343        oldpath: { *const libc::c_char },
1344        newdirfd: { impl sealed::UseFd },
1345        newpath: { *const libc::c_char },
1346        ;;
1347        flags: u32 = 0
1348    }
1349
1350    pub const CODE = sys::IORING_OP_RENAMEAT;
1351
1352    pub fn build(self) -> Entry {
1353        let RenameAt {
1354            olddirfd, oldpath,
1355            newdirfd, newpath,
1356            flags
1357        } = self;
1358
1359        let mut sqe = sqe_zeroed();
1360        sqe.opcode = Self::CODE;
1361        sqe.fd = olddirfd;
1362        sqe.__bindgen_anon_2.addr = oldpath as _;
1363        sqe.len = newdirfd as _;
1364        sqe.__bindgen_anon_1.off = newpath as _;
1365        sqe.__bindgen_anon_3.rename_flags = flags;
1366        Entry(sqe)
1367    }
1368}
1369
1370opcode! {
1371    // Delete a name and possible the file it refers to, equivalent to `unlinkat(2)`.
1372    // Available since kernel 5.11.
1373    pub struct UnlinkAt {
1374        dirfd: { impl sealed::UseFd },
1375        pathname: { *const libc::c_char },
1376        ;;
1377        flags: i32 = 0
1378    }
1379
1380    pub const CODE = sys::IORING_OP_UNLINKAT;
1381
1382    pub fn build(self) -> Entry {
1383        let UnlinkAt { dirfd, pathname, flags } = self;
1384
1385        let mut sqe = sqe_zeroed();
1386        sqe.opcode = Self::CODE;
1387        sqe.fd = dirfd;
1388        sqe.__bindgen_anon_2.addr = pathname as _;
1389        sqe.__bindgen_anon_3.unlink_flags = flags as _;
1390        Entry(sqe)
1391    }
1392}
1393
1394// === 5.15 ===
1395
1396opcode! {
1397    /// Make a directory, equivalent to `mkdirat(2)`.
1398    pub struct MkDirAt {
1399        dirfd: { impl sealed::UseFd },
1400        pathname: { *const libc::c_char },
1401        ;;
1402        mode: libc::mode_t = 0
1403    }
1404
1405    pub const CODE = sys::IORING_OP_MKDIRAT;
1406
1407    pub fn build(self) -> Entry {
1408        let MkDirAt { dirfd, pathname, mode } = self;
1409
1410        let mut sqe = sqe_zeroed();
1411        sqe.opcode = Self::CODE;
1412        sqe.fd = dirfd;
1413        sqe.__bindgen_anon_2.addr = pathname as _;
1414        sqe.len = mode;
1415        Entry(sqe)
1416    }
1417}
1418
1419opcode! {
1420    /// Create a symlink, equivalent to `symlinkat(2)`.
1421    pub struct SymlinkAt {
1422        newdirfd: { impl sealed::UseFd },
1423        target: { *const libc::c_char },
1424        linkpath: { *const libc::c_char },
1425        ;;
1426    }
1427
1428    pub const CODE = sys::IORING_OP_SYMLINKAT;
1429
1430    pub fn build(self) -> Entry {
1431        let SymlinkAt { newdirfd, target, linkpath } = self;
1432
1433        let mut sqe = sqe_zeroed();
1434        sqe.opcode = Self::CODE;
1435        sqe.fd = newdirfd;
1436        sqe.__bindgen_anon_2.addr = target as _;
1437        sqe.__bindgen_anon_1.addr2 = linkpath as _;
1438        Entry(sqe)
1439    }
1440}
1441
1442opcode! {
1443    /// Create a hard link, equivalent to `linkat(2)`.
1444    pub struct LinkAt {
1445        olddirfd: { impl sealed::UseFd },
1446        oldpath: { *const libc::c_char },
1447        newdirfd: { impl sealed::UseFd },
1448        newpath: { *const libc::c_char },
1449        ;;
1450        flags: i32 = 0
1451    }
1452
1453    pub const CODE = sys::IORING_OP_LINKAT;
1454
1455    pub fn build(self) -> Entry {
1456        let LinkAt { olddirfd, oldpath, newdirfd, newpath, flags } = self;
1457
1458        let mut sqe = sqe_zeroed();
1459        sqe.opcode = Self::CODE;
1460        sqe.fd = olddirfd as _;
1461        sqe.__bindgen_anon_2.addr = oldpath as _;
1462        sqe.len = newdirfd as _;
1463        sqe.__bindgen_anon_1.addr2 = newpath as _;
1464        sqe.__bindgen_anon_3.hardlink_flags = flags as _;
1465        Entry(sqe)
1466    }
1467}
1468
1469// === 5.18 ===
1470
1471opcode! {
1472    /// Send a message (with data) to a target ring.
1473    pub struct MsgRingData {
1474        ring_fd: { impl sealed::UseFd },
1475        result: { i32 },
1476        user_data: { u64 },
1477        user_flags: { Option<u32> },
1478        ;;
1479        opcode_flags: u32 = 0
1480    }
1481
1482    pub const CODE = sys::IORING_OP_MSG_RING;
1483
1484    pub fn build(self) -> Entry {
1485        let MsgRingData { ring_fd, result, user_data, user_flags, opcode_flags } = self;
1486
1487        let mut sqe = sqe_zeroed();
1488        sqe.opcode = Self::CODE;
1489        sqe.__bindgen_anon_2.addr = sys::IORING_MSG_DATA.into();
1490        sqe.fd = ring_fd;
1491        sqe.len = result as u32;
1492        sqe.__bindgen_anon_1.off = user_data;
1493        sqe.__bindgen_anon_3.msg_ring_flags = opcode_flags;
1494        if let Some(flags) = user_flags {
1495            sqe.__bindgen_anon_5.file_index = flags;
1496            unsafe {sqe.__bindgen_anon_3.msg_ring_flags |= sys::IORING_MSG_RING_FLAGS_PASS};
1497        }
1498        Entry(sqe)
1499    }
1500}
1501
1502// === 5.19 ===
1503
1504opcode! {
1505    /// Attempt to cancel an already issued request, receiving a cancellation
1506    /// builder, which allows for the new cancel criterias introduced since
1507    /// 5.19.
1508    pub struct AsyncCancel2 {
1509        builder: { types::CancelBuilder }
1510        ;;
1511    }
1512
1513    pub const CODE = sys::IORING_OP_ASYNC_CANCEL;
1514
1515    pub fn build(self) -> Entry {
1516        let AsyncCancel2 { builder } = self;
1517
1518        let mut sqe = sqe_zeroed();
1519        sqe.opcode = Self::CODE;
1520        sqe.fd = builder.to_fd();
1521        sqe.__bindgen_anon_2.addr = builder.user_data.unwrap_or(0);
1522        sqe.__bindgen_anon_3.cancel_flags = builder.flags.bits();
1523        Entry(sqe)
1524    }
1525}
1526
1527opcode! {
1528    /// A file/device-specific 16-byte command, akin (but not equivalent) to `ioctl(2)`.
1529    pub struct UringCmd16 {
1530        fd: { impl sealed::UseFixed },
1531        cmd_op: { u32 },
1532        ;;
1533        /// The `buf_index` is an index into an array of fixed buffers,
1534        /// and is only valid if fixed buffers were registered.
1535        buf_index: Option<u16> = None,
1536        /// Arbitrary command data.
1537        cmd: [u8; 16] = [0u8; 16]
1538    }
1539
1540    pub const CODE = sys::IORING_OP_URING_CMD;
1541
1542    pub fn build(self) -> Entry {
1543        let UringCmd16 { fd, cmd_op, cmd, buf_index } = self;
1544
1545        let mut sqe = sqe_zeroed();
1546        sqe.opcode = Self::CODE;
1547        assign_fd!(sqe.fd = fd);
1548        sqe.__bindgen_anon_1.__bindgen_anon_1.cmd_op = cmd_op;
1549        unsafe { *sqe.__bindgen_anon_6.cmd.as_mut().as_mut_ptr().cast::<[u8; 16]>() = cmd };
1550        if let Some(buf_index) = buf_index {
1551            sqe.__bindgen_anon_4.buf_index = buf_index;
1552            unsafe {
1553                sqe.__bindgen_anon_3.uring_cmd_flags |= sys::IORING_URING_CMD_FIXED;
1554            }
1555        }
1556        Entry(sqe)
1557    }
1558}
1559
1560opcode! {
1561    /// A file/device-specific 80-byte command, akin (but not equivalent) to `ioctl(2)`.
1562    pub struct UringCmd80 {
1563        fd: { impl sealed::UseFixed },
1564        cmd_op: { u32 },
1565        ;;
1566        /// The `buf_index` is an index into an array of fixed buffers,
1567        /// and is only valid if fixed buffers were registered.
1568        buf_index: Option<u16> = None,
1569        /// Arbitrary command data.
1570        cmd: [u8; 80] = [0u8; 80]
1571    }
1572
1573    pub const CODE = sys::IORING_OP_URING_CMD;
1574
1575    pub fn build(self) -> Entry128 {
1576        let UringCmd80 { fd, cmd_op, cmd, buf_index } = self;
1577
1578        let cmd1 = cmd[..16].try_into().unwrap();
1579        let cmd2 = cmd[16..].try_into().unwrap();
1580
1581        let mut sqe = sqe_zeroed();
1582        sqe.opcode = Self::CODE;
1583        assign_fd!(sqe.fd = fd);
1584        sqe.__bindgen_anon_1.__bindgen_anon_1.cmd_op = cmd_op;
1585        unsafe { *sqe.__bindgen_anon_6.cmd.as_mut().as_mut_ptr().cast::<[u8; 16]>() = cmd1 };
1586        if let Some(buf_index) = buf_index {
1587            sqe.__bindgen_anon_4.buf_index = buf_index;
1588            unsafe {
1589                sqe.__bindgen_anon_3.uring_cmd_flags |= sys::IORING_URING_CMD_FIXED;
1590            }
1591        }
1592        Entry128(Entry(sqe), cmd2)
1593    }
1594}
1595
1596opcode! {
1597    /// Create an endpoint for communication, equivalent to `socket(2)`.
1598    ///
1599    /// If the `file_index` argument is set, the resulting socket is
1600    /// directly mapped to the given fixed-file slot instead of being
1601    /// returned as a normal file descriptor. The application must first
1602    /// have registered a file table, and the target slot should fit into
1603    /// it.
1604    ///
1605    /// Available since 5.19.
1606    pub struct Socket {
1607        domain: { i32 },
1608        socket_type: { i32 },
1609        protocol: { i32 },
1610        ;;
1611        file_index: Option<types::DestinationSlot> = None,
1612        flags: types::RwFlags = 0,
1613    }
1614
1615    pub const CODE = sys::IORING_OP_SOCKET;
1616
1617    pub fn build(self) -> Entry {
1618        let Socket { domain, socket_type, protocol, file_index, flags } = self;
1619
1620        let mut sqe = sqe_zeroed();
1621        sqe.opcode = Self::CODE;
1622        sqe.fd = domain as _;
1623        sqe.__bindgen_anon_1.off = socket_type as _;
1624        sqe.len = protocol as _;
1625        sqe.__bindgen_anon_3.rw_flags = flags;
1626        if let Some(dest) = file_index {
1627            sqe.__bindgen_anon_5.file_index = dest.kernel_index_arg();
1628        }
1629        Entry(sqe)
1630    }
1631}
1632
1633opcode! {
1634    /// Accept multiple new connections on a socket.
1635    ///
1636    /// Set the `allocate_file_index` property if fixed file table entries should be used.
1637    ///
1638    /// Available since 5.19.
1639    pub struct AcceptMulti {
1640        fd: { impl sealed::UseFixed },
1641        ;;
1642        allocate_file_index: bool = false,
1643        flags: i32 = 0
1644    }
1645
1646    pub const CODE = sys::IORING_OP_ACCEPT;
1647
1648    pub fn build(self) -> Entry {
1649        let AcceptMulti { fd, allocate_file_index, flags } = self;
1650
1651        let mut sqe = sqe_zeroed();
1652        sqe.opcode = Self::CODE;
1653        assign_fd!(sqe.fd = fd);
1654        sqe.ioprio = sys::IORING_ACCEPT_MULTISHOT as u16;
1655        // No out SockAddr is passed for the multishot accept case.
1656        // The user should perform a syscall to get any resulting connection's remote address.
1657        sqe.__bindgen_anon_3.accept_flags = flags as _;
1658        if allocate_file_index {
1659            sqe.__bindgen_anon_5.file_index = sys::IORING_FILE_INDEX_ALLOC as u32;
1660        }
1661        Entry(sqe)
1662    }
1663}
1664
1665// === 6.0 ===
1666
1667opcode! {
1668    /// Send a message (with fixed FD) to a target ring.
1669    pub struct MsgRingSendFd {
1670        ring_fd: { impl sealed::UseFd },
1671        fixed_slot_src: { types::Fixed },
1672        dest_slot_index: { types::DestinationSlot },
1673        user_data: { u64 },
1674        ;;
1675        opcode_flags: u32 = 0
1676    }
1677
1678    pub const CODE = sys::IORING_OP_MSG_RING;
1679
1680    pub fn build(self) -> Entry {
1681        let MsgRingSendFd { ring_fd, fixed_slot_src, dest_slot_index, user_data, opcode_flags } = self;
1682
1683        let mut sqe = sqe_zeroed();
1684        sqe.opcode = Self::CODE;
1685        sqe.__bindgen_anon_2.addr = sys::IORING_MSG_SEND_FD.into();
1686        sqe.fd = ring_fd;
1687        sqe.__bindgen_anon_1.off = user_data;
1688        unsafe { sqe.__bindgen_anon_6.__bindgen_anon_1.as_mut().addr3 = fixed_slot_src.0 as u64 };
1689        sqe.__bindgen_anon_5.file_index = dest_slot_index.kernel_index_arg();
1690        sqe.__bindgen_anon_3.msg_ring_flags = opcode_flags;
1691        Entry(sqe)
1692    }
1693}
1694
1695// === 6.0 ===
1696
1697opcode! {
1698    /// Send a zerocopy message on a socket, equivalent to `send(2)`.
1699    ///
1700    /// When `dest_addr` is non-zero it points to the address of the target with `dest_addr_len`
1701    /// specifying its size, turning the request into a `sendto(2)`
1702    ///
1703    /// A fixed (pre-mapped) buffer can optionally be used from pre-mapped buffers that have been
1704    /// previously registered with [`Submitter::register_buffers`](crate::Submitter::register_buffers).
1705    ///
1706    /// This operation might result in two completion queue entries.
1707    /// See the `IORING_OP_SEND_ZC` section at [io_uring_enter][] for the exact semantics.
1708    /// Notifications posted by this operation can be checked with [notif](crate::cqueue::notif).
1709    ///
1710    /// [io_uring_enter]: https://man7.org/linux/man-pages/man2/io_uring_enter.2.html
    pub struct SendZc {
        fd: { impl sealed::UseFixed },
        buf: { *const u8 },
        len: { u32 },
        ;;
        /// The `buf_index` is an index into an array of fixed buffers, and is only valid if fixed
        /// buffers were registered.
        ///
        /// The buf and len arguments must fall within a region specified by buf_index in the
        /// previously registered buffer. The buffer need not be aligned with the start of the
        /// registered buffer.
        buf_index: Option<u16> = None,
        dest_addr: *const libc::sockaddr = core::ptr::null(),
        dest_addr_len: libc::socklen_t = 0,
        flags: i32 = 0,
        zc_flags: u16 = 0,
    }

    pub const CODE = sys::IORING_OP_SEND_ZC;

    pub fn build(self) -> Entry {
        let SendZc { fd, buf, len, buf_index, dest_addr, dest_addr_len, flags, zc_flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.__bindgen_anon_2.addr = buf as _;
        sqe.len = len;
        sqe.__bindgen_anon_3.msg_flags = flags as _;
        sqe.ioprio = zc_flags;
        if let Some(buf_index) = buf_index {
            sqe.__bindgen_anon_4.buf_index = buf_index;
            sqe.ioprio |= sys::IORING_RECVSEND_FIXED_BUF as u16;
        }
        sqe.__bindgen_anon_1.addr2 = dest_addr as _;
        sqe.__bindgen_anon_5.__bindgen_anon_1.addr_len = dest_addr_len as _;
        Entry(sqe)
    }
}

// === 6.1 ===

opcode! {
    /// Send a zerocopy message on a socket, equivalent to `sendmsg(2)`.
    ///
    /// `fd` must be set to the socket file descriptor, `msg` must contain a pointer to the
    /// `msghdr` structure, and `flags` holds the flags associated with the system call.
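    ///
    /// # Examples
    ///
    /// A minimal sketch (not part of the upstream documentation). It assumes the caller has
    /// already prepared a `libc::msghdr` whose buffers stay valid until the zerocopy
    /// notification CQE arrives:
    ///
    /// ```no_run
    /// use io_uring::{opcode, types};
    ///
    /// fn sendmsg_zc_entry(
    ///     sock_fd: std::os::unix::io::RawFd,
    ///     msg: *const libc::msghdr,
    /// ) -> io_uring::squeue::Entry {
    ///     opcode::SendMsgZc::new(types::Fd(sock_fd), msg)
    ///         .flags(libc::MSG_NOSIGNAL as u32)
    ///         .build()
    /// }
    /// ```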
    #[derive(Debug)]
    pub struct SendMsgZc {
        fd: { impl sealed::UseFixed },
        msg: { *const libc::msghdr },
        ;;
        ioprio: u16 = 0,
        flags: u32 = 0
    }

    pub const CODE = sys::IORING_OP_SENDMSG_ZC;

    pub fn build(self) -> Entry {
        let SendMsgZc { fd, msg, ioprio, flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.ioprio = ioprio;
        sqe.__bindgen_anon_2.addr = msg as _;
        sqe.len = 1;
        sqe.__bindgen_anon_3.msg_flags = flags;
        Entry(sqe)
    }
}

// === 6.7 ===

opcode! {
    /// Wait on a futex, like but not equivalent to `futex(2)`'s `FUTEX_WAIT_BITSET`.
    ///
    /// Wait on the futex at address `futex`, provided it still holds the value `val`, using the
    /// `futex2(2)` flags given in `futex_flags`. `mask` can be set to a specific bitset mask,
    /// which will be matched by the waking side to decide whom to wake up. To always get woken,
    /// an application may use `FUTEX_BITSET_MATCH_ANY` (truncated to futex bits). `futex_flags`
    /// follows the `futex2(2)` flags, not the `futex(2)` v1 interface flags. `flags` is
    /// currently unused and hence `0` must be passed.
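    ///
    /// # Examples
    ///
    /// A minimal sketch (not part of the upstream documentation) that blocks while a 32-bit
    /// futex word still holds the value `1`. The constants are written out by hand because they
    /// may not be exposed by the `libc` crate in use; in real code the futex word would be a
    /// shared atomic, not a local variable:
    ///
    /// ```no_run
    /// use io_uring::opcode;
    ///
    /// const FUTEX2_SIZE_U32: u32 = 0x2; // futex2(2): operate on a 32-bit futex word
    /// const FUTEX_BITSET_MATCH_ANY: u64 = 0xffff_ffff; // be woken by any wake-up bitset
    ///
    /// let futex_word: u32 = 1;
    /// let wait = opcode::FutexWait::new(
    ///     &futex_word as *const u32,
    ///     1, // only block while the word is still 1
    ///     FUTEX_BITSET_MATCH_ANY,
    ///     FUTEX2_SIZE_U32,
    /// )
    /// .build()
    /// .user_data(0x30);
    /// ```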
    #[derive(Debug)]
    pub struct FutexWait {
        futex: { *const u32 },
        val: { u64 },
        mask: { u64 },
        futex_flags: { u32 },
        ;;
        flags: u32 = 0
    }

    pub const CODE = sys::IORING_OP_FUTEX_WAIT;

    pub fn build(self) -> Entry {
        let FutexWait { futex, val, mask, futex_flags, flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.fd = futex_flags as _;
        sqe.__bindgen_anon_2.addr = futex as usize as _;
        sqe.__bindgen_anon_1.off = val;
        unsafe { sqe.__bindgen_anon_6.__bindgen_anon_1.as_mut().addr3 = mask };
        sqe.__bindgen_anon_3.futex_flags = flags;
        Entry(sqe)
    }
}

opcode! {
    /// Wake up waiters on a futex, like but not equivalent to `futex(2)`'s `FUTEX_WAKE_BITSET`.
    ///
    /// Wake waiters on the futex at address `futex`, waking at most `val` of them. `futex_flags`
    /// indicates the `futex2(2)` modifier flags. If only waiters matching a given bitset should
    /// be woken, that bitset must be set in `mask`; use `FUTEX_BITSET_MATCH_ANY` (truncated to
    /// futex bits) to match any waiter on the given futex. `flags` is currently unused and hence
    /// `0` must be passed.
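    ///
    /// # Examples
    ///
    /// A minimal sketch (not part of the upstream documentation) that wakes at most one waiter
    /// on a futex word, using the same hand-written constants as the `FutexWait` example:
    ///
    /// ```no_run
    /// use io_uring::opcode;
    ///
    /// const FUTEX2_SIZE_U32: u32 = 0x2;
    /// const FUTEX_BITSET_MATCH_ANY: u64 = 0xffff_ffff;
    ///
    /// let futex_word: u32 = 0;
    /// let wake_one = opcode::FutexWake::new(
    ///     &futex_word as *const u32,
    ///     1, // wake at most one waiter
    ///     FUTEX_BITSET_MATCH_ANY,
    ///     FUTEX2_SIZE_U32,
    /// )
    /// .build()
    /// .user_data(0x31);
    /// ```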
    #[derive(Debug)]
    pub struct FutexWake {
        futex: { *const u32 },
        val: { u64 },
        mask: { u64 },
        futex_flags: { u32 },
        ;;
        flags: u32 = 0
    }

    pub const CODE = sys::IORING_OP_FUTEX_WAKE;

    pub fn build(self) -> Entry {
        let FutexWake { futex, val, mask, futex_flags, flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.fd = futex_flags as _;
        sqe.__bindgen_anon_2.addr = futex as usize as _;
        sqe.__bindgen_anon_1.off = val;
        unsafe { sqe.__bindgen_anon_6.__bindgen_anon_1.as_mut().addr3 = mask };
        sqe.__bindgen_anon_3.futex_flags = flags;
        Entry(sqe)
    }
}

opcode! {
    /// Wait on multiple futexes.
    ///
    /// Wait on multiple futexes at the same time. The futexes are given by `futexv` and
    /// `nr_futex` is the number of futexes in that array. Unlike `FutexWait`, the desired
    /// bitset mask and values are passed in `futexv`. `flags` is currently unused and hence
    /// `0` must be passed.
    #[derive(Debug)]
    pub struct FutexWaitV {
        futexv: { *const types::FutexWaitV },
        nr_futex: { u32 },
        ;;
        flags: u32 = 0
    }

    pub const CODE = sys::IORING_OP_FUTEX_WAITV;

    pub fn build(self) -> Entry {
        let FutexWaitV { futexv, nr_futex, flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.__bindgen_anon_2.addr = futexv as usize as _;
        sqe.len = nr_futex;
        sqe.__bindgen_anon_3.futex_flags = flags;
        Entry(sqe)
    }
}

opcode! {
    /// Issue the equivalent of a `waitid(2)` system call.
    ///
    /// Available since kernel 6.7.
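    ///
    /// # Examples
    ///
    /// A minimal sketch (not part of the upstream documentation) that waits for a specific child
    /// process to exit; `child_pid` is assumed to come from an earlier `fork(2)` or spawn:
    ///
    /// ```no_run
    /// use io_uring::opcode;
    ///
    /// let child_pid: libc::pid_t = 1234; // assumed: a child of the current process
    /// // The kernel fills this in on completion; it must stay alive until the CQE arrives.
    /// let info: libc::siginfo_t = unsafe { std::mem::zeroed() };
    /// let wait = opcode::WaitId::new(libc::P_PID, child_pid as libc::id_t, libc::WEXITED)
    ///     .infop(&info as *const libc::siginfo_t)
    ///     .build()
    ///     .user_data(0x40);
    /// ```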
    #[derive(Debug)]
    pub struct WaitId {
        idtype: { libc::idtype_t },
        id: { libc::id_t },
        options: { libc::c_int },
        ;;
        infop: *const libc::siginfo_t = std::ptr::null(),
        flags: libc::c_uint = 0,
    }

    pub const CODE = sys::IORING_OP_WAITID;

    pub fn build(self) -> Entry {
        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.fd = self.id as _;
        sqe.len = self.idtype as _;
        sqe.__bindgen_anon_3.waitid_flags = self.flags;
        sqe.__bindgen_anon_5.file_index = self.options as _;
        sqe.__bindgen_anon_1.addr2 = self.infop as _;
        Entry(sqe)
    }
}

// === 6.8 ===

opcode! {
    /// Install a fixed file descriptor.
    ///
    /// Turns a direct descriptor into a regular file descriptor that can later be used by
    /// regular system calls that take a normal raw file descriptor.
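    ///
    /// # Examples
    ///
    /// A minimal sketch (not part of the upstream documentation) that asks the kernel to install
    /// the direct descriptor held in fixed slot 0 as a regular file descriptor; the new raw fd is
    /// returned in the CQE result:
    ///
    /// ```no_run
    /// use io_uring::{opcode, types};
    ///
    /// let install = opcode::FixedFdInstall::new(types::Fixed(0), 0 /* no install flags */)
    ///     .build()
    ///     .user_data(0x50);
    /// ```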
    #[derive(Debug)]
    pub struct FixedFdInstall {
        fd: { types::Fixed },
        file_flags: { u32 },
        ;;
    }

    pub const CODE = sys::IORING_OP_FIXED_FD_INSTALL;

    pub fn build(self) -> Entry {
        let FixedFdInstall { fd, file_flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.fd = fd.0 as _;
        sqe.flags = crate::squeue::Flags::FIXED_FILE.bits();
        sqe.__bindgen_anon_3.install_fd_flags = file_flags;
        Entry(sqe)
    }
}

// === 6.9 ===

opcode! {
    /// Perform file truncation, equivalent to `ftruncate(2)`.
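    ///
    /// # Examples
    ///
    /// A minimal sketch (not part of the upstream documentation) that truncates (or extends) an
    /// already open, writable file `file_fd` to 4096 bytes:
    ///
    /// ```no_run
    /// use io_uring::{opcode, types};
    ///
    /// let file_fd: std::os::unix::io::RawFd = 5; // assumed: an open, writable regular file
    /// let truncate = opcode::Ftruncate::new(types::Fd(file_fd), 4096)
    ///     .build()
    ///     .user_data(0x60);
    /// ```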
    #[derive(Debug)]
    pub struct Ftruncate {
        fd: { impl sealed::UseFixed },
        len: { u64 },
        ;;
    }

    pub const CODE = sys::IORING_OP_FTRUNCATE;

    pub fn build(self) -> Entry {
        let Ftruncate { fd, len } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.__bindgen_anon_1.off = len;
        Entry(sqe)
    }
}

// === 6.10 ===

opcode! {
    /// Send a bundle of messages on a socket in a single request.
    ///
    /// The data to send is taken from buffers previously provided to the buffer group
    /// identified by `buf_group`.
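    ///
    /// # Examples
    ///
    /// A minimal sketch (not part of the upstream documentation). It assumes the outgoing data
    /// has already been provided to buffer group `0` (for example through a registered
    /// provided-buffer ring) before the entry is submitted:
    ///
    /// ```no_run
    /// use io_uring::{opcode, types};
    ///
    /// let sock_fd: std::os::unix::io::RawFd = 4; // assumed: a connected socket
    /// let send_bundle = opcode::SendBundle::new(types::Fd(sock_fd), 0 /* buf_group */)
    ///     .build()
    ///     .user_data(0x70);
    /// ```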
    pub struct SendBundle {
        fd: { impl sealed::UseFixed },
        buf_group: { u16 },
        ;;
        flags: i32 = 0,
        len: u32 = 0
    }

    pub const CODE = sys::IORING_OP_SEND;

    pub fn build(self) -> Entry {
        let SendBundle { fd, len, flags, buf_group } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.len = len;
        sqe.__bindgen_anon_3.msg_flags = flags as _;
        sqe.ioprio |= sys::IORING_RECVSEND_BUNDLE as u16;
        sqe.flags |= crate::squeue::Flags::BUFFER_SELECT.bits();
        sqe.__bindgen_anon_4.buf_group = buf_group;
        Entry(sqe)
    }
}

opcode! {
    /// Receive a bundle of buffers from a socket.
    ///
    /// Parameter:
    ///     buf_group: The id of the provided buffer pool to use for the bundle.
    ///
    /// Note that as of kernel 6.10 the first recv always receives a single buffer, while the
    /// second receives the bundle of remaining buffers. This behavior may change in the future.
    ///
    /// The bundle variant is available since kernel 6.10.
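    ///
    /// # Examples
    ///
    /// A minimal sketch (not part of the upstream documentation). It assumes buffer group `0`
    /// has already been populated with provided buffers for the kernel to pick from:
    ///
    /// ```no_run
    /// use io_uring::{opcode, types};
    ///
    /// let sock_fd: std::os::unix::io::RawFd = 4; // assumed: a connected socket
    /// let recv_bundle = opcode::RecvBundle::new(types::Fd(sock_fd), 0 /* buf_group */)
    ///     .build()
    ///     .user_data(0x80);
    /// ```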
    pub struct RecvBundle {
        fd: { impl sealed::UseFixed },
        buf_group: { u16 },
        ;;
        flags: i32 = 0
    }

    pub const CODE = sys::IORING_OP_RECV;

    pub fn build(self) -> Entry {
        let RecvBundle { fd, buf_group, flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.__bindgen_anon_3.msg_flags = flags as _;
        sqe.__bindgen_anon_4.buf_group = buf_group;
        sqe.flags |= crate::squeue::Flags::BUFFER_SELECT.bits();
        sqe.ioprio |= sys::IORING_RECVSEND_BUNDLE as u16;
        Entry(sqe)
    }
}

opcode! {
    /// Receive multiple messages from a socket as a bundle.
    ///
    /// Parameter:
    ///     buf_group: The id of the provided buffer pool to use for each received message.
    ///
    /// MSG_WAITALL should not be set in flags.
    ///
    /// The multishot version allows the application to issue a single receive request, which
    /// repeatedly posts a CQE when data is available. Each CQE will take a bundle of buffers
    /// out of a provided buffer pool for receiving. The application should check the flags of
    /// each CQE, regardless of its result. If a posted CQE does not have the IORING_CQE_F_MORE
    /// flag set, then the multishot receive is done and the application should issue a new
    /// request.
    ///
    /// Note that as of kernel 6.10 the first CQE always gets a single buffer, while the second
    /// obtains the bundle of remaining buffers. This behavior may change in the future.
    ///
    /// The multishot bundle variant is available since kernel 6.10.
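    ///
    /// # Examples
    ///
    /// A minimal sketch (not part of the upstream documentation) showing how the entry is built
    /// and how a completion loop might check `IORING_CQE_F_MORE`; buffer-group setup and
    /// submission are assumed to happen elsewhere:
    ///
    /// ```no_run
    /// use io_uring::{cqueue, opcode, types};
    ///
    /// let sock_fd: std::os::unix::io::RawFd = 4; // assumed: a connected socket
    /// let recv_multi = opcode::RecvMultiBundle::new(types::Fd(sock_fd), 0 /* buf_group */)
    ///     .build()
    ///     .user_data(0x90);
    ///
    /// // For each CQE belonging to this request:
    /// fn still_armed(cqe: &cqueue::Entry) -> bool {
    ///     // If IORING_CQE_F_MORE is clear, the multishot request has terminated and a new
    ///     // request must be issued to keep receiving.
    ///     cqueue::more(cqe.flags())
    /// }
    /// ```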
    pub struct RecvMultiBundle {
        fd: { impl sealed::UseFixed },
        buf_group: { u16 },
        ;;
        flags: i32 = 0
    }

    pub const CODE = sys::IORING_OP_RECV;

    pub fn build(self) -> Entry {
        let RecvMultiBundle { fd, buf_group, flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.__bindgen_anon_3.msg_flags = flags as _;
        sqe.__bindgen_anon_4.buf_group = buf_group;
        sqe.flags |= crate::squeue::Flags::BUFFER_SELECT.bits();
        sqe.ioprio = sys::IORING_RECV_MULTISHOT as _;
        sqe.ioprio |= sys::IORING_RECVSEND_BUNDLE as u16;
        Entry(sqe)
    }
}