1#![allow(clippy::new_without_default)]
4
5use std::convert::TryInto;
6use std::mem;
7use std::os::unix::io::RawFd;
8
9use crate::squeue::Entry;
10use crate::squeue::Entry128;
11use crate::sys;
12use crate::types::{self, sealed};
13
// Assign a `sealed::Target` to an SQE's `fd` field.
//
// A plain file descriptor is stored directly. A fixed-file index is
// stored in `fd` as well, but with the FIXED_FILE submission flag set so
// the value is interpreted as an index into the registered file table.
macro_rules! assign_fd {
    ( $sqe:ident . fd = $opfd:expr ) => {
        match $opfd {
            sealed::Target::Fd(fd) => $sqe.fd = fd,
            sealed::Target::Fixed(idx) => {
                $sqe.fd = idx as _;
                $sqe.flags |= crate::squeue::Flags::FIXED_FILE.bits();
            }
        }
    };
}
25
// Generate an opcode builder type.
//
// Invocation grammar:
// * fields before `;;` are required constructor arguments; a field typed
//   `{ impl sealed::UseFixed }` / `{ impl sealed::UseFd }` is rewritten by
//   the `@type` rules below into its storage type;
// * fields after `;;` are optional: each takes a default value and gets a
//   chainable const setter method of the same name;
// * `pub const CODE` is the raw io_uring opcode number;
// * `build` consumes the builder and produces a submission-queue entry.
macro_rules! opcode {
    // Storage type for `impl sealed::UseFixed` arguments: either a raw fd
    // or a registered (fixed) file index.
    (@type impl sealed::UseFixed ) => {
        sealed::Target
    };
    // Storage type for `impl sealed::UseFd` arguments: a plain `RawFd`.
    (@type impl sealed::UseFd ) => {
        RawFd
    };
    // Any other type is stored as written.
    (@type $name:ty ) => {
        $name
    };
    (
        $( #[$outer:meta] )*
        pub struct $name:ident {
            $( #[$new_meta:meta] )*

            $( $field:ident : { $( $tnt:tt )+ } ),*

            $(,)?

            ;;

            $(
                $( #[$opt_meta:meta] )*
                $opt_field:ident : $opt_tname:ty = $default:expr
            ),*

            $(,)?
        }

        pub const CODE = $opcode:expr;

        $( #[$build_meta:meta] )*
        pub fn build($self:ident) -> $entry:ty $build_block:block
    ) => {
        $( #[$outer] )*
        pub struct $name {
            $( $field : opcode!(@type $( $tnt )*), )*
            $( $opt_field : $opt_tname, )*
        }

        impl $name {
            $( #[$new_meta] )*
            #[inline]
            pub fn new($( $field : $( $tnt )* ),*) -> Self {
                $name {
                    $( $field: $field.into(), )*
                    $( $opt_field: $default, )*
                }
            }

            /// The raw io_uring opcode for this operation.
            pub const CODE: u8 = $opcode as _;

            $(
                $( #[$opt_meta] )*
                #[inline]
                pub const fn $opt_field(mut self, $opt_field: $opt_tname) -> Self {
                    self.$opt_field = $opt_field;
                    self
                }
            )*

            $( #[$build_meta] )*
            #[inline]
            pub fn build($self) -> $entry $build_block
        }
    }
}
96
97#[inline(always)]
99fn sqe_zeroed() -> sys::io_uring_sqe {
100 unsafe { mem::zeroed() }
101}
102
// No-op request: produces a completion without performing any I/O.
opcode! {
    #[derive(Debug)]
    pub struct Nop { ;; }

    pub const CODE = sys::IORING_OP_NOP;

    pub fn build(self) -> Entry {
        let Nop {} = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        // No file is involved; -1 marks the fd field as unused.
        sqe.fd = -1;
        Entry(sqe)
    }
}
121
// Vectored read, analogous to `preadv2(2)`.
opcode! {
    #[derive(Debug)]
    pub struct Readv {
        fd: { impl sealed::UseFixed },
        iovec: { *const libc::iovec },
        len: { u32 },
        ;;
        ioprio: u16 = 0,
        offset: u64 = 0,
        rw_flags: i32 = 0,
        // Buffer-group id for provided-buffer selection.
        buf_group: u16 = 0
    }

    pub const CODE = sys::IORING_OP_READV;

    pub fn build(self) -> Entry {
        let Readv {
            fd,
            iovec, len, offset,
            ioprio, rw_flags,
            buf_group
        } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.ioprio = ioprio;
        // `addr` carries the iovec array pointer; `len` is the iovec count.
        sqe.__bindgen_anon_2.addr = iovec as _;
        sqe.len = len;
        sqe.__bindgen_anon_1.off = offset;
        sqe.__bindgen_anon_3.rw_flags = rw_flags as _;
        sqe.__bindgen_anon_4.buf_group = buf_group;
        Entry(sqe)
    }
}
160
// Vectored write, analogous to `pwritev2(2)`.
opcode! {
    #[derive(Debug)]
    pub struct Writev {
        fd: { impl sealed::UseFixed },
        iovec: { *const libc::iovec },
        len: { u32 },
        ;;
        ioprio: u16 = 0,
        offset: u64 = 0,
        rw_flags: i32 = 0
    }

    pub const CODE = sys::IORING_OP_WRITEV;

    pub fn build(self) -> Entry {
        let Writev {
            fd,
            iovec, len, offset,
            ioprio, rw_flags
        } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.ioprio = ioprio;
        // `addr` carries the iovec array pointer; `len` is the iovec count.
        sqe.__bindgen_anon_2.addr = iovec as _;
        sqe.len = len;
        sqe.__bindgen_anon_1.off = offset;
        sqe.__bindgen_anon_3.rw_flags = rw_flags as _;
        Entry(sqe)
    }
}
196
// File sync, analogous to `fsync(2)` (or `fdatasync(2)` depending on
// the flags passed).
opcode! {
    #[derive(Debug)]
    pub struct Fsync {
        fd: { impl sealed::UseFixed },
        ;;
        flags: types::FsyncFlags = types::FsyncFlags::empty()
    }

    pub const CODE = sys::IORING_OP_FSYNC;

    pub fn build(self) -> Entry {
        let Fsync { fd, flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.__bindgen_anon_3.fsync_flags = flags.bits();
        Entry(sqe)
    }
}
228
// Read into a pre-registered buffer; `buf` must point inside the
// registered buffer identified by `buf_index`.
opcode! {
    #[derive(Debug)]
    pub struct ReadFixed {
        fd: { impl sealed::UseFixed },
        buf: { *mut u8 },
        len: { u32 },
        // Index of the registered buffer to read into.
        buf_index: { u16 },
        ;;
        ioprio: u16 = 0,
        offset: u64 = 0,
        rw_flags: i32 = 0
    }

    pub const CODE = sys::IORING_OP_READ_FIXED;

    pub fn build(self) -> Entry {
        let ReadFixed {
            fd,
            buf, len, offset,
            buf_index,
            ioprio, rw_flags
        } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.ioprio = ioprio;
        sqe.__bindgen_anon_2.addr = buf as _;
        sqe.len = len;
        sqe.__bindgen_anon_1.off = offset;
        sqe.__bindgen_anon_3.rw_flags = rw_flags as _;
        sqe.__bindgen_anon_4.buf_index = buf_index;
        Entry(sqe)
    }
}
271
// Write from a pre-registered buffer; `buf` must point inside the
// registered buffer identified by `buf_index`.
opcode! {
    #[derive(Debug)]
    pub struct WriteFixed {
        fd: { impl sealed::UseFixed },
        buf: { *const u8 },
        len: { u32 },
        // Index of the registered buffer to write from.
        buf_index: { u16 },
        ;;
        ioprio: u16 = 0,
        offset: u64 = 0,
        rw_flags: i32 = 0
    }

    pub const CODE = sys::IORING_OP_WRITE_FIXED;

    pub fn build(self) -> Entry {
        let WriteFixed {
            fd,
            buf, len, offset,
            buf_index,
            ioprio, rw_flags
        } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.ioprio = ioprio;
        sqe.__bindgen_anon_2.addr = buf as _;
        sqe.len = len;
        sqe.__bindgen_anon_1.off = offset;
        sqe.__bindgen_anon_3.rw_flags = rw_flags as _;
        sqe.__bindgen_anon_4.buf_index = buf_index;
        Entry(sqe)
    }
}
314
// Poll the specified fd; `flags` is an epoll-style event mask.
// With `multi` set, the request posts a completion for every event
// (multishot) instead of completing after the first one.
opcode! {
    #[derive(Debug)]
    pub struct PollAdd {
        fd: { impl sealed::UseFixed },
        flags: { u32 },
        ;;
        multi: bool = false
    }

    pub const CODE = sys::IORING_OP_POLL_ADD;

    pub fn build(self) -> Entry {
        let PollAdd { fd, flags, multi } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        if multi {
            sqe.len = sys::IORING_POLL_ADD_MULTI;
        }

        // The kernel reads `poll32_events` as a little-endian 32-bit value;
        // on big-endian targets the two 16-bit halves must be swapped.
        #[cfg(target_endian = "little")] {
            sqe.__bindgen_anon_3.poll32_events = flags;
        }

        #[cfg(target_endian = "big")] {
            let x = flags << 16;
            let y = flags >> 16;
            let flags = x | y;
            sqe.__bindgen_anon_3.poll32_events = flags;
        }

        Entry(sqe)
    }
}
363
// Remove an existing poll request, matched by the `user_data` it was
// submitted with.
opcode! {
    #[derive(Debug)]
    pub struct PollRemove {
        user_data: { u64 }
        ;;
    }

    pub const CODE = sys::IORING_OP_POLL_REMOVE;

    pub fn build(self) -> Entry {
        let PollRemove { user_data } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        // No fd involved; `addr` holds the target request's user_data.
        sqe.fd = -1;
        sqe.__bindgen_anon_2.addr = user_data;
        Entry(sqe)
    }
}
387
// Sync a file range, analogous to `sync_file_range(2)`.
opcode! {
    #[derive(Debug)]
    pub struct SyncFileRange {
        fd: { impl sealed::UseFixed },
        len: { u32 },
        ;;
        offset: u64 = 0,
        flags: u32 = 0
    }

    pub const CODE = sys::IORING_OP_SYNC_FILE_RANGE;

    pub fn build(self) -> Entry {
        let SyncFileRange {
            fd,
            len, offset,
            flags
        } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.len = len;
        sqe.__bindgen_anon_1.off = offset;
        sqe.__bindgen_anon_3.sync_range_flags = flags;
        Entry(sqe)
    }
}
419
// Send a message over a socket, analogous to `sendmsg(2)`.
opcode! {
    #[derive(Debug)]
    pub struct SendMsg {
        fd: { impl sealed::UseFixed },
        msg: { *const libc::msghdr },
        ;;
        ioprio: u16 = 0,
        flags: u32 = 0
    }

    pub const CODE = sys::IORING_OP_SENDMSG;

    pub fn build(self) -> Entry {
        let SendMsg { fd, msg, ioprio, flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.ioprio = ioprio;
        sqe.__bindgen_anon_2.addr = msg as _;
        // `len` counts msghdr structures, and is always 1 for this op.
        sqe.len = 1;
        sqe.__bindgen_anon_3.msg_flags = flags;
        Entry(sqe)
    }
}
449
// Receive a message from a socket, analogous to `recvmsg(2)`.
opcode! {
    #[derive(Debug)]
    pub struct RecvMsg {
        fd: { impl sealed::UseFixed },
        msg: { *mut libc::msghdr },
        ;;
        ioprio: u16 = 0,
        flags: u32 = 0,
        // Buffer-group id for provided-buffer selection.
        buf_group: u16 = 0
    }

    pub const CODE = sys::IORING_OP_RECVMSG;

    pub fn build(self) -> Entry {
        let RecvMsg { fd, msg, ioprio, flags, buf_group } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.ioprio = ioprio;
        sqe.__bindgen_anon_2.addr = msg as _;
        // `len` counts msghdr structures, and is always 1 for this op.
        sqe.len = 1;
        sqe.__bindgen_anon_3.msg_flags = flags;
        sqe.__bindgen_anon_4.buf_group = buf_group;
        Entry(sqe)
    }
}
480
// Multishot variant of `RecvMsg`: one submission yields a completion per
// received message, each using a buffer selected from `buf_group`.
opcode! {
    #[derive(Debug)]
    pub struct RecvMsgMulti {
        fd: { impl sealed::UseFixed },
        msg: { *const libc::msghdr },
        buf_group: { u16 },
        ;;
        ioprio: u16 = 0,
        flags: u32 = 0
    }

    pub const CODE = sys::IORING_OP_RECVMSG;

    pub fn build(self) -> Entry {
        let RecvMsgMulti { fd, msg, buf_group, ioprio, flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.__bindgen_anon_2.addr = msg as _;
        // `len` counts msghdr structures, and is always 1 for this op.
        sqe.len = 1;
        sqe.__bindgen_anon_3.msg_flags = flags;
        // Multishot recvmsg requires buffer selection, and the multishot
        // request flag travels in the `ioprio` field.
        sqe.__bindgen_anon_4.buf_group = buf_group;
        sqe.flags |= crate::squeue::Flags::BUFFER_SELECT.bits();
        sqe.ioprio = ioprio | (sys::IORING_RECV_MULTISHOT as u16);
        Entry(sqe)
    }
}
531
// Timeout request: completes when `timespec` elapses or, if `count` is
// non-zero, after that many other completions have been posted.
opcode! {
    #[derive(Debug)]
    pub struct Timeout {
        timespec: { *const types::Timespec },
        ;;
        // Number of completion events to wait for (0 = pure timer).
        count: u32 = 0,

        flags: types::TimeoutFlags = types::TimeoutFlags::empty()
    }

    pub const CODE = sys::IORING_OP_TIMEOUT;

    pub fn build(self) -> Entry {
        let Timeout { timespec, count, flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.fd = -1;
        // `addr` points at one timespec (`len` = 1); `off` carries `count`.
        sqe.__bindgen_anon_2.addr = timespec as _;
        sqe.len = 1;
        sqe.__bindgen_anon_1.off = count as _;
        sqe.__bindgen_anon_3.timeout_flags = flags.bits();
        Entry(sqe)
    }
}
568
// Cancel an existing timeout, matched by the `user_data` it was
// submitted with.
opcode! {
    pub struct TimeoutRemove {
        user_data: { u64 },
        ;;
    }

    pub const CODE = sys::IORING_OP_TIMEOUT_REMOVE;

    pub fn build(self) -> Entry {
        let TimeoutRemove { user_data } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.fd = -1;
        sqe.__bindgen_anon_2.addr = user_data;
        Entry(sqe)
    }
}
590
// Update the expiration of an existing timeout (matched by `user_data`).
// This reuses the TIMEOUT_REMOVE opcode with the IORING_TIMEOUT_UPDATE
// flag set.
opcode! {
    pub struct TimeoutUpdate {
        user_data: { u64 },
        timespec: { *const types::Timespec },
        ;;
        flags: types::TimeoutFlags = types::TimeoutFlags::empty()
    }

    pub const CODE = sys::IORING_OP_TIMEOUT_REMOVE;

    pub fn build(self) -> Entry {
        let TimeoutUpdate { user_data, timespec, flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.fd = -1;
        // For updates, `off` carries the new timespec pointer and `addr`
        // identifies the timeout being updated.
        sqe.__bindgen_anon_1.off = timespec as _;
        sqe.__bindgen_anon_2.addr = user_data;
        sqe.__bindgen_anon_3.timeout_flags = flags.bits() | sys::IORING_TIMEOUT_UPDATE;
        Entry(sqe)
    }
}
615
// Accept a connection on a socket, analogous to `accept4(2)`.
// With `file_index` set, the accepted fd is installed into that fixed
// file slot instead of being returned as a plain descriptor.
opcode! {
    pub struct Accept {
        fd: { impl sealed::UseFixed },
        addr: { *mut libc::sockaddr },
        addrlen: { *mut libc::socklen_t },
        ;;
        file_index: Option<types::DestinationSlot> = None,
        flags: i32 = 0
    }

    pub const CODE = sys::IORING_OP_ACCEPT;

    pub fn build(self) -> Entry {
        let Accept { fd, addr, addrlen, file_index, flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        // `addr` holds the sockaddr out-pointer, `addr2` the socklen pointer.
        sqe.__bindgen_anon_2.addr = addr as _;
        sqe.__bindgen_anon_1.addr2 = addrlen as _;
        sqe.__bindgen_anon_3.accept_flags = flags as _;
        if let Some(dest) = file_index {
            sqe.__bindgen_anon_5.file_index = dest.kernel_index_arg();
        }
        Entry(sqe)
    }
}
644
// Set a socket option, analogous to `setsockopt(2)`, implemented as a
// SOCKET_URING_OP_SETSOCKOPT io_uring command.
opcode! {
    pub struct SetSockOpt {
        fd: { impl sealed::UseFixed },
        level: { u32 },
        optname: { u32 },
        optval: { *const libc::c_void },
        optlen: { u32 },
        ;;
        flags: u32 = 0
    }

    pub const CODE = sys::IORING_OP_URING_CMD;

    pub fn build(self) -> Entry {
        let SetSockOpt { fd, level, optname, optval, optlen, flags } = self;
        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.__bindgen_anon_1.__bindgen_anon_1.cmd_op = sys::SOCKET_URING_OP_SETSOCKOPT;

        sqe.__bindgen_anon_2.__bindgen_anon_1.level = level;
        sqe.__bindgen_anon_2.__bindgen_anon_1.optname = optname;
        sqe.__bindgen_anon_3.uring_cmd_flags = flags;
        sqe.__bindgen_anon_5.optlen = optlen;
        // The option value pointer lives in the trailing command area.
        unsafe { *sqe.__bindgen_anon_6.optval.as_mut() = optval as u64 };
        Entry(sqe)
    }
}
674
// Attempt to cancel an in-flight request, matched by the `user_data` it
// was submitted with.
opcode! {
    pub struct AsyncCancel {
        user_data: { u64 }
        ;;

    }

    pub const CODE = sys::IORING_OP_ASYNC_CANCEL;

    pub fn build(self) -> Entry {
        let AsyncCancel { user_data } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.fd = -1;
        sqe.__bindgen_anon_2.addr = user_data;
        Entry(sqe)
    }
}
696
// Timeout for the previous, linked request: if the linked request does
// not complete before `timespec` elapses, it is cancelled.
opcode! {
    pub struct LinkTimeout {
        timespec: { *const types::Timespec },
        ;;
        flags: types::TimeoutFlags = types::TimeoutFlags::empty()
    }

    pub const CODE = sys::IORING_OP_LINK_TIMEOUT;

    pub fn build(self) -> Entry {
        let LinkTimeout { timespec, flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.fd = -1;
        // `addr` points at one timespec (`len` = 1).
        sqe.__bindgen_anon_2.addr = timespec as _;
        sqe.len = 1;
        sqe.__bindgen_anon_3.timeout_flags = flags.bits();
        Entry(sqe)
    }
}
721
// Connect a socket, analogous to `connect(2)`.
opcode! {
    pub struct Connect {
        fd: { impl sealed::UseFixed },
        addr: { *const libc::sockaddr },
        addrlen: { libc::socklen_t }
        ;;
    }

    pub const CODE = sys::IORING_OP_CONNECT;

    pub fn build(self) -> Entry {
        let Connect { fd, addr, addrlen } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.__bindgen_anon_2.addr = addr as _;
        // The address length travels in the `off` field for this op.
        sqe.__bindgen_anon_1.off = addrlen as _;
        Entry(sqe)
    }
}
744
// Preallocate or deallocate file space, analogous to `fallocate(2)`.
opcode! {
    pub struct Fallocate {
        fd: { impl sealed::UseFixed },
        len: { u64 },
        ;;
        offset: u64 = 0,
        mode: i32 = 0
    }

    pub const CODE = sys::IORING_OP_FALLOCATE;

    pub fn build(self) -> Entry {
        let Fallocate { fd, len, offset, mode } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        // Unusual field mapping for this op: the byte length goes in
        // `addr` and the fallocate mode goes in `len`.
        sqe.__bindgen_anon_2.addr = len;
        sqe.len = mode as _;
        sqe.__bindgen_anon_1.off = offset;
        Entry(sqe)
    }
}
771
// Open a file relative to `dirfd`, analogous to `openat(2)`.
// With `file_index` set, the resulting fd is installed into that fixed
// file slot instead of being returned as a plain descriptor.
opcode! {
    pub struct OpenAt {
        dirfd: { impl sealed::UseFd },
        pathname: { *const libc::c_char },
        ;;
        file_index: Option<types::DestinationSlot> = None,
        flags: i32 = 0,
        mode: libc::mode_t = 0
    }

    pub const CODE = sys::IORING_OP_OPENAT;

    pub fn build(self) -> Entry {
        let OpenAt { dirfd, pathname, file_index, flags, mode } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.fd = dirfd;
        sqe.__bindgen_anon_2.addr = pathname as _;
        // `len` carries the creation mode for this op.
        sqe.len = mode;
        sqe.__bindgen_anon_3.open_flags = flags as _;
        if let Some(dest) = file_index {
            sqe.__bindgen_anon_5.file_index = dest.kernel_index_arg();
        }
        Entry(sqe)
    }
}
800
// Close a file descriptor, or release a fixed file slot.
opcode! {
    pub struct Close {
        fd: { impl sealed::UseFixed },
        ;;
    }

    pub const CODE = sys::IORING_OP_CLOSE;

    pub fn build(self) -> Entry {
        let Close { fd } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        // Fixed slots cannot use assign_fd!/FIXED_FILE here: close encodes
        // the registered slot as `file_index = slot + 1` (0 means "none").
        match fd {
            sealed::Target::Fd(fd) => sqe.fd = fd,
            sealed::Target::Fixed(idx) => {
                sqe.fd = 0;
                sqe.__bindgen_anon_5.file_index = idx + 1;
            }
        }
        Entry(sqe)
    }
}
827
// Update registered files in place with the fds in the `fds` array,
// starting at registered-table position `offset`.
opcode! {
    pub struct FilesUpdate {
        fds: { *const RawFd },
        len: { u32 },
        ;;
        offset: i32 = 0
    }

    pub const CODE = sys::IORING_OP_FILES_UPDATE;

    pub fn build(self) -> Entry {
        let FilesUpdate { fds, len, offset } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.fd = -1;
        sqe.__bindgen_anon_2.addr = fds as _;
        sqe.len = len;
        sqe.__bindgen_anon_1.off = offset as _;
        Entry(sqe)
    }
}
853
// Query file metadata, analogous to `statx(2)`.
opcode! {
    pub struct Statx {
        dirfd: { impl sealed::UseFd },
        pathname: { *const libc::c_char },
        statxbuf: { *mut types::statx },
        ;;
        flags: i32 = 0,
        mask: u32 = 0
    }

    pub const CODE = sys::IORING_OP_STATX;

    pub fn build(self) -> Entry {
        let Statx {
            dirfd, pathname, statxbuf,
            flags, mask
        } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.fd = dirfd;
        sqe.__bindgen_anon_2.addr = pathname as _;
        // `len` carries the statx mask; `off` carries the output buffer
        // pointer for this op.
        sqe.len = mask;
        sqe.__bindgen_anon_1.off = statxbuf as _;
        sqe.__bindgen_anon_3.statx_flags = flags as _;
        Entry(sqe)
    }
}
883
// Read into a plain buffer, analogous to `pread(2)`.
opcode! {
    pub struct Read {
        fd: { impl sealed::UseFixed },
        buf: { *mut u8 },
        len: { u32 },
        ;;
        offset: u64 = 0,
        ioprio: u16 = 0,
        rw_flags: i32 = 0,
        // Buffer-group id for provided-buffer selection.
        buf_group: u16 = 0
    }

    pub const CODE = sys::IORING_OP_READ;

    pub fn build(self) -> Entry {
        let Read {
            fd,
            buf, len, offset,
            ioprio, rw_flags,
            buf_group
        } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.ioprio = ioprio;
        sqe.__bindgen_anon_2.addr = buf as _;
        sqe.len = len;
        sqe.__bindgen_anon_1.off = offset;
        sqe.__bindgen_anon_3.rw_flags = rw_flags as _;
        sqe.__bindgen_anon_4.buf_group = buf_group;
        Entry(sqe)
    }
}
933
// Write from a plain buffer, analogous to `pwrite(2)`.
opcode! {
    pub struct Write {
        fd: { impl sealed::UseFixed },
        buf: { *const u8 },
        len: { u32 },
        ;;
        offset: u64 = 0,
        ioprio: u16 = 0,
        rw_flags: i32 = 0
    }

    pub const CODE = sys::IORING_OP_WRITE;

    pub fn build(self) -> Entry {
        let Write {
            fd,
            buf, len, offset,
            ioprio, rw_flags
        } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.ioprio = ioprio;
        sqe.__bindgen_anon_2.addr = buf as _;
        sqe.len = len;
        sqe.__bindgen_anon_1.off = offset;
        sqe.__bindgen_anon_3.rw_flags = rw_flags as _;
        Entry(sqe)
    }
}
980
// Declare a file access pattern, analogous to `posix_fadvise(2)`.
opcode! {
    pub struct Fadvise {
        fd: { impl sealed::UseFixed },
        len: { libc::off_t },
        advice: { i32 },
        ;;
        offset: u64 = 0,
    }

    pub const CODE = sys::IORING_OP_FADVISE;

    pub fn build(self) -> Entry {
        let Fadvise { fd, len, advice, offset } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.len = len as _;
        sqe.__bindgen_anon_1.off = offset;
        sqe.__bindgen_anon_3.fadvise_advice = advice as _;
        Entry(sqe)
    }
}
1005
// Declare a memory access pattern, analogous to `madvise(2)`.
opcode! {
    pub struct Madvise {
        addr: { *const libc::c_void },
        len: { libc::off_t },
        advice: { i32 },
        ;;
    }

    pub const CODE = sys::IORING_OP_MADVISE;

    pub fn build(self) -> Entry {
        let Madvise { addr, len, advice } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        // No fd involved; the advice shares the fadvise_advice field.
        sqe.fd = -1;
        sqe.__bindgen_anon_2.addr = addr as _;
        sqe.len = len as _;
        sqe.__bindgen_anon_3.fadvise_advice = advice as _;
        Entry(sqe)
    }
}
1029
// Send data on a socket, analogous to `send(2)`. Setting `dest_addr`
// gives `sendto(2)`-like behavior for unconnected sockets.
opcode! {
    pub struct Send {
        fd: { impl sealed::UseFixed },
        buf: { *const u8 },
        len: { u32 },
        ;;
        flags: i32 = 0,

        // Optional destination address; null means "use the connected peer".
        dest_addr: *const libc::sockaddr = core::ptr::null(),
        dest_addr_len: libc::socklen_t = 0,
    }

    pub const CODE = sys::IORING_OP_SEND;

    pub fn build(self) -> Entry {
        let Send { fd, buf, len, flags, dest_addr, dest_addr_len } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.__bindgen_anon_2.addr = buf as _;
        // The destination address and its length ride in `addr2`/`addr_len`.
        sqe.__bindgen_anon_1.addr2 = dest_addr as _;
        sqe.__bindgen_anon_5.__bindgen_anon_1.addr_len = dest_addr_len as _;
        sqe.len = len;
        sqe.__bindgen_anon_3.msg_flags = flags as _;
        Entry(sqe)
    }
}
1063
// Receive data from a socket, analogous to `recv(2)`.
opcode! {
    pub struct Recv {
        fd: { impl sealed::UseFixed },
        buf: { *mut u8 },
        len: { u32 },
        ;;
        flags: i32 = 0,
        // Buffer-group id for provided-buffer selection.
        buf_group: u16 = 0
    }

    pub const CODE = sys::IORING_OP_RECV;

    pub fn build(self) -> Entry {
        let Recv { fd, buf, len, flags, buf_group } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.__bindgen_anon_2.addr = buf as _;
        sqe.len = len;
        sqe.__bindgen_anon_3.msg_flags = flags as _;
        sqe.__bindgen_anon_4.buf_group = buf_group;
        Entry(sqe)
    }
}
1090
// Multishot variant of `Recv`: one submission yields a completion per
// received chunk, each filled from a buffer selected out of `buf_group`.
opcode! {
    pub struct RecvMulti {
        fd: { impl sealed::UseFixed },
        buf_group: { u16 },
        ;;
        flags: i32 = 0,
        len: u32 = 0,
    }

    pub const CODE = sys::IORING_OP_RECV;

    pub fn build(self) -> Entry {
        let RecvMulti { fd, buf_group, flags, len } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.len = len;
        sqe.__bindgen_anon_3.msg_flags = flags as _;
        // Multishot recv requires buffer selection, and the multishot
        // request flag travels in the `ioprio` field.
        sqe.__bindgen_anon_4.buf_group = buf_group;
        sqe.flags |= crate::squeue::Flags::BUFFER_SELECT.bits();
        sqe.ioprio = sys::IORING_RECV_MULTISHOT as _;
        Entry(sqe)
    }
}
1131
// Open a file relative to `dirfd` with extended options, analogous to
// `openat2(2)`. With `file_index` set, the resulting fd is installed
// into that fixed file slot.
opcode! {
    pub struct OpenAt2 {
        dirfd: { impl sealed::UseFd },
        pathname: { *const libc::c_char },
        how: { *const types::OpenHow }
        ;;
        file_index: Option<types::DestinationSlot> = None,
    }

    pub const CODE = sys::IORING_OP_OPENAT2;

    pub fn build(self) -> Entry {
        let OpenAt2 { dirfd, pathname, how, file_index } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.fd = dirfd;
        sqe.__bindgen_anon_2.addr = pathname as _;
        // `len` carries the size of the open_how struct; `off` its pointer.
        sqe.len = mem::size_of::<sys::open_how>() as _;
        sqe.__bindgen_anon_1.off = how as _;
        if let Some(dest) = file_index {
            sqe.__bindgen_anon_5.file_index = dest.kernel_index_arg();
        }
        Entry(sqe)
    }
}
1159
// Modify an epoll interest list, analogous to `epoll_ctl(2)`.
opcode! {
    pub struct EpollCtl {
        epfd: { impl sealed::UseFixed },
        fd: { impl sealed::UseFd },
        op: { i32 },
        ev: { *const types::epoll_event },
        ;;
    }

    pub const CODE = sys::IORING_OP_EPOLL_CTL;

    pub fn build(self) -> Entry {
        let EpollCtl { epfd, fd, op, ev } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = epfd);
        // `addr` holds the event pointer; `len` the epoll op; `off` the
        // target fd.
        sqe.__bindgen_anon_2.addr = ev as _;
        sqe.len = op as _;
        sqe.__bindgen_anon_1.off = fd as _;
        Entry(sqe)
    }
}
1184
// Move data between two fds without copying through user space,
// analogous to `splice(2)`.
opcode! {
    pub struct Splice {
        fd_in: { impl sealed::UseFixed },
        off_in: { i64 },
        fd_out: { impl sealed::UseFixed },
        off_out: { i64 },
        len: { u32 },
        ;;
        flags: u32 = 0
    }

    pub const CODE = sys::IORING_OP_SPLICE;

    pub fn build(self) -> Entry {
        let Splice { fd_in, off_in, fd_out, off_out, len, mut flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        // `fd_out` goes in the regular fd field; `fd_in` has its own slot.
        assign_fd!(sqe.fd = fd_out);
        sqe.len = len;
        sqe.__bindgen_anon_1.off = off_out as _;

        // A fixed input file is signalled via SPLICE_F_FD_IN_FIXED rather
        // than the FIXED_FILE submission flag.
        sqe.__bindgen_anon_5.splice_fd_in = match fd_in {
            sealed::Target::Fd(fd) => fd,
            sealed::Target::Fixed(idx) => {
                flags |= sys::SPLICE_F_FD_IN_FIXED;
                idx as _
            }
        };

        sqe.__bindgen_anon_2.splice_off_in = off_in as _;
        sqe.__bindgen_anon_3.splice_flags = flags;
        Entry(sqe)
    }
}
1227
// Register `nbufs` buffers of `len` bytes each, starting at `addr`, into
// buffer group `bgid` with ids starting at `bid`, for use with
// buffer-selecting reads/receives.
opcode! {
    pub struct ProvideBuffers {
        addr: { *mut u8 },
        len: { i32 },
        nbufs: { u16 },
        bgid: { u16 },
        bid: { u16 }
        ;;
    }

    pub const CODE = sys::IORING_OP_PROVIDE_BUFFERS;

    pub fn build(self) -> Entry {
        let ProvideBuffers { addr, len, nbufs, bgid, bid } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        // Unusual field mapping: the buffer count rides in `fd` and the
        // starting buffer id in `off`.
        sqe.fd = nbufs as _;
        sqe.__bindgen_anon_2.addr = addr as _;
        sqe.len = len as _;
        sqe.__bindgen_anon_1.off = bid as _;
        sqe.__bindgen_anon_4.buf_group = bgid;
        Entry(sqe)
    }
}
1256
// Remove `nbufs` previously provided buffers from buffer group `bgid`.
opcode! {
    pub struct RemoveBuffers {
        nbufs: { u16 },
        bgid: { u16 }
        ;;
    }

    pub const CODE = sys::IORING_OP_REMOVE_BUFFERS;

    pub fn build(self) -> Entry {
        let RemoveBuffers { nbufs, bgid } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        // The buffer count rides in the `fd` field for this op.
        sqe.fd = nbufs as _;
        sqe.__bindgen_anon_4.buf_group = bgid;
        Entry(sqe)
    }
}
1278
// Duplicate pipe content from `fd_in` to `fd_out` without consuming it,
// analogous to `tee(2)`.
opcode! {
    pub struct Tee {
        fd_in: { impl sealed::UseFixed },
        fd_out: { impl sealed::UseFixed },
        len: { u32 }
        ;;
        flags: u32 = 0
    }

    pub const CODE = sys::IORING_OP_TEE;

    pub fn build(self) -> Entry {
        let Tee { fd_in, fd_out, len, mut flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;

        // `fd_out` goes in the regular fd field; `fd_in` has its own slot.
        assign_fd!(sqe.fd = fd_out);
        sqe.len = len;

        // A fixed input file is signalled via SPLICE_F_FD_IN_FIXED rather
        // than the FIXED_FILE submission flag.
        sqe.__bindgen_anon_5.splice_fd_in = match fd_in {
            sealed::Target::Fd(fd) => fd,
            sealed::Target::Fixed(idx) => {
                flags |= sys::SPLICE_F_FD_IN_FIXED;
                idx as _
            }
        };

        sqe.__bindgen_anon_3.splice_flags = flags;

        Entry(sqe)
    }
}
1315
// Shut down part of a full-duplex socket connection, analogous to
// `shutdown(2)`.
opcode! {
    pub struct Shutdown {
        fd: { impl sealed::UseFixed },
        how: { i32 },
        ;;
    }

    pub const CODE = sys::IORING_OP_SHUTDOWN;

    pub fn build(self) -> Entry {
        let Shutdown { fd, how } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        // The shutdown direction (`how`) rides in the `len` field.
        sqe.len = how as _;
        Entry(sqe)
    }
}
1339
// Rename a file, analogous to `renameat2(2)`.
opcode! {
    pub struct RenameAt {
        olddirfd: { impl sealed::UseFd },
        oldpath: { *const libc::c_char },
        newdirfd: { impl sealed::UseFd },
        newpath: { *const libc::c_char },
        ;;
        flags: u32 = 0
    }

    pub const CODE = sys::IORING_OP_RENAMEAT;

    pub fn build(self) -> Entry {
        let RenameAt {
            olddirfd, oldpath,
            newdirfd, newpath,
            flags
        } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.fd = olddirfd;
        sqe.__bindgen_anon_2.addr = oldpath as _;
        // `len` carries the new dirfd and `off` the new path pointer.
        sqe.len = newdirfd as _;
        sqe.__bindgen_anon_1.off = newpath as _;
        sqe.__bindgen_anon_3.rename_flags = flags;
        Entry(sqe)
    }
}
1371
// Remove a directory entry, analogous to `unlinkat(2)`.
opcode! {
    pub struct UnlinkAt {
        dirfd: { impl sealed::UseFd },
        pathname: { *const libc::c_char },
        ;;
        flags: i32 = 0
    }

    pub const CODE = sys::IORING_OP_UNLINKAT;

    pub fn build(self) -> Entry {
        let UnlinkAt { dirfd, pathname, flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.fd = dirfd;
        sqe.__bindgen_anon_2.addr = pathname as _;
        sqe.__bindgen_anon_3.unlink_flags = flags as _;
        Entry(sqe)
    }
}
1395
// Create a directory, analogous to `mkdirat(2)`.
opcode! {
    pub struct MkDirAt {
        dirfd: { impl sealed::UseFd },
        pathname: { *const libc::c_char },
        ;;
        mode: libc::mode_t = 0
    }

    pub const CODE = sys::IORING_OP_MKDIRAT;

    pub fn build(self) -> Entry {
        let MkDirAt { dirfd, pathname, mode } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.fd = dirfd;
        sqe.__bindgen_anon_2.addr = pathname as _;
        // `len` carries the directory mode for this op.
        sqe.len = mode;
        Entry(sqe)
    }
}
1420
// Create a symbolic link, analogous to `symlinkat(2)`.
opcode! {
    pub struct SymlinkAt {
        newdirfd: { impl sealed::UseFd },
        target: { *const libc::c_char },
        linkpath: { *const libc::c_char },
        ;;
    }

    pub const CODE = sys::IORING_OP_SYMLINKAT;

    pub fn build(self) -> Entry {
        let SymlinkAt { newdirfd, target, linkpath } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.fd = newdirfd;
        // `addr` holds the link target; `addr2` the path of the new link.
        sqe.__bindgen_anon_2.addr = target as _;
        sqe.__bindgen_anon_1.addr2 = linkpath as _;
        Entry(sqe)
    }
}
1443
// Create a hard link, analogous to `linkat(2)`.
opcode! {
    pub struct LinkAt {
        olddirfd: { impl sealed::UseFd },
        oldpath: { *const libc::c_char },
        newdirfd: { impl sealed::UseFd },
        newpath: { *const libc::c_char },
        ;;
        flags: i32 = 0
    }

    pub const CODE = sys::IORING_OP_LINKAT;

    pub fn build(self) -> Entry {
        let LinkAt { olddirfd, oldpath, newdirfd, newpath, flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.fd = olddirfd as _;
        sqe.__bindgen_anon_2.addr = oldpath as _;
        // `len` carries the new dirfd and `addr2` the new path pointer.
        sqe.len = newdirfd as _;
        sqe.__bindgen_anon_1.addr2 = newpath as _;
        sqe.__bindgen_anon_3.hardlink_flags = flags as _;
        Entry(sqe)
    }
}
1470
// Read an extended attribute of a path, analogous to `getxattr(2)`.
opcode! {
    pub struct GetXattr {
        name: { *const libc::c_char },
        value: { *mut libc::c_void },
        path: { *const libc::c_char },
        len: { u32 },
        ;;
    }

    pub const CODE = sys::IORING_OP_GETXATTR;

    pub fn build(self) -> Entry {
        let GetXattr { name, value, path, len } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        // `addr` holds the attribute name, `off` the value buffer, and
        // `addr3` (in the trailing command area) the file path.
        sqe.__bindgen_anon_2.addr = name as _;
        sqe.len = len;
        sqe.__bindgen_anon_1.off = value as _;
        unsafe { sqe.__bindgen_anon_6.__bindgen_anon_1.as_mut().addr3 = path as _ };
        sqe.__bindgen_anon_3.xattr_flags = 0;
        Entry(sqe)
    }
}
1498
// Set an extended attribute of a path, analogous to `setxattr(2)`.
opcode! {
    pub struct SetXattr {
        name: { *const libc::c_char },
        value: { *const libc::c_void },
        path: { *const libc::c_char },
        len: { u32 },
        ;;
        flags: i32 = 0
    }

    pub const CODE = sys::IORING_OP_SETXATTR;

    pub fn build(self) -> Entry {
        let SetXattr { name, value, path, flags, len } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        // `addr` holds the attribute name, `off` the value buffer, and
        // `addr3` (in the trailing command area) the file path.
        sqe.__bindgen_anon_2.addr = name as _;
        sqe.len = len;
        sqe.__bindgen_anon_1.off = value as _;
        unsafe { sqe.__bindgen_anon_6.__bindgen_anon_1.as_mut().addr3 = path as _ };
        sqe.__bindgen_anon_3.xattr_flags = flags as _;
        Entry(sqe)
    }
}
1525
// Read an extended attribute of an open file, analogous to
// `fgetxattr(2)`.
opcode! {
    pub struct FGetXattr {
        fd: { impl sealed::UseFixed },
        name: { *const libc::c_char },
        value: { *mut libc::c_void },
        len: { u32 },
        ;;
    }

    pub const CODE = sys::IORING_OP_FGETXATTR;

    pub fn build(self) -> Entry {
        let FGetXattr { fd, name, value, len } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        // `addr` holds the attribute name and `off` the value buffer.
        sqe.__bindgen_anon_2.addr = name as _;
        sqe.len = len;
        sqe.__bindgen_anon_1.off = value as _;
        sqe.__bindgen_anon_3.xattr_flags = 0;
        Entry(sqe)
    }
}
1551
// Set an extended attribute of an open file, analogous to
// `fsetxattr(2)`.
opcode! {
    pub struct FSetXattr {
        fd: { impl sealed::UseFixed },
        name: { *const libc::c_char },
        value: { *const libc::c_void },
        len: { u32 },
        ;;
        flags: i32 = 0
    }

    pub const CODE = sys::IORING_OP_FSETXATTR;

    pub fn build(self) -> Entry {
        let FSetXattr { fd, name, value, flags, len } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        // `addr` holds the attribute name and `off` the value buffer.
        sqe.__bindgen_anon_2.addr = name as _;
        sqe.len = len;
        sqe.__bindgen_anon_1.off = value as _;
        sqe.__bindgen_anon_3.xattr_flags = flags as _;
        Entry(sqe)
    }
}
1578
// Post a completion (carrying `result` and `user_data`) onto another
// ring identified by `ring_fd`.
opcode! {
    pub struct MsgRingData {
        ring_fd: { impl sealed::UseFd },
        result: { i32 },
        user_data: { u64 },
        // Optional CQE flags to deliver; requires IORING_MSG_RING_FLAGS_PASS.
        user_flags: { Option<u32> },
        ;;
        opcode_flags: u32 = 0
    }

    pub const CODE = sys::IORING_OP_MSG_RING;

    pub fn build(self) -> Entry {
        let MsgRingData { ring_fd, result, user_data, user_flags, opcode_flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        // `addr` selects the MSG_RING sub-command (send data); `len` and
        // `off` carry the res/user_data of the CQE to post remotely.
        sqe.__bindgen_anon_2.addr = sys::IORING_MSG_DATA.into();
        sqe.fd = ring_fd;
        sqe.len = result as u32;
        sqe.__bindgen_anon_1.off = user_data;
        sqe.__bindgen_anon_3.msg_ring_flags = opcode_flags;
        if let Some(flags) = user_flags {
            sqe.__bindgen_anon_5.file_index = flags;
            unsafe {sqe.__bindgen_anon_3.msg_ring_flags |= sys::IORING_MSG_RING_FLAGS_PASS};
        }
        Entry(sqe)
    }
}
1611
opcode! {
    /// Attempt to cancel previously issued requests matching the
    /// criteria described by `builder` (see [`types::CancelBuilder`]).
    pub struct AsyncCancel2 {
        builder: { types::CancelBuilder }
        ;;
    }

    pub const CODE = sys::IORING_OP_ASYNC_CANCEL;

    pub fn build(self) -> Entry {
        let AsyncCancel2 { builder } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.fd = builder.to_fd();
        // Match key: the user_data of the request(s) to cancel (0 if unset).
        sqe.__bindgen_anon_2.addr = builder.user_data.unwrap_or(0);
        sqe.__bindgen_anon_3.cancel_flags = builder.flags.bits();
        Entry(sqe)
    }
}
1636
opcode! {
    /// A file/device-private command (`IORING_OP_URING_CMD`) whose
    /// payload fits in the 16 bytes of inline command data of a
    /// regular 64-byte SQE.
    pub struct UringCmd16 {
        fd: { impl sealed::UseFixed },
        // Command opcode, interpreted by the target file/device.
        cmd_op: { u32 },
        ;;
        /// If set, use the registered (fixed) buffer with this index
        /// and set `IORING_URING_CMD_FIXED`.
        buf_index: Option<u16> = None,
        /// The 16 bytes of inline command data.
        cmd: [u8; 16] = [0u8; 16],
        /// Optional value for the SQE `addr` field (command-specific).
        addr: Option<u64> = None,
    }

    pub const CODE = sys::IORING_OP_URING_CMD;

    pub fn build(self) -> Entry {
        let UringCmd16 { fd, cmd_op, cmd, buf_index, addr } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.__bindgen_anon_1.__bindgen_anon_1.cmd_op = cmd_op;
        // SAFETY: copies exactly 16 bytes into the SQE's inline `cmd` area.
        unsafe { *sqe.__bindgen_anon_6.cmd.as_mut().as_mut_ptr().cast::<[u8; 16]>() = cmd };
        if let Some(buf_index) = buf_index {
            sqe.__bindgen_anon_4.buf_index = buf_index;
            unsafe {
                sqe.__bindgen_anon_3.uring_cmd_flags |= sys::IORING_URING_CMD_FIXED;
            }
        }
        if let Some(addr) = addr {
            sqe.__bindgen_anon_2.addr = addr;
        }
        Entry(sqe)
    }
}
1677
opcode! {
    /// A file/device-private command (`IORING_OP_URING_CMD`) carried
    /// in a 128-byte SQE: the first 16 payload bytes live inline in
    /// the SQE, the remaining 64 in the big-SQE extension.
    pub struct UringCmd80 {
        fd: { impl sealed::UseFixed },
        // Command opcode, interpreted by the target file/device.
        cmd_op: { u32 },
        ;;
        /// If set, use the registered (fixed) buffer with this index
        /// and set `IORING_URING_CMD_FIXED`.
        buf_index: Option<u16> = None,
        /// The 80 bytes of command data.
        cmd: [u8; 80] = [0u8; 80],
        /// Optional value for the SQE `addr` field (command-specific).
        addr: Option<u64> = None,
    }

    pub const CODE = sys::IORING_OP_URING_CMD;

    pub fn build(self) -> Entry128 {
        let UringCmd80 { fd, cmd_op, cmd, buf_index, addr } = self;

        // Split the payload: the first 16 bytes go inline, the
        // trailing 64 bytes form the second half of the 128-byte entry.
        let cmd1 = cmd[..16].try_into().unwrap();
        let cmd2 = cmd[16..].try_into().unwrap();

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.__bindgen_anon_1.__bindgen_anon_1.cmd_op = cmd_op;
        // SAFETY: copies exactly 16 bytes into the SQE's inline `cmd` area.
        unsafe { *sqe.__bindgen_anon_6.cmd.as_mut().as_mut_ptr().cast::<[u8; 16]>() = cmd1 };
        if let Some(buf_index) = buf_index {
            sqe.__bindgen_anon_4.buf_index = buf_index;
            unsafe {
                sqe.__bindgen_anon_3.uring_cmd_flags |= sys::IORING_URING_CMD_FIXED;
            }
        }
        if let Some(addr) = addr {
            sqe.__bindgen_anon_2.addr = addr;
        }
        Entry128(Entry(sqe), cmd2)
    }
}
1721
opcode! {
    /// Create a socket, analogous to `socket(2)`. If `file_index` is
    /// set, the resulting socket is stored directly into the fixed
    /// file table instead of being returned as a regular fd.
    pub struct Socket {
        domain: { i32 },
        socket_type: { i32 },
        protocol: { i32 },
        ;;
        /// Destination slot in the fixed file table for the new socket.
        file_index: Option<types::DestinationSlot> = None,
        /// Extra flags placed in the SQE `rw_flags` field.
        flags: i32 = 0,
    }

    pub const CODE = sys::IORING_OP_SOCKET;

    pub fn build(self) -> Entry {
        let Socket { domain, socket_type, protocol, file_index, flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        // socket SQE layout: domain in `fd`, type in `off`,
        // protocol in `len`.
        sqe.fd = domain as _;
        sqe.__bindgen_anon_1.off = socket_type as _;
        sqe.len = protocol as _;
        sqe.__bindgen_anon_3.rw_flags = flags as _;
        if let Some(dest) = file_index {
            sqe.__bindgen_anon_5.file_index = dest.kernel_index_arg();
        }
        Entry(sqe)
    }
}
1758
opcode! {
    /// Multishot accept: keeps accepting connections on `fd`,
    /// posting one CQE per connection, until cancelled or an error
    /// occurs.
    pub struct AcceptMulti {
        fd: { impl sealed::UseFixed },
        ;;
        /// When true, ask the kernel to allocate a fixed-file-table
        /// slot for each accepted socket (`IORING_FILE_INDEX_ALLOC`).
        allocate_file_index: bool = false,
        /// `accept4(2)`-style flags placed in `accept_flags`.
        flags: i32 = 0
    }

    pub const CODE = sys::IORING_OP_ACCEPT;

    pub fn build(self) -> Entry {
        let AcceptMulti { fd, allocate_file_index, flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        // Multishot mode is requested through the ioprio field.
        sqe.ioprio = sys::IORING_ACCEPT_MULTISHOT as u16;
        sqe.__bindgen_anon_3.accept_flags = flags as _;
        if allocate_file_index {
            sqe.__bindgen_anon_5.file_index = sys::IORING_FILE_INDEX_ALLOC as u32;
        }
        Entry(sqe)
    }
}
1790
opcode! {
    /// Send a fixed-table file descriptor (`fixed_slot_src`) from
    /// this ring to slot `dest_slot_index` in the fixed file table of
    /// the target ring `ring_fd`, posting a CQE with `user_data`
    /// there.
    pub struct MsgRingSendFd {
        ring_fd: { impl sealed::UseFd },
        fixed_slot_src: { types::Fixed },
        dest_slot_index: { types::DestinationSlot },
        user_data: { u64 },
        ;;
        /// Flags placed in the SQE's `msg_ring_flags` field.
        opcode_flags: u32 = 0
    }

    pub const CODE = sys::IORING_OP_MSG_RING;

    pub fn build(self) -> Entry {
        let MsgRingSendFd { ring_fd, fixed_slot_src, dest_slot_index, user_data, opcode_flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        // IORING_MSG_SEND_FD selects the "send fd" sub-command of MSG_RING.
        sqe.__bindgen_anon_2.addr = sys::IORING_MSG_SEND_FD.into();
        sqe.fd = ring_fd;
        sqe.__bindgen_anon_1.off = user_data;
        // SAFETY: writing the source slot index into the big-SQE `addr3` field.
        unsafe { sqe.__bindgen_anon_6.__bindgen_anon_1.as_mut().addr3 = fixed_slot_src.0 as u64 };
        sqe.__bindgen_anon_5.file_index = dest_slot_index.kernel_index_arg();
        sqe.__bindgen_anon_3.msg_ring_flags = opcode_flags;
        Entry(sqe)
    }
}
1820
opcode! {
    /// Zero-copy send: like a plain send, but the kernel may
    /// reference `buf` directly instead of copying it, so the buffer
    /// must remain valid until the final (notification) completion —
    /// see io_uring_enter(2) on `IORING_OP_SEND_ZC`.
    pub struct SendZc {
        fd: { impl sealed::UseFixed },
        buf: { *const u8 },
        len: { u32 },
        ;;
        /// If set, `buf` refers to a registered (fixed) buffer with
        /// this index; sets `IORING_RECVSEND_FIXED_BUF`.
        buf_index: Option<u16> = None,
        /// Optional destination address (sendto-style); null means none.
        dest_addr: *const libc::sockaddr = core::ptr::null(),
        dest_addr_len: libc::socklen_t = 0,
        /// `send(2)`-style flags placed in `msg_flags`.
        flags: i32 = 0,
        /// `IORING_RECVSEND_*` zero-copy flags (carried in ioprio).
        zc_flags: u16 = 0,
    }

    pub const CODE = sys::IORING_OP_SEND_ZC;

    pub fn build(self) -> Entry {
        let SendZc { fd, buf, len, buf_index, dest_addr, dest_addr_len, flags, zc_flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.__bindgen_anon_2.addr = buf as _;
        sqe.len = len;
        sqe.__bindgen_anon_3.msg_flags = flags as _;
        sqe.ioprio = zc_flags;
        if let Some(buf_index) = buf_index {
            sqe.__bindgen_anon_4.buf_index = buf_index;
            sqe.ioprio |= sys::IORING_RECVSEND_FIXED_BUF as u16;
        }
        // Destination address pointer/length go in `addr2`/`addr_len`.
        sqe.__bindgen_anon_1.addr2 = dest_addr as _;
        sqe.__bindgen_anon_5.__bindgen_anon_1.addr_len = dest_addr_len as _;
        Entry(sqe)
    }
}
1876
opcode! {
    /// Zero-copy `sendmsg(2)`-style send of the message described by
    /// `msg` on socket `fd`; the msghdr and its buffers must stay
    /// valid until completion — TODO confirm lifetime requirements
    /// against io_uring_enter(2).
    #[derive(Debug)]
    pub struct SendMsgZc {
        fd: { impl sealed::UseFixed },
        msg: { *const libc::msghdr },
        ;;
        ioprio: u16 = 0,
        /// `sendmsg(2)`-style flags placed in `msg_flags`.
        flags: u32 = 0
    }

    pub const CODE = sys::IORING_OP_SENDMSG_ZC;

    pub fn build(self) -> Entry {
        let SendMsgZc { fd, msg, ioprio, flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.ioprio = ioprio;
        sqe.__bindgen_anon_2.addr = msg as _;
        // Exactly one msghdr is submitted.
        sqe.len = 1;
        sqe.__bindgen_anon_3.msg_flags = flags;
        Entry(sqe)
    }
}
1908
opcode! {
    /// Multishot read: repeatedly reads up to `len` bytes from `fd`
    /// into buffers selected from buffer group `buf_group`, posting
    /// one CQE per read.
    pub struct ReadMulti {
        fd: { impl sealed::UseFixed },
        len: { u32 },
        buf_group: { u16 },
        ;;
        offset: u64 = 0,
    }

    pub const CODE = sys::IORING_OP_READ_MULTISHOT;

    pub fn build(self) -> Entry {
        let Self { fd, len, buf_group, offset } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.__bindgen_anon_1.off = offset;
        sqe.len = len;
        sqe.__bindgen_anon_4.buf_group = buf_group;
        // Buffer selection is always used for multishot reads.
        sqe.flags = crate::squeue::Flags::BUFFER_SELECT.bits();
        Entry(sqe)
    }
}
1936
opcode! {
    /// Wait on the futex word at `futex`, analogous to the
    /// `FUTEX_WAIT` operation of `futex(2)`.
    #[derive(Debug)]
    pub struct FutexWait {
        futex: { *const u32 },
        // Expected value of the futex word.
        val: { u64 },
        // Bitmask applied when matching wakeups.
        mask: { u64 },
        // FUTEX2_* flags describing the futex word itself.
        futex_flags: { u32 },
        ;;
        /// Operation flags placed in the SQE `futex_flags` field.
        flags: u32 = 0
    }

    pub const CODE = sys::IORING_OP_FUTEX_WAIT;

    pub fn build(self) -> Entry {
        let FutexWait { futex, val, mask, futex_flags, flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        // futex SQE layout: FUTEX2 flags in `fd`, futex pointer in
        // `addr`, expected value in `off`, mask in `addr3`.
        sqe.fd = futex_flags as _;
        sqe.__bindgen_anon_2.addr = futex as usize as _;
        sqe.__bindgen_anon_1.off = val;
        // SAFETY: writing the mask into the big-SQE `addr3` field.
        unsafe { sqe.__bindgen_anon_6.__bindgen_anon_1.as_mut().addr3 = mask };
        sqe.__bindgen_anon_3.futex_flags = flags;
        Entry(sqe)
    }
}
1971
opcode! {
    /// Wake waiters on the futex word at `futex`, analogous to the
    /// `FUTEX_WAKE` operation of `futex(2)`.
    #[derive(Debug)]
    pub struct FutexWake {
        futex: { *const u32 },
        // Maximum number of waiters to wake.
        val: { u64 },
        // Bitmask applied when matching waiters.
        mask: { u64 },
        // FUTEX2_* flags describing the futex word itself.
        futex_flags: { u32 },
        ;;
        /// Operation flags placed in the SQE `futex_flags` field.
        flags: u32 = 0
    }

    pub const CODE = sys::IORING_OP_FUTEX_WAKE;

    pub fn build(self) -> Entry {
        let FutexWake { futex, val, mask, futex_flags, flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        // Same SQE layout as FutexWait: FUTEX2 flags in `fd`, futex
        // pointer in `addr`, count in `off`, mask in `addr3`.
        sqe.fd = futex_flags as _;
        sqe.__bindgen_anon_2.addr = futex as usize as _;
        sqe.__bindgen_anon_1.off = val;
        // SAFETY: writing the mask into the big-SQE `addr3` field.
        unsafe { sqe.__bindgen_anon_6.__bindgen_anon_1.as_mut().addr3 = mask };
        sqe.__bindgen_anon_3.futex_flags = flags;
        Entry(sqe)
    }
}
2005
opcode! {
    /// Wait on multiple futexes at once, analogous to
    /// `futex_waitv(2)`. `futexv` points to an array of `nr_futex`
    /// wait descriptors.
    #[derive(Debug)]
    pub struct FutexWaitV {
        futexv: { *const types::FutexWaitV },
        nr_futex: { u32 },
        ;;
        /// Operation flags placed in the SQE `futex_flags` field.
        flags: u32 = 0
    }

    pub const CODE = sys::IORING_OP_FUTEX_WAITV;

    pub fn build(self) -> Entry {
        let FutexWaitV { futexv, nr_futex, flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        // Vector pointer goes in `addr`, element count in `len`.
        sqe.__bindgen_anon_2.addr = futexv as usize as _;
        sqe.len = nr_futex;
        sqe.__bindgen_anon_3.futex_flags = flags;
        Entry(sqe)
    }
}
2033
opcode! {
    /// Wait for a process state change, analogous to `waitid(2)`.
    #[derive(Debug)]
    pub struct WaitId {
        idtype: { libc::idtype_t },
        id: { libc::id_t },
        options: { libc::c_int },
        ;;
        /// Optional pointer receiving the resulting siginfo
        /// (null means none) — TODO confirm against io_uring docs.
        infop: *const libc::siginfo_t = std::ptr::null(),
        /// Flags placed in the SQE `waitid_flags` field.
        flags: libc::c_uint = 0,
    }

    pub const CODE = sys::IORING_OP_WAITID;

    pub fn build(self) -> Entry {
        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        // waitid SQE layout: id in `fd`, idtype in `len`, options in
        // `file_index`, siginfo pointer in `addr2`.
        sqe.fd = self.id as _;
        sqe.len = self.idtype as _;
        sqe.__bindgen_anon_3.waitid_flags = self.flags;
        sqe.__bindgen_anon_5.file_index = self.options as _;
        sqe.__bindgen_anon_1.addr2 = self.infop as _;
        Entry(sqe)
    }
}
2061
opcode! {
    /// Install a file from the fixed file table (`fd`) as a regular
    /// process file descriptor, making it usable outside the ring.
    #[derive(Debug)]
    pub struct FixedFdInstall {
        fd: { types::Fixed },
        // Flags placed in the SQE `install_fd_flags` field.
        file_flags: { u32 },
        ;;
    }

    pub const CODE = sys::IORING_OP_FIXED_FD_INSTALL;

    pub fn build(self) -> Entry {
        let FixedFdInstall { fd, file_flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.fd = fd.0 as _;
        // The source fd is always a fixed-table entry for this opcode.
        sqe.flags = crate::squeue::Flags::FIXED_FILE.bits();
        sqe.__bindgen_anon_3.install_fd_flags = file_flags;
        Entry(sqe)
    }
}
2089
opcode! {
    /// Truncate the file referred to by `fd` to `len` bytes,
    /// analogous to `ftruncate(2)`.
    #[derive(Debug)]
    pub struct Ftruncate {
        fd: { impl sealed::UseFixed },
        len: { u64 },
        ;;
    }

    pub const CODE = sys::IORING_OP_FTRUNCATE;

    pub fn build(self) -> Entry {
        let Ftruncate { fd, len } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        // The target length travels in the `off` field.
        sqe.__bindgen_anon_1.off = len;
        Entry(sqe)
    }
}
2113
opcode! {
    /// Send in "bundle" mode: the kernel picks one or more buffers
    /// from buffer group `buf_group` and sends them in a single
    /// operation (`IORING_RECVSEND_BUNDLE`).
    pub struct SendBundle {
        fd: { impl sealed::UseFixed },
        buf_group: { u16 },
        ;;
        /// `send(2)`-style flags placed in `msg_flags`.
        flags: i32 = 0,
        /// Byte limit for the send; 0 presumably means no limit —
        /// TODO confirm against the io_uring bundle documentation.
        len: u32 = 0
    }

    pub const CODE = sys::IORING_OP_SEND;

    pub fn build(self) -> Entry {
        let SendBundle { fd, len, flags, buf_group } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.len = len;
        sqe.__bindgen_anon_3.msg_flags = flags as _;
        // Bundle mode implies kernel buffer selection from buf_group.
        sqe.ioprio |= sys::IORING_RECVSEND_BUNDLE as u16;
        sqe.flags |= crate::squeue::Flags::BUFFER_SELECT.bits();
        sqe.__bindgen_anon_4.buf_group = buf_group;
        Entry(sqe)
    }
}
2142
opcode! {
    /// Receive in "bundle" mode: the kernel may fill multiple
    /// buffers from buffer group `buf_group` in a single receive
    /// (`IORING_RECVSEND_BUNDLE`).
    pub struct RecvBundle {
        fd: { impl sealed::UseFixed },
        buf_group: { u16 },
        ;;
        /// `recv(2)`-style flags placed in `msg_flags`.
        flags: i32 = 0
    }

    pub const CODE = sys::IORING_OP_RECV;

    pub fn build(self) -> Entry {
        let RecvBundle { fd, buf_group, flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.__bindgen_anon_3.msg_flags = flags as _;
        sqe.__bindgen_anon_4.buf_group = buf_group;
        // Bundle mode implies kernel buffer selection from buf_group.
        sqe.flags |= crate::squeue::Flags::BUFFER_SELECT.bits();
        sqe.ioprio |= sys::IORING_RECVSEND_BUNDLE as u16;
        Entry(sqe)
    }
}
2175
opcode! {
    /// Multishot receive in "bundle" mode: combines
    /// `IORING_RECV_MULTISHOT` (keep receiving and posting CQEs)
    /// with `IORING_RECVSEND_BUNDLE` (multiple buffers per receive),
    /// using buffer group `buf_group`.
    pub struct RecvMultiBundle {
        fd: { impl sealed::UseFixed },
        buf_group: { u16 },
        ;;
        /// `recv(2)`-style flags placed in `msg_flags`.
        flags: i32 = 0
    }

    pub const CODE = sys::IORING_OP_RECV;

    pub fn build(self) -> Entry {
        let RecvMultiBundle { fd, buf_group, flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.__bindgen_anon_3.msg_flags = flags as _;
        sqe.__bindgen_anon_4.buf_group = buf_group;
        sqe.flags |= crate::squeue::Flags::BUFFER_SELECT.bits();
        // Both mode bits travel in the ioprio field.
        sqe.ioprio = sys::IORING_RECV_MULTISHOT as _;
        sqe.ioprio |= sys::IORING_RECVSEND_BUNDLE as u16;
        Entry(sqe)
    }
}
2217
opcode! {
    /// Bind socket `fd` to the address at `addr`, analogous to
    /// `bind(2)`.
    pub struct Bind {
        fd: { impl sealed::UseFixed },
        addr: { *const libc::sockaddr },
        addrlen: { libc::socklen_t }
        ;;
    }

    pub const CODE = sys::IORING_OP_BIND;

    pub fn build(self) -> Entry {
        let Bind { fd, addr, addrlen } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        // Address pointer goes in `addr`, its length in `off`.
        sqe.__bindgen_anon_2.addr = addr as _;
        sqe.__bindgen_anon_1.off = addrlen as _;
        Entry(sqe)
    }
}
2242
opcode! {
    /// Mark socket `fd` as passive with the given `backlog`,
    /// analogous to `listen(2)`.
    pub struct Listen {
        fd: { impl sealed::UseFixed },
        backlog: { i32 },
        ;;
    }

    pub const CODE = sys::IORING_OP_LISTEN;

    pub fn build(self) -> Entry {
        let Listen { fd, backlog } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        // The backlog travels in the `len` field.
        sqe.len = backlog as _;
        Entry(sqe)
    }
}
2263
opcode! {
    /// Zero-copy receive on socket `fd` via the zero-copy RX
    /// interface queue `ifq`; always issued in multishot mode.
    pub struct RecvZc {
        fd: { impl sealed::UseFixed },
        len: { u32 },
        ;;
        /// Index of the registered zero-copy RX interface queue.
        ifq: u32 = 0,
        /// Extra bits OR'd into the ioprio field.
        ioprio: u16 = 0,
    }

    pub const CODE = sys::IORING_OP_RECV_ZC;

    pub fn build(self) -> Entry {
        let Self { fd, len, ifq, ioprio } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.len = len;
        // Multishot is unconditionally requested for RECV_ZC.
        sqe.ioprio = ioprio | sys::IORING_RECV_MULTISHOT as u16;
        sqe.__bindgen_anon_5.zcrx_ifq_idx = ifq;
        Entry(sqe)
    }
}
2290
opcode! {
    /// Wait for events on the epoll instance `fd`, analogous to
    /// `epoll_wait(2)`, writing up to `max_events` entries into
    /// `events`.
    pub struct EpollWait {
        fd: { impl sealed::UseFixed },
        events: { *mut types::epoll_event },
        max_events: { u32 },
        ;;
        /// Flags placed in the SQE `poll32_events` field.
        flags: u32 = 0,
    }

    pub const CODE = sys::IORING_OP_EPOLL_WAIT;

    pub fn build(self) -> Entry {
        let Self { fd, events, max_events, flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        // Output array pointer in `addr`, its capacity in `len`.
        sqe.__bindgen_anon_2.addr = events as u64;
        sqe.len = max_events;
        sqe.__bindgen_anon_3.poll32_events = flags;
        Entry(sqe)
    }
}
2315
opcode! {
    /// Vectored read (`readv`-style) into `len` iovecs at `iovec`,
    /// where the iovecs point into the registered (fixed) buffer
    /// identified by `buf_index`.
    pub struct ReadvFixed {
        fd: { impl sealed::UseFixed },
        iovec: { *const ::libc::iovec },
        len: { u32 },
        // Index of the registered buffer backing the iovecs.
        buf_index: { u16 },
        ;;
        ioprio: u16 = 0,
        offset: u64 = 0,
        /// `preadv2(2)`-style flags placed in `rw_flags`.
        rw_flags: i32 = 0,
    }

    pub const CODE = sys::IORING_OP_READV_FIXED;

    pub fn build(self) -> Entry {
        let Self { fd, iovec, len, buf_index, offset, ioprio, rw_flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.__bindgen_anon_1.off = offset as _;
        sqe.__bindgen_anon_2.addr = iovec as _;
        sqe.len = len;
        sqe.__bindgen_anon_4.buf_index = buf_index;
        sqe.ioprio = ioprio;
        sqe.__bindgen_anon_3.rw_flags = rw_flags as _;
        Entry(sqe)
    }
}
2346
opcode! {
    /// Vectored write (`writev`-style) from `len` iovecs at `iovec`,
    /// where the iovecs point into the registered (fixed) buffer
    /// identified by `buf_index`.
    pub struct WritevFixed {
        fd: { impl sealed::UseFixed },
        iovec: { *const ::libc::iovec },
        len: { u32 },
        // Index of the registered buffer backing the iovecs.
        buf_index: { u16 },
        ;;
        ioprio: u16 = 0,
        offset: u64 = 0,
        /// `pwritev2(2)`-style flags placed in `rw_flags`.
        rw_flags: i32 = 0,
    }

    pub const CODE = sys::IORING_OP_WRITEV_FIXED;

    pub fn build(self) -> Entry {
        let Self { fd, iovec, len, buf_index, offset, ioprio, rw_flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.__bindgen_anon_1.off = offset as _;
        sqe.__bindgen_anon_2.addr = iovec as _;
        sqe.len = len;
        sqe.__bindgen_anon_4.buf_index = buf_index;
        sqe.ioprio = ioprio;
        sqe.__bindgen_anon_3.rw_flags = rw_flags as _;
        Entry(sqe)
    }
}
2377
opcode! {
    /// Create a pipe, analogous to `pipe2(2)`.
    pub struct Pipe {
        // Pointer receiving the resulting descriptors — presumably an
        // array of two RawFds (read end, write end); TODO confirm the
        // exact contract against the kernel IORING_OP_PIPE docs.
        fds: { *mut RawFd },
        ;;
        /// `pipe2(2)`-style flags placed in `pipe_flags`.
        flags: u32 = 0,
        /// Optional fixed-file-table destination for the new fds.
        file_index: Option<types::DestinationSlot> = None,
    }

    pub const CODE = sys::IORING_OP_PIPE;

    pub fn build(self) -> Entry {
        let Self { fds, flags, file_index } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        // The fd field is not used as an input for this opcode.
        sqe.fd = 0;
        sqe.__bindgen_anon_2.addr = fds as _;
        sqe.__bindgen_anon_3.pipe_flags = flags;
        if let Some(dest) = file_index {
            sqe.__bindgen_anon_5.file_index = dest.kernel_index_arg();
        }
        Entry(sqe)
    }
}