//! Operation code definitions used to build submission queue entries ([`Entry`]).

#![allow(clippy::new_without_default)]

use std::convert::TryInto;
use std::mem;
use std::os::unix::io::RawFd;

use crate::squeue::Entry;
use crate::squeue::Entry128;
use crate::sys;
use crate::types::{self, sealed};

// Assign an fd target to `sqe.fd`. A fixed target stores the registered-file
// index instead and sets the `FIXED_FILE` submission flag.
macro_rules! assign_fd {
    ( $sqe:ident . fd = $opfd:expr ) => {
        match $opfd {
            sealed::Target::Fd(fd) => $sqe.fd = fd,
            sealed::Target::Fixed(idx) => {
                $sqe.fd = idx as _;
                $sqe.flags |= crate::squeue::Flags::FIXED_FILE.bits();
            }
        }
    };
}
25
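// The `opcode!` macro generates one builder type per io_uring opcode.
// Fields listed before the `;;` separator become required arguments of `new()`;
// fields after it are optional parameters with the given defaults, each exposed
// as a builder-style setter. Every generated type also carries its raw opcode in
// `CODE` and a `build()` method that produces the final submission queue entry.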
26macro_rules! opcode {
27 (@type impl sealed::UseFixed ) => {
28 sealed::Target
29 };
30 (@type impl sealed::UseFd ) => {
31 RawFd
32 };
33 (@type $name:ty ) => {
34 $name
35 };
36 (
37 $( #[$outer:meta] )*
38 pub struct $name:ident {
39 $( #[$new_meta:meta] )*
40
41 $( $field:ident : { $( $tnt:tt )+ } ),*
42
43 $(,)?
44
45 ;;
46
47 $(
48 $( #[$opt_meta:meta] )*
49 $opt_field:ident : $opt_tname:ty = $default:expr
50 ),*
51
52 $(,)?
53 }
54
55 pub const CODE = $opcode:expr;
56
57 $( #[$build_meta:meta] )*
58 pub fn build($self:ident) -> $entry:ty $build_block:block
59 ) => {
60 $( #[$outer] )*
61 pub struct $name {
62 $( $field : opcode!(@type $( $tnt )*), )*
63 $( $opt_field : $opt_tname, )*
64 }
65
66 impl $name {
67 $( #[$new_meta] )*
68 #[inline]
69 pub fn new($( $field : $( $tnt )* ),*) -> Self {
70 $name {
71 $( $field: $field.into(), )*
72 $( $opt_field: $default, )*
73 }
74 }
75
76 pub const CODE: u8 = $opcode as _;
80
81 $(
82 $( #[$opt_meta] )*
83 #[inline]
84 pub const fn $opt_field(mut self, $opt_field: $opt_tname) -> Self {
85 self.$opt_field = $opt_field;
86 self
87 }
88 )*
89
90 $( #[$build_meta] )*
91 #[inline]
92 pub fn build($self) -> $entry $build_block
93 }
94 }
95}
96
/// Returns a zero-initialised `io_uring_sqe`.
///
/// The SQE is a plain-data structure defined by the kernel ABI, so an all-zero
/// value is a valid starting point for every opcode builder below.
#[inline(always)]
fn sqe_zeroed() -> sys::io_uring_sqe {
    unsafe { mem::zeroed() }
}
102
103opcode! {
104 #[derive(Debug)]
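    /// Do not perform any I/O.
    ///
    /// Useful for testing the performance of the io_uring implementation itself.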
108 pub struct Nop { ;; }
109
110 pub const CODE = sys::IORING_OP_NOP;
111
112 pub fn build(self) -> Entry {
113 let Nop {} = self;
114
115 let mut sqe = sqe_zeroed();
116 sqe.opcode = Self::CODE;
117 sqe.fd = -1;
118 Entry(sqe)
119 }
120}
121
122opcode! {
123 #[derive(Debug)]
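    /// Vectored read, equivalent to `preadv2(2)`.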
125 pub struct Readv {
126 fd: { impl sealed::UseFixed },
127 iovec: { *const libc::iovec },
128 len: { u32 },
129 ;;
130 ioprio: u16 = 0,
131 offset: u64 = 0,
132 rw_flags: types::RwFlags = 0,
135 buf_group: u16 = 0
136 }
137
138 pub const CODE = sys::IORING_OP_READV;
139
140 pub fn build(self) -> Entry {
141 let Readv {
142 fd,
143 iovec, len, offset,
144 ioprio, rw_flags,
145 buf_group
146 } = self;
147
148 let mut sqe = sqe_zeroed();
149 sqe.opcode = Self::CODE;
150 assign_fd!(sqe.fd = fd);
151 sqe.ioprio = ioprio;
152 sqe.__bindgen_anon_2.addr = iovec as _;
153 sqe.len = len;
154 sqe.__bindgen_anon_1.off = offset;
155 sqe.__bindgen_anon_3.rw_flags = rw_flags;
156 sqe.__bindgen_anon_4.buf_group = buf_group;
157 Entry(sqe)
158 }
159}
160
161opcode! {
162 #[derive(Debug)]
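    /// Vectored write, equivalent to `pwritev2(2)`.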
164 pub struct Writev {
165 fd: { impl sealed::UseFixed },
166 iovec: { *const libc::iovec },
167 len: { u32 },
168 ;;
169 ioprio: u16 = 0,
170 offset: u64 = 0,
171 rw_flags: types::RwFlags = 0
174 }
175
176 pub const CODE = sys::IORING_OP_WRITEV;
177
178 pub fn build(self) -> Entry {
179 let Writev {
180 fd,
181 iovec, len, offset,
182 ioprio, rw_flags
183 } = self;
184
185 let mut sqe = sqe_zeroed();
186 sqe.opcode = Self::CODE;
187 assign_fd!(sqe.fd = fd);
188 sqe.ioprio = ioprio;
189 sqe.__bindgen_anon_2.addr = iovec as _;
190 sqe.len = len;
191 sqe.__bindgen_anon_1.off = offset;
192 sqe.__bindgen_anon_3.rw_flags = rw_flags;
193 Entry(sqe)
194 }
195}
196
197opcode! {
198 #[derive(Debug)]
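    /// File sync, equivalent to `fsync(2)`.
    ///
    /// Note that completions are unordered with respect to other in-flight
    /// requests unless the operations are explicitly linked.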
207 pub struct Fsync {
208 fd: { impl sealed::UseFixed },
209 ;;
210 flags: types::FsyncFlags = types::FsyncFlags::empty()
214 }
215
216 pub const CODE = sys::IORING_OP_FSYNC;
217
218 pub fn build(self) -> Entry {
219 let Fsync { fd, flags } = self;
220
221 let mut sqe = sqe_zeroed();
222 sqe.opcode = Self::CODE;
223 assign_fd!(sqe.fd = fd);
224 sqe.__bindgen_anon_3.fsync_flags = flags.bits();
225 Entry(sqe)
226 }
227}
228
229opcode! {
230 #[derive(Debug)]
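    /// Read into a pre-registered (fixed) buffer chosen by `buf_index`,
    /// equivalent to `pread(2)`.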
235 pub struct ReadFixed {
236 fd: { impl sealed::UseFixed },
237 buf: { *mut u8 },
238 len: { u32 },
239 buf_index: { u16 },
240 ;;
241 ioprio: u16 = 0,
242 offset: u64 = 0,
244 rw_flags: types::RwFlags = 0
247 }
248
249 pub const CODE = sys::IORING_OP_READ_FIXED;
250
251 pub fn build(self) -> Entry {
252 let ReadFixed {
253 fd,
254 buf, len, offset,
255 buf_index,
256 ioprio, rw_flags
257 } = self;
258
259 let mut sqe = sqe_zeroed();
260 sqe.opcode = Self::CODE;
261 assign_fd!(sqe.fd = fd);
262 sqe.ioprio = ioprio;
263 sqe.__bindgen_anon_2.addr = buf as _;
264 sqe.len = len;
265 sqe.__bindgen_anon_1.off = offset;
266 sqe.__bindgen_anon_3.rw_flags = rw_flags;
267 sqe.__bindgen_anon_4.buf_index = buf_index;
268 Entry(sqe)
269 }
270}
271
272opcode! {
273 #[derive(Debug)]
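    /// Write from a pre-registered (fixed) buffer chosen by `buf_index`,
    /// equivalent to `pwrite(2)`.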
278 pub struct WriteFixed {
279 fd: { impl sealed::UseFixed },
280 buf: { *const u8 },
281 len: { u32 },
282 buf_index: { u16 },
283 ;;
284 ioprio: u16 = 0,
285 offset: u64 = 0,
287 rw_flags: types::RwFlags = 0
290 }
291
292 pub const CODE = sys::IORING_OP_WRITE_FIXED;
293
294 pub fn build(self) -> Entry {
295 let WriteFixed {
296 fd,
297 buf, len, offset,
298 buf_index,
299 ioprio, rw_flags
300 } = self;
301
302 let mut sqe = sqe_zeroed();
303 sqe.opcode = Self::CODE;
304 assign_fd!(sqe.fd = fd);
305 sqe.ioprio = ioprio;
306 sqe.__bindgen_anon_2.addr = buf as _;
307 sqe.len = len;
308 sqe.__bindgen_anon_1.off = offset;
309 sqe.__bindgen_anon_3.rw_flags = rw_flags;
310 sqe.__bindgen_anon_4.buf_index = buf_index;
311 Entry(sqe)
312 }
313}
314
315opcode! {
316 #[derive(Debug)]
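    /// Poll the specified file descriptor for the events given in `flags`.
    ///
    /// The request completes in one-shot mode by default; set `multi` to ask
    /// for multishot behaviour (`IORING_POLL_ADD_MULTI`).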
328 pub struct PollAdd {
329 fd: { impl sealed::UseFixed },
332 flags: { u32 },
333 ;;
334 multi: bool = false
335 }
336
337 pub const CODE = sys::IORING_OP_POLL_ADD;
338
339 pub fn build(self) -> Entry {
340 let PollAdd { fd, flags, multi } = self;
341
342 let mut sqe = sqe_zeroed();
343 sqe.opcode = Self::CODE;
344 assign_fd!(sqe.fd = fd);
345 if multi {
346 sqe.len = sys::IORING_POLL_ADD_MULTI;
347 }
348
349 #[cfg(target_endian = "little")] {
350 sqe.__bindgen_anon_3.poll32_events = flags;
351 }
352
353 #[cfg(target_endian = "big")] {
354 let x = flags << 16;
355 let y = flags >> 16;
356 let flags = x | y;
357 sqe.__bindgen_anon_3.poll32_events = flags;
358 }
359
360 Entry(sqe)
361 }
362}
363
364opcode! {
365 #[derive(Debug)]
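    /// Remove an existing poll request identified by its `user_data`.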
370 pub struct PollRemove {
371 user_data: { u64 }
372 ;;
373 }
374
375 pub const CODE = sys::IORING_OP_POLL_REMOVE;
376
377 pub fn build(self) -> Entry {
378 let PollRemove { user_data } = self;
379
380 let mut sqe = sqe_zeroed();
381 sqe.opcode = Self::CODE;
382 sqe.fd = -1;
383 sqe.__bindgen_anon_2.addr = user_data;
384 Entry(sqe)
385 }
386}
387
388opcode! {
389 #[derive(Debug)]
391 pub struct SyncFileRange {
392 fd: { impl sealed::UseFixed },
393 len: { u32 },
394 ;;
395 offset: u64 = 0,
397 flags: u32 = 0
399 }
400
401 pub const CODE = sys::IORING_OP_SYNC_FILE_RANGE;
402
403 pub fn build(self) -> Entry {
404 let SyncFileRange {
405 fd,
406 len, offset,
407 flags
408 } = self;
409
410 let mut sqe = sqe_zeroed();
411 sqe.opcode = Self::CODE;
412 assign_fd!(sqe.fd = fd);
413 sqe.len = len;
414 sqe.__bindgen_anon_1.off = offset;
415 sqe.__bindgen_anon_3.sync_range_flags = flags;
416 Entry(sqe)
417 }
418}
419
420opcode! {
421 #[derive(Debug)]
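    /// Send a message on a socket, equivalent to `sendmsg(2)`.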
426 pub struct SendMsg {
427 fd: { impl sealed::UseFixed },
428 msg: { *const libc::msghdr },
429 ;;
430 ioprio: u16 = 0,
431 flags: u32 = 0
432 }
433
434 pub const CODE = sys::IORING_OP_SENDMSG;
435
436 pub fn build(self) -> Entry {
437 let SendMsg { fd, msg, ioprio, flags } = self;
438
439 let mut sqe = sqe_zeroed();
440 sqe.opcode = Self::CODE;
441 assign_fd!(sqe.fd = fd);
442 sqe.ioprio = ioprio;
443 sqe.__bindgen_anon_2.addr = msg as _;
444 sqe.len = 1;
445 sqe.__bindgen_anon_3.msg_flags = flags;
446 Entry(sqe)
447 }
448}
449
450opcode! {
451 #[derive(Debug)]
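    /// Receive a message on a socket, equivalent to `recvmsg(2)`.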
455 pub struct RecvMsg {
456 fd: { impl sealed::UseFixed },
457 msg: { *mut libc::msghdr },
458 ;;
459 ioprio: u16 = 0,
460 flags: u32 = 0,
461 buf_group: u16 = 0
462 }
463
464 pub const CODE = sys::IORING_OP_RECVMSG;
465
466 pub fn build(self) -> Entry {
467 let RecvMsg { fd, msg, ioprio, flags, buf_group } = self;
468
469 let mut sqe = sqe_zeroed();
470 sqe.opcode = Self::CODE;
471 assign_fd!(sqe.fd = fd);
472 sqe.ioprio = ioprio;
473 sqe.__bindgen_anon_2.addr = msg as _;
474 sqe.len = 1;
475 sqe.__bindgen_anon_3.msg_flags = flags;
476 sqe.__bindgen_anon_4.buf_group = buf_group;
477 Entry(sqe)
478 }
479}
480
481opcode! {
482 #[derive(Debug)]
505 pub struct RecvMsgMulti {
506 fd: { impl sealed::UseFixed },
507 msg: { *const libc::msghdr },
508 buf_group: { u16 },
509 ;;
510 ioprio: u16 = 0,
511 flags: u32 = 0
512 }
513
514 pub const CODE = sys::IORING_OP_RECVMSG;
515
516 pub fn build(self) -> Entry {
517 let RecvMsgMulti { fd, msg, buf_group, ioprio, flags } = self;
518
519 let mut sqe = sqe_zeroed();
520 sqe.opcode = Self::CODE;
521 assign_fd!(sqe.fd = fd);
522 sqe.__bindgen_anon_2.addr = msg as _;
523 sqe.len = 1;
524 sqe.__bindgen_anon_3.msg_flags = flags;
525 sqe.__bindgen_anon_4.buf_group = buf_group;
526 sqe.flags |= crate::squeue::Flags::BUFFER_SELECT.bits();
527 sqe.ioprio = ioprio | (sys::IORING_RECV_MULTISHOT as u16);
528 Entry(sqe)
529 }
530}
531
532opcode! {
533 #[derive(Debug)]
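    /// Register a timeout operation.
    ///
    /// The request completes when `count` completion events have been posted
    /// or the timespec expires, whichever happens first.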
542 pub struct Timeout {
543 timespec: { *const types::Timespec },
544 ;;
545 count: u32 = 0,
549
550 flags: types::TimeoutFlags = types::TimeoutFlags::empty()
551 }
552
553 pub const CODE = sys::IORING_OP_TIMEOUT;
554
555 pub fn build(self) -> Entry {
556 let Timeout { timespec, count, flags } = self;
557
558 let mut sqe = sqe_zeroed();
559 sqe.opcode = Self::CODE;
560 sqe.fd = -1;
561 sqe.__bindgen_anon_2.addr = timespec as _;
562 sqe.len = 1;
563 sqe.__bindgen_anon_1.off = count as _;
564 sqe.__bindgen_anon_3.timeout_flags = flags.bits();
565 Entry(sqe)
566 }
567}
568
569opcode! {
572 pub struct TimeoutRemove {
574 user_data: { u64 },
575 ;;
576 }
577
578 pub const CODE = sys::IORING_OP_TIMEOUT_REMOVE;
579
580 pub fn build(self) -> Entry {
581 let TimeoutRemove { user_data } = self;
582
583 let mut sqe = sqe_zeroed();
584 sqe.opcode = Self::CODE;
585 sqe.fd = -1;
586 sqe.__bindgen_anon_2.addr = user_data;
587 Entry(sqe)
588 }
589}
590
591opcode! {
592 pub struct TimeoutUpdate {
595 user_data: { u64 },
596 timespec: { *const types::Timespec },
597 ;;
598 flags: types::TimeoutFlags = types::TimeoutFlags::empty()
599 }
600
601 pub const CODE = sys::IORING_OP_TIMEOUT_REMOVE;
602
603 pub fn build(self) -> Entry {
604 let TimeoutUpdate { user_data, timespec, flags } = self;
605
606 let mut sqe = sqe_zeroed();
607 sqe.opcode = Self::CODE;
608 sqe.fd = -1;
609 sqe.__bindgen_anon_1.off = timespec as _;
610 sqe.__bindgen_anon_2.addr = user_data;
611 sqe.__bindgen_anon_3.timeout_flags = flags.bits() | sys::IORING_TIMEOUT_UPDATE;
612 Entry(sqe)
613 }
614}
615
616opcode! {
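    /// Accept a new connection on a socket, equivalent to `accept4(2)`.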
617 pub struct Accept {
619 fd: { impl sealed::UseFixed },
620 addr: { *mut libc::sockaddr },
621 addrlen: { *mut libc::socklen_t },
622 ;;
623 file_index: Option<types::DestinationSlot> = None,
624 flags: i32 = 0
625 }
626
627 pub const CODE = sys::IORING_OP_ACCEPT;
628
629 pub fn build(self) -> Entry {
630 let Accept { fd, addr, addrlen, file_index, flags } = self;
631
632 let mut sqe = sqe_zeroed();
633 sqe.opcode = Self::CODE;
634 assign_fd!(sqe.fd = fd);
635 sqe.__bindgen_anon_2.addr = addr as _;
636 sqe.__bindgen_anon_1.addr2 = addrlen as _;
637 sqe.__bindgen_anon_3.accept_flags = flags as _;
638 if let Some(dest) = file_index {
639 sqe.__bindgen_anon_5.file_index = dest.kernel_index_arg();
640 }
641 Entry(sqe)
642 }
643}
644
645opcode! {
646 pub struct SetSockOpt {
648 fd: { impl sealed::UseFixed },
649 level: { u32 },
650 optname: { u32 },
651 optval: { *const libc::c_void },
652 optlen: { u32 },
653 ;;
654 flags: u32 = 0
655 }
656
657 pub const CODE = sys::IORING_OP_URING_CMD;
658
659 pub fn build(self) -> Entry {
660 let SetSockOpt { fd, level, optname, optval, optlen, flags } = self;
661 let mut sqe = sqe_zeroed();
662 sqe.opcode = Self::CODE;
663 assign_fd!(sqe.fd = fd);
664 sqe.__bindgen_anon_1.__bindgen_anon_1.cmd_op = sys::SOCKET_URING_OP_SETSOCKOPT;
665
666 sqe.__bindgen_anon_2.__bindgen_anon_1.level = level;
667 sqe.__bindgen_anon_2.__bindgen_anon_1.optname = optname;
668 sqe.__bindgen_anon_3.uring_cmd_flags = flags;
669 sqe.__bindgen_anon_5.optlen = optlen;
670 unsafe { *sqe.__bindgen_anon_6.optval.as_mut() = optval as u64 };
671 Entry(sqe)
672 }
673}
674
675opcode! {
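    /// Attempt to cancel an already issued request identified by its `user_data`.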
676 pub struct AsyncCancel {
678 user_data: { u64 }
679 ;;
680
681 }
683
684 pub const CODE = sys::IORING_OP_ASYNC_CANCEL;
685
686 pub fn build(self) -> Entry {
687 let AsyncCancel { user_data } = self;
688
689 let mut sqe = sqe_zeroed();
690 sqe.opcode = Self::CODE;
691 sqe.fd = -1;
692 sqe.__bindgen_anon_2.addr = user_data;
693 Entry(sqe)
694 }
695}
696
697opcode! {
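    /// Timeout for a linked request.
    ///
    /// This entry must be linked to a previous SQE; if that request has not
    /// completed when the timespec expires, it is cancelled.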
698 pub struct LinkTimeout {
702 timespec: { *const types::Timespec },
703 ;;
704 flags: types::TimeoutFlags = types::TimeoutFlags::empty()
705 }
706
707 pub const CODE = sys::IORING_OP_LINK_TIMEOUT;
708
709 pub fn build(self) -> Entry {
710 let LinkTimeout { timespec, flags } = self;
711
712 let mut sqe = sqe_zeroed();
713 sqe.opcode = Self::CODE;
714 sqe.fd = -1;
715 sqe.__bindgen_anon_2.addr = timespec as _;
716 sqe.len = 1;
717 sqe.__bindgen_anon_3.timeout_flags = flags.bits();
718 Entry(sqe)
719 }
720}
721
722opcode! {
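    /// Connect a socket, equivalent to `connect(2)`.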
723 pub struct Connect {
725 fd: { impl sealed::UseFixed },
726 addr: { *const libc::sockaddr },
727 addrlen: { libc::socklen_t }
728 ;;
729 }
730
731 pub const CODE = sys::IORING_OP_CONNECT;
732
733 pub fn build(self) -> Entry {
734 let Connect { fd, addr, addrlen } = self;
735
736 let mut sqe = sqe_zeroed();
737 sqe.opcode = Self::CODE;
738 assign_fd!(sqe.fd = fd);
739 sqe.__bindgen_anon_2.addr = addr as _;
740 sqe.__bindgen_anon_1.off = addrlen as _;
741 Entry(sqe)
742 }
743}
744
745opcode! {
748 pub struct Fallocate {
750 fd: { impl sealed::UseFixed },
751 len: { u64 },
752 ;;
753 offset: u64 = 0,
754 mode: i32 = 0
755 }
756
757 pub const CODE = sys::IORING_OP_FALLOCATE;
758
759 pub fn build(self) -> Entry {
760 let Fallocate { fd, len, offset, mode } = self;
761
762 let mut sqe = sqe_zeroed();
763 sqe.opcode = Self::CODE;
764 assign_fd!(sqe.fd = fd);
765 sqe.__bindgen_anon_2.addr = len;
766 sqe.len = mode as _;
767 sqe.__bindgen_anon_1.off = offset;
768 Entry(sqe)
769 }
770}
771
772opcode! {
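    /// Open a file relative to a directory file descriptor, equivalent to `openat(2)`.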
773 pub struct OpenAt {
775 dirfd: { impl sealed::UseFd },
776 pathname: { *const libc::c_char },
777 ;;
778 file_index: Option<types::DestinationSlot> = None,
779 flags: i32 = 0,
780 mode: libc::mode_t = 0
781 }
782
783 pub const CODE = sys::IORING_OP_OPENAT;
784
785 pub fn build(self) -> Entry {
786 let OpenAt { dirfd, pathname, file_index, flags, mode } = self;
787
788 let mut sqe = sqe_zeroed();
789 sqe.opcode = Self::CODE;
790 sqe.fd = dirfd;
791 sqe.__bindgen_anon_2.addr = pathname as _;
792 sqe.len = mode;
793 sqe.__bindgen_anon_3.open_flags = flags as _;
794 if let Some(dest) = file_index {
795 sqe.__bindgen_anon_5.file_index = dest.kernel_index_arg();
796 }
797 Entry(sqe)
798 }
799}
800
801opcode! {
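    /// Close a file descriptor, equivalent to `close(2)`.
    ///
    /// When given a fixed target, the entry is removed from the registered
    /// file table instead.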
802 pub struct Close {
806 fd: { impl sealed::UseFixed },
807 ;;
808 }
809
810 pub const CODE = sys::IORING_OP_CLOSE;
811
812 pub fn build(self) -> Entry {
813 let Close { fd } = self;
814
815 let mut sqe = sqe_zeroed();
816 sqe.opcode = Self::CODE;
817 match fd {
818 sealed::Target::Fd(fd) => sqe.fd = fd,
819 sealed::Target::Fixed(idx) => {
820 sqe.fd = 0;
821 sqe.__bindgen_anon_5.file_index = idx + 1;
822 }
823 }
824 Entry(sqe)
825 }
826}
827
828opcode! {
829 pub struct FilesUpdate {
833 fds: { *const RawFd },
834 len: { u32 },
835 ;;
836 offset: i32 = 0
837 }
838
839 pub const CODE = sys::IORING_OP_FILES_UPDATE;
840
841 pub fn build(self) -> Entry {
842 let FilesUpdate { fds, len, offset } = self;
843
844 let mut sqe = sqe_zeroed();
845 sqe.opcode = Self::CODE;
846 sqe.fd = -1;
847 sqe.__bindgen_anon_2.addr = fds as _;
848 sqe.len = len;
849 sqe.__bindgen_anon_1.off = offset as _;
850 Entry(sqe)
851 }
852}
853
854opcode! {
855 pub struct Statx {
857 dirfd: { impl sealed::UseFd },
858 pathname: { *const libc::c_char },
859 statxbuf: { *mut types::statx },
860 ;;
861 flags: i32 = 0,
862 mask: u32 = 0
863 }
864
865 pub const CODE = sys::IORING_OP_STATX;
866
867 pub fn build(self) -> Entry {
868 let Statx {
869 dirfd, pathname, statxbuf,
870 flags, mask
871 } = self;
872
873 let mut sqe = sqe_zeroed();
874 sqe.opcode = Self::CODE;
875 sqe.fd = dirfd;
876 sqe.__bindgen_anon_2.addr = pathname as _;
877 sqe.len = mask;
878 sqe.__bindgen_anon_1.off = statxbuf as _;
879 sqe.__bindgen_anon_3.statx_flags = flags as _;
880 Entry(sqe)
881 }
882}
883
884opcode! {
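    /// Read into a buffer, equivalent to `pread(2)`.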
885 pub struct Read {
896 fd: { impl sealed::UseFixed },
897 buf: { *mut u8 },
898 len: { u32 },
899 ;;
900 offset: u64 = 0,
906 ioprio: u16 = 0,
907 rw_flags: types::RwFlags = 0,
908 buf_group: u16 = 0
909 }
910
911 pub const CODE = sys::IORING_OP_READ;
912
913 pub fn build(self) -> Entry {
914 let Read {
915 fd,
916 buf, len, offset,
917 ioprio, rw_flags,
918 buf_group
919 } = self;
920
921 let mut sqe = sqe_zeroed();
922 sqe.opcode = Self::CODE;
923 assign_fd!(sqe.fd = fd);
924 sqe.ioprio = ioprio;
925 sqe.__bindgen_anon_2.addr = buf as _;
926 sqe.len = len;
927 sqe.__bindgen_anon_1.off = offset;
928 sqe.__bindgen_anon_3.rw_flags = rw_flags;
929 sqe.__bindgen_anon_4.buf_group = buf_group;
930 Entry(sqe)
931 }
932}
933
934opcode! {
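    /// Write from a buffer, equivalent to `pwrite(2)`.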
935 pub struct Write {
946 fd: { impl sealed::UseFixed },
947 buf: { *const u8 },
948 len: { u32 },
949 ;;
950 offset: u64 = 0,
956 ioprio: u16 = 0,
957 rw_flags: types::RwFlags = 0
958 }
959
960 pub const CODE = sys::IORING_OP_WRITE;
961
962 pub fn build(self) -> Entry {
963 let Write {
964 fd,
965 buf, len, offset,
966 ioprio, rw_flags
967 } = self;
968
969 let mut sqe = sqe_zeroed();
970 sqe.opcode = Self::CODE;
971 assign_fd!(sqe.fd = fd);
972 sqe.ioprio = ioprio;
973 sqe.__bindgen_anon_2.addr = buf as _;
974 sqe.len = len;
975 sqe.__bindgen_anon_1.off = offset;
976 sqe.__bindgen_anon_3.rw_flags = rw_flags;
977 Entry(sqe)
978 }
979}
980
981opcode! {
982 pub struct Fadvise {
984 fd: { impl sealed::UseFixed },
985 len: { libc::off_t },
986 advice: { i32 },
987 ;;
988 offset: u64 = 0,
989 }
990
991 pub const CODE = sys::IORING_OP_FADVISE;
992
993 pub fn build(self) -> Entry {
994 let Fadvise { fd, len, advice, offset } = self;
995
996 let mut sqe = sqe_zeroed();
997 sqe.opcode = Self::CODE;
998 assign_fd!(sqe.fd = fd);
999 sqe.len = len as _;
1000 sqe.__bindgen_anon_1.off = offset;
1001 sqe.__bindgen_anon_3.fadvise_advice = advice as _;
1002 Entry(sqe)
1003 }
1004}
1005
1006opcode! {
1007 pub struct Madvise {
1009 addr: { *const libc::c_void },
1010 len: { libc::off_t },
1011 advice: { i32 },
1012 ;;
1013 }
1014
1015 pub const CODE = sys::IORING_OP_MADVISE;
1016
1017 pub fn build(self) -> Entry {
1018 let Madvise { addr, len, advice } = self;
1019
1020 let mut sqe = sqe_zeroed();
1021 sqe.opcode = Self::CODE;
1022 sqe.fd = -1;
1023 sqe.__bindgen_anon_2.addr = addr as _;
1024 sqe.len = len as _;
1025 sqe.__bindgen_anon_3.fadvise_advice = advice as _;
1026 Entry(sqe)
1027 }
1028}
1029
1030opcode! {
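    /// Send data on a socket, equivalent to `send(2)` (or `sendto(2)` when a
    /// destination address is set).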
1031 pub struct Send {
1033 fd: { impl sealed::UseFixed },
1034 buf: { *const u8 },
1035 len: { u32 },
1036 ;;
1037 flags: i32 = 0,
1038
1039 dest_addr: *const libc::sockaddr = core::ptr::null(),
1044 dest_addr_len: libc::socklen_t = 0,
1045 }
1046
1047 pub const CODE = sys::IORING_OP_SEND;
1048
1049 pub fn build(self) -> Entry {
1050 let Send { fd, buf, len, flags, dest_addr, dest_addr_len } = self;
1051
1052 let mut sqe = sqe_zeroed();
1053 sqe.opcode = Self::CODE;
1054 assign_fd!(sqe.fd = fd);
1055 sqe.__bindgen_anon_2.addr = buf as _;
1056 sqe.__bindgen_anon_1.addr2 = dest_addr as _;
1057 sqe.__bindgen_anon_5.__bindgen_anon_1.addr_len = dest_addr_len as _;
1058 sqe.len = len;
1059 sqe.__bindgen_anon_3.msg_flags = flags as _;
1060 Entry(sqe)
1061 }
1062}
1063
1064opcode! {
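    /// Receive data from a socket, equivalent to `recv(2)`.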
1065 pub struct Recv {
1067 fd: { impl sealed::UseFixed },
1068 buf: { *mut u8 },
1069 len: { u32 },
1070 ;;
1071 flags: i32 = 0,
1072 buf_group: u16 = 0
1073 }
1074
1075 pub const CODE = sys::IORING_OP_RECV;
1076
1077 pub fn build(self) -> Entry {
1078 let Recv { fd, buf, len, flags, buf_group } = self;
1079
1080 let mut sqe = sqe_zeroed();
1081 sqe.opcode = Self::CODE;
1082 assign_fd!(sqe.fd = fd);
1083 sqe.__bindgen_anon_2.addr = buf as _;
1084 sqe.len = len;
1085 sqe.__bindgen_anon_3.msg_flags = flags as _;
1086 sqe.__bindgen_anon_4.buf_group = buf_group;
1087 Entry(sqe)
1088 }
1089}
1090
1091opcode! {
1092 pub struct RecvMulti {
1108 fd: { impl sealed::UseFixed },
1109 buf_group: { u16 },
1110 ;;
1111 flags: i32 = 0,
1112 }
1113
1114 pub const CODE = sys::IORING_OP_RECV;
1115
1116 pub fn build(self) -> Entry {
1117 let RecvMulti { fd, buf_group, flags } = self;
1118
1119 let mut sqe = sqe_zeroed();
1120 sqe.opcode = Self::CODE;
1121 assign_fd!(sqe.fd = fd);
1122 sqe.__bindgen_anon_3.msg_flags = flags as _;
1123 sqe.__bindgen_anon_4.buf_group = buf_group;
1124 sqe.flags |= crate::squeue::Flags::BUFFER_SELECT.bits();
1125 sqe.ioprio = sys::IORING_RECV_MULTISHOT as _;
1126 Entry(sqe)
1127 }
1128}
1129
1130opcode! {
1131 pub struct OpenAt2 {
1133 dirfd: { impl sealed::UseFd },
1134 pathname: { *const libc::c_char },
1135 how: { *const types::OpenHow }
1136 ;;
1137 file_index: Option<types::DestinationSlot> = None,
1138 }
1139
1140 pub const CODE = sys::IORING_OP_OPENAT2;
1141
1142 pub fn build(self) -> Entry {
1143 let OpenAt2 { dirfd, pathname, how, file_index } = self;
1144
1145 let mut sqe = sqe_zeroed();
1146 sqe.opcode = Self::CODE;
1147 sqe.fd = dirfd;
1148 sqe.__bindgen_anon_2.addr = pathname as _;
1149 sqe.len = mem::size_of::<sys::open_how>() as _;
1150 sqe.__bindgen_anon_1.off = how as _;
1151 if let Some(dest) = file_index {
1152 sqe.__bindgen_anon_5.file_index = dest.kernel_index_arg();
1153 }
1154 Entry(sqe)
1155 }
1156}
1157
1158opcode! {
1159 pub struct EpollCtl {
1161 epfd: { impl sealed::UseFixed },
1162 fd: { impl sealed::UseFd },
1163 op: { i32 },
1164 ev: { *const types::epoll_event },
1165 ;;
1166 }
1167
1168 pub const CODE = sys::IORING_OP_EPOLL_CTL;
1169
1170 pub fn build(self) -> Entry {
1171 let EpollCtl { epfd, fd, op, ev } = self;
1172
1173 let mut sqe = sqe_zeroed();
1174 sqe.opcode = Self::CODE;
1175 assign_fd!(sqe.fd = epfd);
1176 sqe.__bindgen_anon_2.addr = ev as _;
1177 sqe.len = op as _;
1178 sqe.__bindgen_anon_1.off = fd as _;
1179 Entry(sqe)
1180 }
1181}
1182
1183opcode! {
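    /// Splice data to/from a pipe, equivalent to `splice(2)`; at least one of
    /// `fd_in` and `fd_out` must refer to a pipe.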
1186 pub struct Splice {
1191 fd_in: { impl sealed::UseFixed },
1192 off_in: { i64 },
1193 fd_out: { impl sealed::UseFixed },
1194 off_out: { i64 },
1195 len: { u32 },
1196 ;;
1197 flags: u32 = 0
1199 }
1200
1201 pub const CODE = sys::IORING_OP_SPLICE;
1202
1203 pub fn build(self) -> Entry {
1204 let Splice { fd_in, off_in, fd_out, off_out, len, mut flags } = self;
1205
1206 let mut sqe = sqe_zeroed();
1207 sqe.opcode = Self::CODE;
1208 assign_fd!(sqe.fd = fd_out);
1209 sqe.len = len;
1210 sqe.__bindgen_anon_1.off = off_out as _;
1211
1212 sqe.__bindgen_anon_5.splice_fd_in = match fd_in {
1213 sealed::Target::Fd(fd) => fd,
1214 sealed::Target::Fixed(idx) => {
1215 flags |= sys::SPLICE_F_FD_IN_FIXED;
1216 idx as _
1217 }
1218 };
1219
1220 sqe.__bindgen_anon_2.splice_off_in = off_in as _;
1221 sqe.__bindgen_anon_3.splice_flags = flags;
1222 Entry(sqe)
1223 }
1224}
1225
1226opcode! {
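    /// Register `nbufs` buffers of `len` bytes each, laid out contiguously from
    /// `addr`, with ids starting at `bid` in buffer group `bgid`, for use with
    /// `IOSQE_BUFFER_SELECT`.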
1227 pub struct ProvideBuffers {
1231 addr: { *mut u8 },
1232 len: { i32 },
1233 nbufs: { u16 },
1234 bgid: { u16 },
1235 bid: { u16 }
1236 ;;
1237 }
1238
1239 pub const CODE = sys::IORING_OP_PROVIDE_BUFFERS;
1240
1241 pub fn build(self) -> Entry {
1242 let ProvideBuffers { addr, len, nbufs, bgid, bid } = self;
1243
1244 let mut sqe = sqe_zeroed();
1245 sqe.opcode = Self::CODE;
1246 sqe.fd = nbufs as _;
1247 sqe.__bindgen_anon_2.addr = addr as _;
1248 sqe.len = len as _;
1249 sqe.__bindgen_anon_1.off = bid as _;
1250 sqe.__bindgen_anon_4.buf_group = bgid;
1251 Entry(sqe)
1252 }
1253}
1254
1255opcode! {
1256 pub struct RemoveBuffers {
1259 nbufs: { u16 },
1260 bgid: { u16 }
1261 ;;
1262 }
1263
1264 pub const CODE = sys::IORING_OP_REMOVE_BUFFERS;
1265
1266 pub fn build(self) -> Entry {
1267 let RemoveBuffers { nbufs, bgid } = self;
1268
1269 let mut sqe = sqe_zeroed();
1270 sqe.opcode = Self::CODE;
1271 sqe.fd = nbufs as _;
1272 sqe.__bindgen_anon_4.buf_group = bgid;
1273 Entry(sqe)
1274 }
1275}
1276
1277opcode! {
1280 pub struct Tee {
1282 fd_in: { impl sealed::UseFixed },
1283 fd_out: { impl sealed::UseFixed },
1284 len: { u32 }
1285 ;;
1286 flags: u32 = 0
1287 }
1288
1289 pub const CODE = sys::IORING_OP_TEE;
1290
1291 pub fn build(self) -> Entry {
1292 let Tee { fd_in, fd_out, len, mut flags } = self;
1293
1294 let mut sqe = sqe_zeroed();
1295 sqe.opcode = Self::CODE;
1296
1297 assign_fd!(sqe.fd = fd_out);
1298 sqe.len = len;
1299
1300 sqe.__bindgen_anon_5.splice_fd_in = match fd_in {
1301 sealed::Target::Fd(fd) => fd,
1302 sealed::Target::Fixed(idx) => {
1303 flags |= sys::SPLICE_F_FD_IN_FIXED;
1304 idx as _
1305 }
1306 };
1307
1308 sqe.__bindgen_anon_3.splice_flags = flags;
1309
1310 Entry(sqe)
1311 }
1312}
1313
1314opcode! {
1317 pub struct Shutdown {
1320 fd: { impl sealed::UseFixed },
1321 how: { i32 },
1322 ;;
1323 }
1324
1325 pub const CODE = sys::IORING_OP_SHUTDOWN;
1326
1327 pub fn build(self) -> Entry {
1328 let Shutdown { fd, how } = self;
1329
1330 let mut sqe = sqe_zeroed();
1331 sqe.opcode = Self::CODE;
1332 assign_fd!(sqe.fd = fd);
1333 sqe.len = how as _;
1334 Entry(sqe)
1335 }
1336}
1337
1338opcode! {
1339 pub struct RenameAt {
1342 olddirfd: { impl sealed::UseFd },
1343 oldpath: { *const libc::c_char },
1344 newdirfd: { impl sealed::UseFd },
1345 newpath: { *const libc::c_char },
1346 ;;
1347 flags: u32 = 0
1348 }
1349
1350 pub const CODE = sys::IORING_OP_RENAMEAT;
1351
1352 pub fn build(self) -> Entry {
1353 let RenameAt {
1354 olddirfd, oldpath,
1355 newdirfd, newpath,
1356 flags
1357 } = self;
1358
1359 let mut sqe = sqe_zeroed();
1360 sqe.opcode = Self::CODE;
1361 sqe.fd = olddirfd;
1362 sqe.__bindgen_anon_2.addr = oldpath as _;
1363 sqe.len = newdirfd as _;
1364 sqe.__bindgen_anon_1.off = newpath as _;
1365 sqe.__bindgen_anon_3.rename_flags = flags;
1366 Entry(sqe)
1367 }
1368}
1369
1370opcode! {
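    /// Remove a directory entry, equivalent to `unlinkat(2)`.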
1371 pub struct UnlinkAt {
1374 dirfd: { impl sealed::UseFd },
1375 pathname: { *const libc::c_char },
1376 ;;
1377 flags: i32 = 0
1378 }
1379
1380 pub const CODE = sys::IORING_OP_UNLINKAT;
1381
1382 pub fn build(self) -> Entry {
1383 let UnlinkAt { dirfd, pathname, flags } = self;
1384
1385 let mut sqe = sqe_zeroed();
1386 sqe.opcode = Self::CODE;
1387 sqe.fd = dirfd;
1388 sqe.__bindgen_anon_2.addr = pathname as _;
1389 sqe.__bindgen_anon_3.unlink_flags = flags as _;
1390 Entry(sqe)
1391 }
1392}
1393
1394opcode! {
1397 pub struct MkDirAt {
1399 dirfd: { impl sealed::UseFd },
1400 pathname: { *const libc::c_char },
1401 ;;
1402 mode: libc::mode_t = 0
1403 }
1404
1405 pub const CODE = sys::IORING_OP_MKDIRAT;
1406
1407 pub fn build(self) -> Entry {
1408 let MkDirAt { dirfd, pathname, mode } = self;
1409
1410 let mut sqe = sqe_zeroed();
1411 sqe.opcode = Self::CODE;
1412 sqe.fd = dirfd;
1413 sqe.__bindgen_anon_2.addr = pathname as _;
1414 sqe.len = mode;
1415 Entry(sqe)
1416 }
1417}
1418
1419opcode! {
1420 pub struct SymlinkAt {
1422 newdirfd: { impl sealed::UseFd },
1423 target: { *const libc::c_char },
1424 linkpath: { *const libc::c_char },
1425 ;;
1426 }
1427
1428 pub const CODE = sys::IORING_OP_SYMLINKAT;
1429
1430 pub fn build(self) -> Entry {
1431 let SymlinkAt { newdirfd, target, linkpath } = self;
1432
1433 let mut sqe = sqe_zeroed();
1434 sqe.opcode = Self::CODE;
1435 sqe.fd = newdirfd;
1436 sqe.__bindgen_anon_2.addr = target as _;
1437 sqe.__bindgen_anon_1.addr2 = linkpath as _;
1438 Entry(sqe)
1439 }
1440}
1441
1442opcode! {
1443 pub struct LinkAt {
1445 olddirfd: { impl sealed::UseFd },
1446 oldpath: { *const libc::c_char },
1447 newdirfd: { impl sealed::UseFd },
1448 newpath: { *const libc::c_char },
1449 ;;
1450 flags: i32 = 0
1451 }
1452
1453 pub const CODE = sys::IORING_OP_LINKAT;
1454
1455 pub fn build(self) -> Entry {
1456 let LinkAt { olddirfd, oldpath, newdirfd, newpath, flags } = self;
1457
1458 let mut sqe = sqe_zeroed();
1459 sqe.opcode = Self::CODE;
1460 sqe.fd = olddirfd as _;
1461 sqe.__bindgen_anon_2.addr = oldpath as _;
1462 sqe.len = newdirfd as _;
1463 sqe.__bindgen_anon_1.addr2 = newpath as _;
1464 sqe.__bindgen_anon_3.hardlink_flags = flags as _;
1465 Entry(sqe)
1466 }
1467}
1468
1469opcode! {
1472 pub struct GetXattr {
1474 name: { *const libc::c_char },
1475 value: { *mut libc::c_void },
1476 path: { *const libc::c_char },
1477 len: { u32 },
1478 ;;
1479 }
1480
1481 pub const CODE = sys::IORING_OP_GETXATTR;
1482
1483 pub fn build(self) -> Entry {
1484 let GetXattr { name, value, path, len } = self;
1485
1486 let mut sqe = sqe_zeroed();
1487 sqe.opcode = Self::CODE;
1488 sqe.__bindgen_anon_2.addr = name as _;
1489 sqe.len = len;
1490 sqe.__bindgen_anon_1.off = value as _;
1491 unsafe { sqe.__bindgen_anon_6.__bindgen_anon_1.as_mut().addr3 = path as _ };
1492 sqe.__bindgen_anon_3.xattr_flags = 0;
1493 Entry(sqe)
1494 }
1495}
1496
1497opcode! {
1498 pub struct SetXattr {
1500 name: { *const libc::c_char },
1501 value: { *const libc::c_void },
1502 path: { *const libc::c_char },
1503 len: { u32 },
1504 ;;
1505 flags: i32 = 0
1506 }
1507
1508 pub const CODE = sys::IORING_OP_SETXATTR;
1509
1510 pub fn build(self) -> Entry {
1511 let SetXattr { name, value, path, flags, len } = self;
1512
1513 let mut sqe = sqe_zeroed();
1514 sqe.opcode = Self::CODE;
1515 sqe.__bindgen_anon_2.addr = name as _;
1516 sqe.len = len;
1517 sqe.__bindgen_anon_1.off = value as _;
1518 unsafe { sqe.__bindgen_anon_6.__bindgen_anon_1.as_mut().addr3 = path as _ };
1519 sqe.__bindgen_anon_3.xattr_flags = flags as _;
1520 Entry(sqe)
1521 }
1522}
1523
1524opcode! {
1525 pub struct FGetXattr {
1527 fd: { impl sealed::UseFixed },
1528 name: { *const libc::c_char },
1529 value: { *mut libc::c_void },
1530 len: { u32 },
1531 ;;
1532 }
1533
1534 pub const CODE = sys::IORING_OP_FGETXATTR;
1535
1536 pub fn build(self) -> Entry {
1537 let FGetXattr { fd, name, value, len } = self;
1538
1539 let mut sqe = sqe_zeroed();
1540 sqe.opcode = Self::CODE;
1541 assign_fd!(sqe.fd = fd);
1542 sqe.__bindgen_anon_2.addr = name as _;
1543 sqe.len = len;
1544 sqe.__bindgen_anon_1.off = value as _;
1545 sqe.__bindgen_anon_3.xattr_flags = 0;
1546 Entry(sqe)
1547 }
1548}
1549
1550opcode! {
1551 pub struct FSetXattr {
1553 fd: { impl sealed::UseFixed },
1554 name: { *const libc::c_char },
1555 value: { *const libc::c_void },
1556 len: { u32 },
1557 ;;
1558 flags: i32 = 0
1559 }
1560
1561 pub const CODE = sys::IORING_OP_FSETXATTR;
1562
1563 pub fn build(self) -> Entry {
1564 let FSetXattr { fd, name, value, flags, len } = self;
1565
1566 let mut sqe = sqe_zeroed();
1567 sqe.opcode = Self::CODE;
1568 assign_fd!(sqe.fd = fd);
1569 sqe.__bindgen_anon_2.addr = name as _;
1570 sqe.len = len;
1571 sqe.__bindgen_anon_1.off = value as _;
1572 sqe.__bindgen_anon_3.xattr_flags = flags as _;
1573 Entry(sqe)
1574 }
1575}
1576
1577opcode! {
1580 pub struct MsgRingData {
1582 ring_fd: { impl sealed::UseFd },
1583 result: { i32 },
1584 user_data: { u64 },
1585 user_flags: { Option<u32> },
1586 ;;
1587 opcode_flags: u32 = 0
1588 }
1589
1590 pub const CODE = sys::IORING_OP_MSG_RING;
1591
1592 pub fn build(self) -> Entry {
1593 let MsgRingData { ring_fd, result, user_data, user_flags, opcode_flags } = self;
1594
1595 let mut sqe = sqe_zeroed();
1596 sqe.opcode = Self::CODE;
1597 sqe.__bindgen_anon_2.addr = sys::IORING_MSG_DATA.into();
1598 sqe.fd = ring_fd;
1599 sqe.len = result as u32;
1600 sqe.__bindgen_anon_1.off = user_data;
1601 sqe.__bindgen_anon_3.msg_ring_flags = opcode_flags;
1602 if let Some(flags) = user_flags {
1603 sqe.__bindgen_anon_5.file_index = flags;
            unsafe { sqe.__bindgen_anon_3.msg_ring_flags |= sys::IORING_MSG_RING_FLAGS_PASS };
1605 }
1606 Entry(sqe)
1607 }
1608}
1609
1610opcode! {
1613 pub struct AsyncCancel2 {
1617 builder: { types::CancelBuilder }
1618 ;;
1619 }
1620
1621 pub const CODE = sys::IORING_OP_ASYNC_CANCEL;
1622
1623 pub fn build(self) -> Entry {
1624 let AsyncCancel2 { builder } = self;
1625
1626 let mut sqe = sqe_zeroed();
1627 sqe.opcode = Self::CODE;
1628 sqe.fd = builder.to_fd();
1629 sqe.__bindgen_anon_2.addr = builder.user_data.unwrap_or(0);
1630 sqe.__bindgen_anon_3.cancel_flags = builder.flags.bits();
1631 Entry(sqe)
1632 }
1633}
1634
1635opcode! {
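    /// Pass a 16-byte command payload to the target file's io_uring command
    /// handler (`IORING_OP_URING_CMD`).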
1636 pub struct UringCmd16 {
1638 fd: { impl sealed::UseFixed },
1639 cmd_op: { u32 },
1640 ;;
1641 buf_index: Option<u16> = None,
1644 cmd: [u8; 16] = [0u8; 16]
1646 }
1647
1648 pub const CODE = sys::IORING_OP_URING_CMD;
1649
1650 pub fn build(self) -> Entry {
1651 let UringCmd16 { fd, cmd_op, cmd, buf_index } = self;
1652
1653 let mut sqe = sqe_zeroed();
1654 sqe.opcode = Self::CODE;
1655 assign_fd!(sqe.fd = fd);
1656 sqe.__bindgen_anon_1.__bindgen_anon_1.cmd_op = cmd_op;
1657 unsafe { *sqe.__bindgen_anon_6.cmd.as_mut().as_mut_ptr().cast::<[u8; 16]>() = cmd };
1658 if let Some(buf_index) = buf_index {
1659 sqe.__bindgen_anon_4.buf_index = buf_index;
1660 unsafe {
1661 sqe.__bindgen_anon_3.uring_cmd_flags |= sys::IORING_URING_CMD_FIXED;
1662 }
1663 }
1664 Entry(sqe)
1665 }
1666}
1667
1668opcode! {
1669 pub struct UringCmd80 {
1671 fd: { impl sealed::UseFixed },
1672 cmd_op: { u32 },
1673 ;;
1674 buf_index: Option<u16> = None,
1677 cmd: [u8; 80] = [0u8; 80]
1679 }
1680
1681 pub const CODE = sys::IORING_OP_URING_CMD;
1682
1683 pub fn build(self) -> Entry128 {
1684 let UringCmd80 { fd, cmd_op, cmd, buf_index } = self;
1685
1686 let cmd1 = cmd[..16].try_into().unwrap();
1687 let cmd2 = cmd[16..].try_into().unwrap();
1688
1689 let mut sqe = sqe_zeroed();
1690 sqe.opcode = Self::CODE;
1691 assign_fd!(sqe.fd = fd);
1692 sqe.__bindgen_anon_1.__bindgen_anon_1.cmd_op = cmd_op;
1693 unsafe { *sqe.__bindgen_anon_6.cmd.as_mut().as_mut_ptr().cast::<[u8; 16]>() = cmd1 };
1694 if let Some(buf_index) = buf_index {
1695 sqe.__bindgen_anon_4.buf_index = buf_index;
1696 unsafe {
1697 sqe.__bindgen_anon_3.uring_cmd_flags |= sys::IORING_URING_CMD_FIXED;
1698 }
1699 }
1700 Entry128(Entry(sqe), cmd2)
1701 }
1702}
1703
1704opcode! {
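    /// Create a socket, equivalent to `socket(2)`; the result can optionally be
    /// placed in a fixed file slot.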
1705 pub struct Socket {
1715 domain: { i32 },
1716 socket_type: { i32 },
1717 protocol: { i32 },
1718 ;;
1719 file_index: Option<types::DestinationSlot> = None,
1720 flags: types::RwFlags = 0,
1721 }
1722
1723 pub const CODE = sys::IORING_OP_SOCKET;
1724
1725 pub fn build(self) -> Entry {
1726 let Socket { domain, socket_type, protocol, file_index, flags } = self;
1727
1728 let mut sqe = sqe_zeroed();
1729 sqe.opcode = Self::CODE;
1730 sqe.fd = domain as _;
1731 sqe.__bindgen_anon_1.off = socket_type as _;
1732 sqe.len = protocol as _;
1733 sqe.__bindgen_anon_3.rw_flags = flags;
1734 if let Some(dest) = file_index {
1735 sqe.__bindgen_anon_5.file_index = dest.kernel_index_arg();
1736 }
1737 Entry(sqe)
1738 }
1739}
1740
1741opcode! {
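    /// Multishot variant of `Accept`: keeps accepting connections and posts one
    /// completion per accepted socket until it is cancelled or fails.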
1742 pub struct AcceptMulti {
1748 fd: { impl sealed::UseFixed },
1749 ;;
1750 allocate_file_index: bool = false,
1751 flags: i32 = 0
1752 }
1753
1754 pub const CODE = sys::IORING_OP_ACCEPT;
1755
1756 pub fn build(self) -> Entry {
1757 let AcceptMulti { fd, allocate_file_index, flags } = self;
1758
1759 let mut sqe = sqe_zeroed();
1760 sqe.opcode = Self::CODE;
1761 assign_fd!(sqe.fd = fd);
1762 sqe.ioprio = sys::IORING_ACCEPT_MULTISHOT as u16;
1763 sqe.__bindgen_anon_3.accept_flags = flags as _;
1766 if allocate_file_index {
1767 sqe.__bindgen_anon_5.file_index = sys::IORING_FILE_INDEX_ALLOC as u32;
1768 }
1769 Entry(sqe)
1770 }
1771}
1772
1773opcode! {
1776 pub struct MsgRingSendFd {
1778 ring_fd: { impl sealed::UseFd },
1779 fixed_slot_src: { types::Fixed },
1780 dest_slot_index: { types::DestinationSlot },
1781 user_data: { u64 },
1782 ;;
1783 opcode_flags: u32 = 0
1784 }
1785
1786 pub const CODE = sys::IORING_OP_MSG_RING;
1787
1788 pub fn build(self) -> Entry {
1789 let MsgRingSendFd { ring_fd, fixed_slot_src, dest_slot_index, user_data, opcode_flags } = self;
1790
1791 let mut sqe = sqe_zeroed();
1792 sqe.opcode = Self::CODE;
1793 sqe.__bindgen_anon_2.addr = sys::IORING_MSG_SEND_FD.into();
1794 sqe.fd = ring_fd;
1795 sqe.__bindgen_anon_1.off = user_data;
1796 unsafe { sqe.__bindgen_anon_6.__bindgen_anon_1.as_mut().addr3 = fixed_slot_src.0 as u64 };
1797 sqe.__bindgen_anon_5.file_index = dest_slot_index.kernel_index_arg();
1798 sqe.__bindgen_anon_3.msg_ring_flags = opcode_flags;
1799 Entry(sqe)
1800 }
1801}
1802
1803opcode! {
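    /// Zero-copy send, `IORING_OP_SEND_ZC`.
    ///
    /// The kernel may keep referencing the buffer after the initial completion,
    /// so it posts an additional notification once the buffer can be reused.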
1806 pub struct SendZc {
1820 fd: { impl sealed::UseFixed },
1821 buf: { *const u8 },
1822 len: { u32 },
1823 ;;
1824 buf_index: Option<u16> = None,
1831 dest_addr: *const libc::sockaddr = core::ptr::null(),
1832 dest_addr_len: libc::socklen_t = 0,
1833 flags: i32 = 0,
1834 zc_flags: u16 = 0,
1835 }
1836
1837 pub const CODE = sys::IORING_OP_SEND_ZC;
1838
1839 pub fn build(self) -> Entry {
1840 let SendZc { fd, buf, len, buf_index, dest_addr, dest_addr_len, flags, zc_flags } = self;
1841
1842 let mut sqe = sqe_zeroed();
1843 sqe.opcode = Self::CODE;
1844 assign_fd!(sqe.fd = fd);
1845 sqe.__bindgen_anon_2.addr = buf as _;
1846 sqe.len = len;
1847 sqe.__bindgen_anon_3.msg_flags = flags as _;
1848 sqe.ioprio = zc_flags;
1849 if let Some(buf_index) = buf_index {
1850 sqe.__bindgen_anon_4.buf_index = buf_index;
1851 sqe.ioprio |= sys::IORING_RECVSEND_FIXED_BUF as u16;
1852 }
1853 sqe.__bindgen_anon_1.addr2 = dest_addr as _;
1854 sqe.__bindgen_anon_5.__bindgen_anon_1.addr_len = dest_addr_len as _;
1855 Entry(sqe)
1856 }
1857}
1858
1859opcode! {
1862 #[derive(Debug)]
1867 pub struct SendMsgZc {
1868 fd: { impl sealed::UseFixed },
1869 msg: { *const libc::msghdr },
1870 ;;
1871 ioprio: u16 = 0,
1872 flags: u32 = 0
1873 }
1874
1875 pub const CODE = sys::IORING_OP_SENDMSG_ZC;
1876
1877 pub fn build(self) -> Entry {
1878 let SendMsgZc { fd, msg, ioprio, flags } = self;
1879
1880 let mut sqe = sqe_zeroed();
1881 sqe.opcode = Self::CODE;
1882 assign_fd!(sqe.fd = fd);
1883 sqe.ioprio = ioprio;
1884 sqe.__bindgen_anon_2.addr = msg as _;
1885 sqe.len = 1;
1886 sqe.__bindgen_anon_3.msg_flags = flags;
1887 Entry(sqe)
1888 }
1889}
1890
1891opcode! {
1894 pub struct ReadMulti {
1896 fd: { impl sealed::UseFixed },
1897 len: { u32 },
1898 buf_group: { u16 },
1899 ;;
1900 offset: u64 = 0,
1901 }
1902
1903 pub const CODE = sys::IORING_OP_READ_MULTISHOT;
1904
1905 pub fn build(self) -> Entry {
1906 let Self { fd, len, buf_group, offset } = self;
1907
1908 let mut sqe = sqe_zeroed();
1909 sqe.opcode = Self::CODE;
1910 assign_fd!(sqe.fd = fd);
1911 sqe.__bindgen_anon_1.off = offset;
1912 sqe.len = len;
1913 sqe.__bindgen_anon_4.buf_group = buf_group;
1914 sqe.flags = crate::squeue::Flags::BUFFER_SELECT.bits();
1915 Entry(sqe)
1916 }
1917}
1918
1919opcode! {
1920 #[derive(Debug)]
1929 pub struct FutexWait {
1930 futex: { *const u32 },
1931 val: { u64 },
1932 mask: { u64 },
1933 futex_flags: { u32 },
1934 ;;
1935 flags: u32 = 0
1936 }
1937
1938 pub const CODE = sys::IORING_OP_FUTEX_WAIT;
1939
1940 pub fn build(self) -> Entry {
1941 let FutexWait { futex, val, mask, futex_flags, flags } = self;
1942
1943 let mut sqe = sqe_zeroed();
1944 sqe.opcode = Self::CODE;
1945 sqe.fd = futex_flags as _;
1946 sqe.__bindgen_anon_2.addr = futex as usize as _;
1947 sqe.__bindgen_anon_1.off = val;
1948 unsafe { sqe.__bindgen_anon_6.__bindgen_anon_1.as_mut().addr3 = mask };
1949 sqe.__bindgen_anon_3.futex_flags = flags;
1950 Entry(sqe)
1951 }
1952}
1953
1954opcode! {
1955 #[derive(Debug)]
1963 pub struct FutexWake {
1964 futex: { *const u32 },
1965 val: { u64 },
1966 mask: { u64 },
1967 futex_flags: { u32 },
1968 ;;
1969 flags: u32 = 0
1970 }
1971
1972 pub const CODE = sys::IORING_OP_FUTEX_WAKE;
1973
1974 pub fn build(self) -> Entry {
1975 let FutexWake { futex, val, mask, futex_flags, flags } = self;
1976
1977 let mut sqe = sqe_zeroed();
1978 sqe.opcode = Self::CODE;
1979 sqe.fd = futex_flags as _;
1980 sqe.__bindgen_anon_2.addr = futex as usize as _;
1981 sqe.__bindgen_anon_1.off = val;
1982 unsafe { sqe.__bindgen_anon_6.__bindgen_anon_1.as_mut().addr3 = mask };
1983 sqe.__bindgen_anon_3.futex_flags = flags;
1984 Entry(sqe)
1985 }
1986}
1987
1988opcode! {
1989 #[derive(Debug)]
1995 pub struct FutexWaitV {
1996 futexv: { *const types::FutexWaitV },
1997 nr_futex: { u32 },
1998 ;;
1999 flags: u32 = 0
2000 }
2001
2002 pub const CODE = sys::IORING_OP_FUTEX_WAITV;
2003
2004 pub fn build(self) -> Entry {
2005 let FutexWaitV { futexv, nr_futex, flags } = self;
2006
2007 let mut sqe = sqe_zeroed();
2008 sqe.opcode = Self::CODE;
2009 sqe.__bindgen_anon_2.addr = futexv as usize as _;
2010 sqe.len = nr_futex;
2011 sqe.__bindgen_anon_3.futex_flags = flags;
2012 Entry(sqe)
2013 }
2014}
2015
2016opcode! {
2017 #[derive(Debug)]
2021 pub struct WaitId {
2022 idtype: { libc::idtype_t },
2023 id: { libc::id_t },
2024 options: { libc::c_int },
2025 ;;
2026 infop: *const libc::siginfo_t = std::ptr::null(),
2027 flags: libc::c_uint = 0,
2028 }
2029
2030 pub const CODE = sys::IORING_OP_WAITID;
2031
2032 pub fn build(self) -> Entry {
2033 let mut sqe = sqe_zeroed();
2034 sqe.opcode = Self::CODE;
2035 sqe.fd = self.id as _;
2036 sqe.len = self.idtype as _;
2037 sqe.__bindgen_anon_3.waitid_flags = self.flags;
2038 sqe.__bindgen_anon_5.file_index = self.options as _;
2039 sqe.__bindgen_anon_1.addr2 = self.infop as _;
2040 Entry(sqe)
2041 }
2042}
2043
2044opcode! {
2047 #[derive(Debug)]
2052 pub struct FixedFdInstall {
2053 fd: { types::Fixed },
2054 file_flags: { u32 },
2055 ;;
2056 }
2057
2058 pub const CODE = sys::IORING_OP_FIXED_FD_INSTALL;
2059
2060 pub fn build(self) -> Entry {
2061 let FixedFdInstall { fd, file_flags } = self;
2062
2063 let mut sqe = sqe_zeroed();
2064 sqe.opcode = Self::CODE;
2065 sqe.fd = fd.0 as _;
2066 sqe.flags = crate::squeue::Flags::FIXED_FILE.bits();
2067 sqe.__bindgen_anon_3.install_fd_flags = file_flags;
2068 Entry(sqe)
2069 }
2070}
2071
2072opcode! {
2075 #[derive(Debug)]
2077 pub struct Ftruncate {
2078 fd: { impl sealed::UseFixed },
2079 len: { u64 },
2080 ;;
2081 }
2082
2083 pub const CODE = sys::IORING_OP_FTRUNCATE;
2084
2085 pub fn build(self) -> Entry {
2086 let Ftruncate { fd, len } = self;
2087
2088 let mut sqe = sqe_zeroed();
2089 sqe.opcode = Self::CODE;
2090 assign_fd!(sqe.fd = fd);
2091 sqe.__bindgen_anon_1.off = len;
2092 Entry(sqe)
2093 }
2094}
2095
2096opcode! {
2099 pub struct SendBundle {
2101 fd: { impl sealed::UseFixed },
2102 buf_group: { u16 },
2103 ;;
2104 flags: i32 = 0,
2105 len: u32 = 0
2106 }
2107
2108 pub const CODE = sys::IORING_OP_SEND;
2109
2110 pub fn build(self) -> Entry {
2111 let SendBundle { fd, len, flags, buf_group } = self;
2112
2113 let mut sqe = sqe_zeroed();
2114 sqe.opcode = Self::CODE;
2115 assign_fd!(sqe.fd = fd);
2116 sqe.len = len;
2117 sqe.__bindgen_anon_3.msg_flags = flags as _;
2118 sqe.ioprio |= sys::IORING_RECVSEND_BUNDLE as u16;
2119 sqe.flags |= crate::squeue::Flags::BUFFER_SELECT.bits();
2120 sqe.__bindgen_anon_4.buf_group = buf_group;
2121 Entry(sqe)
2122 }
2123}
2124
2125opcode! {
2126 pub struct RecvBundle {
2136 fd: { impl sealed::UseFixed },
2137 buf_group: { u16 },
2138 ;;
2139 flags: i32 = 0
2140 }
2141
2142 pub const CODE = sys::IORING_OP_RECV;
2143
2144 pub fn build(self) -> Entry {
2145 let RecvBundle { fd, buf_group, flags } = self;
2146
2147 let mut sqe = sqe_zeroed();
2148 sqe.opcode = Self::CODE;
2149 assign_fd!(sqe.fd = fd);
2150 sqe.__bindgen_anon_3.msg_flags = flags as _;
2151 sqe.__bindgen_anon_4.buf_group = buf_group;
2152 sqe.flags |= crate::squeue::Flags::BUFFER_SELECT.bits();
2153 sqe.ioprio |= sys::IORING_RECVSEND_BUNDLE as u16;
2154 Entry(sqe)
2155 }
2156}
2157
2158opcode! {
2159 pub struct RecvMultiBundle {
2177 fd: { impl sealed::UseFixed },
2178 buf_group: { u16 },
2179 ;;
2180 flags: i32 = 0
2181 }
2182
2183 pub const CODE = sys::IORING_OP_RECV;
2184
2185 pub fn build(self) -> Entry {
2186 let RecvMultiBundle { fd, buf_group, flags } = self;
2187
2188 let mut sqe = sqe_zeroed();
2189 sqe.opcode = Self::CODE;
2190 assign_fd!(sqe.fd = fd);
2191 sqe.__bindgen_anon_3.msg_flags = flags as _;
2192 sqe.__bindgen_anon_4.buf_group = buf_group;
2193 sqe.flags |= crate::squeue::Flags::BUFFER_SELECT.bits();
2194 sqe.ioprio = sys::IORING_RECV_MULTISHOT as _;
2195 sqe.ioprio |= sys::IORING_RECVSEND_BUNDLE as u16;
2196 Entry(sqe)
2197 }
2198}
2199
2200opcode! {
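    /// Bind a name to a socket, equivalent to `bind(2)`.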
2203 pub struct Bind {
2205 fd: { impl sealed::UseFixed },
2206 addr: { *const libc::sockaddr },
2207 addrlen: { libc::socklen_t }
2208 ;;
2209 }
2210
2211 pub const CODE = sys::IORING_OP_BIND;
2212
2213 pub fn build(self) -> Entry {
2214 let Bind { fd, addr, addrlen } = self;
2215
2216 let mut sqe = sqe_zeroed();
2217 sqe.opcode = Self::CODE;
2218 assign_fd!(sqe.fd = fd);
2219 sqe.__bindgen_anon_2.addr = addr as _;
2220 sqe.__bindgen_anon_1.off = addrlen as _;
2221 Entry(sqe)
2222 }
2223}
2224
2225opcode! {
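    /// Mark a socket as accepting connections, equivalent to `listen(2)`.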
2226 pub struct Listen {
2228 fd: { impl sealed::UseFixed },
2229 backlog: { i32 },
2230 ;;
2231 }
2232
2233 pub const CODE = sys::IORING_OP_LISTEN;
2234
2235 pub fn build(self) -> Entry {
2236 let Listen { fd, backlog } = self;
2237
2238 let mut sqe = sqe_zeroed();
2239 sqe.opcode = Self::CODE;
2240 assign_fd!(sqe.fd = fd);
2241 sqe.len = backlog as _;
2242 Entry(sqe)
2243 }
2244}
2245
2246opcode! {
2249 pub struct RecvZc {
2251 fd: { impl sealed::UseFixed },
2252 len: { u32 },
2253 ;;
2254 ifq: u32 = 0,
2255 ioprio: u16 = 0,
2256 }
2257
2258 pub const CODE = sys::IORING_OP_RECV_ZC;
2259
2260 pub fn build(self) -> Entry {
2261 let Self { fd, len, ifq, ioprio } = self;
2262
2263 let mut sqe = sqe_zeroed();
2264 sqe.opcode = Self::CODE;
2265 assign_fd!(sqe.fd = fd);
2266 sqe.len = len;
2267 sqe.ioprio = ioprio | sys::IORING_RECV_MULTISHOT as u16;
2268 sqe.__bindgen_anon_5.zcrx_ifq_idx = ifq;
2269 Entry(sqe)
2270 }
2271}
2272
2273opcode! {
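    /// Wait for events on an epoll instance, equivalent to `epoll_wait(2)`.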
2274 pub struct EpollWait {
2276 fd: { impl sealed::UseFixed },
2277 events: { *mut types::epoll_event },
2278 max_events: { u32 },
2279 ;;
2280 flags: u32 = 0,
2281 }
2282
2283 pub const CODE = sys::IORING_OP_EPOLL_WAIT;
2284
2285 pub fn build(self) -> Entry {
2286 let Self { fd, events, max_events, flags } = self;
2287
2288 let mut sqe = sqe_zeroed();
2289 sqe.opcode = Self::CODE;
2290 assign_fd!(sqe.fd = fd);
2291 sqe.__bindgen_anon_2.addr = events as u64;
2292 sqe.len = max_events;
2293 sqe.__bindgen_anon_3.poll32_events = flags;
2294 Entry(sqe)
2295 }
2296}
2297
2298opcode! {
2299 pub struct ReadvFixed {
2301 fd: { impl sealed::UseFixed },
2302 iovec: { *const ::libc::iovec },
2303 len: { u32 },
2304 buf_index: { u16 },
2305 ;;
2306 ioprio: u16 = 0,
2307 offset: u64 = 0,
2308 rw_flags: i32 = 0,
2309 }
2310
2311 pub const CODE = sys::IORING_OP_READV_FIXED;
2312
2313 pub fn build(self) -> Entry {
2314 let Self { fd, iovec, len, buf_index, offset, ioprio, rw_flags } = self;
2315
2316 let mut sqe = sqe_zeroed();
2317 sqe.opcode = Self::CODE;
2318 assign_fd!(sqe.fd = fd);
2319 sqe.__bindgen_anon_1.off = offset as _;
2320 sqe.__bindgen_anon_2.addr = iovec as _;
2321 sqe.len = len;
2322 sqe.__bindgen_anon_4.buf_index = buf_index;
2323 sqe.ioprio = ioprio;
2324 sqe.__bindgen_anon_3.rw_flags = rw_flags;
2325 Entry(sqe)
2326 }
2327}
2328
2329opcode! {
2330 pub struct WritevFixed {
2332 fd: { impl sealed::UseFixed },
2333 iovec: { *const ::libc::iovec },
2334 len: { u32 },
2335 buf_index: { u16 },
2336 ;;
2337 ioprio: u16 = 0,
2338 offset: u64 = 0,
2339 rw_flags: i32 = 0,
2340 }
2341
2342 pub const CODE = sys::IORING_OP_WRITEV_FIXED;
2343
2344 pub fn build(self) -> Entry {
2345 let Self { fd, iovec, len, buf_index, offset, ioprio, rw_flags } = self;
2346
2347 let mut sqe = sqe_zeroed();
2348 sqe.opcode = Self::CODE;
2349 assign_fd!(sqe.fd = fd);
2350 sqe.__bindgen_anon_1.off = offset as _;
2351 sqe.__bindgen_anon_2.addr = iovec as _;
2352 sqe.len = len;
2353 sqe.__bindgen_anon_4.buf_index = buf_index;
2354 sqe.ioprio = ioprio;
2355 sqe.__bindgen_anon_3.rw_flags = rw_flags;
2356 Entry(sqe)
2357 }
2358}