#![allow(clippy::new_without_default)]
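//! Operation codes that can be used to construct submission queue entries.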

use std::convert::TryInto;
use std::mem;
use std::os::unix::io::RawFd;

use crate::squeue::Entry;
use crate::squeue::Entry128;
use crate::sys;
use crate::types::{self, sealed};

macro_rules! assign_fd {
    ( $sqe:ident . fd = $opfd:expr ) => {
        match $opfd {
            sealed::Target::Fd(fd) => $sqe.fd = fd,
            sealed::Target::Fixed(idx) => {
                $sqe.fd = idx as _;
                $sqe.flags |= crate::squeue::Flags::FIXED_FILE.bits();
            }
        }
    };
}

macro_rules! opcode {
    (@type impl sealed::UseFixed ) => {
        sealed::Target
    };
    (@type impl sealed::UseFd ) => {
        RawFd
    };
    (@type $name:ty ) => {
        $name
    };
    (
        $( #[$outer:meta] )*
        pub struct $name:ident {
            $( #[$new_meta:meta] )*

            $( $field:ident : { $( $tnt:tt )+ } ),*

            $(,)?

            ;;

            $(
                $( #[$opt_meta:meta] )*
                $opt_field:ident : $opt_tname:ty = $default:expr
            ),*

            $(,)?
        }

        pub const CODE = $opcode:expr;

        $( #[$build_meta:meta] )*
        pub fn build($self:ident) -> $entry:ty $build_block:block
    ) => {
        $( #[$outer] )*
        pub struct $name {
            $( $field : opcode!(@type $( $tnt )*), )*
            $( $opt_field : $opt_tname, )*
        }

        impl $name {
            $( #[$new_meta] )*
            #[inline]
            pub fn new($( $field : $( $tnt )* ),*) -> Self {
                $name {
                    $( $field: $field.into(), )*
                    $( $opt_field: $default, )*
                }
            }

            pub const CODE: u8 = $opcode as _;

            $(
                $( #[$opt_meta] )*
                #[inline]
                pub const fn $opt_field(mut self, $opt_field: $opt_tname) -> Self {
                    self.$opt_field = $opt_field;
                    self
                }
            )*

            $( #[$build_meta] )*
            #[inline]
            pub fn build($self) -> $entry $build_block
        }
    }
}

#[inline(always)]
fn sqe_zeroed() -> sys::io_uring_sqe {
    unsafe { mem::zeroed() }
}

opcode! {
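    /// Do not perform any I/O.
    ///
    /// This is useful for testing the performance of the io_uring implementation itself.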
    #[derive(Debug)]
    pub struct Nop { ;; }

    pub const CODE = sys::IORING_OP_NOP;

    pub fn build(self) -> Entry {
        let Nop {} = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.fd = -1;
        Entry(sqe)
    }
}

opcode! {
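    /// Vectored read, equivalent to `preadv2(2)`.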
    #[derive(Debug)]
    pub struct Readv {
        fd: { impl sealed::UseFixed },
        iovec: { *const libc::iovec },
        len: { u32 },
        ;;
        ioprio: u16 = 0,
        offset: u64 = 0,
        rw_flags: types::RwFlags = 0,
        buf_group: u16 = 0
    }

    pub const CODE = sys::IORING_OP_READV;

    pub fn build(self) -> Entry {
        let Readv {
            fd,
            iovec, len, offset,
            ioprio, rw_flags,
            buf_group
        } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.ioprio = ioprio;
        sqe.__bindgen_anon_2.addr = iovec as _;
        sqe.len = len;
        sqe.__bindgen_anon_1.off = offset;
        sqe.__bindgen_anon_3.rw_flags = rw_flags;
        sqe.__bindgen_anon_4.buf_group = buf_group;
        Entry(sqe)
    }
}

opcode! {
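    /// Vectored write, equivalent to `pwritev2(2)`.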
    #[derive(Debug)]
    pub struct Writev {
        fd: { impl sealed::UseFixed },
        iovec: { *const libc::iovec },
        len: { u32 },
        ;;
        ioprio: u16 = 0,
        offset: u64 = 0,
        rw_flags: types::RwFlags = 0
    }

    pub const CODE = sys::IORING_OP_WRITEV;

    pub fn build(self) -> Entry {
        let Writev {
            fd,
            iovec, len, offset,
            ioprio, rw_flags
        } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.ioprio = ioprio;
        sqe.__bindgen_anon_2.addr = iovec as _;
        sqe.len = len;
        sqe.__bindgen_anon_1.off = offset;
        sqe.__bindgen_anon_3.rw_flags = rw_flags;
        Entry(sqe)
    }
}

opcode! {
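    /// File sync, equivalent to `fsync(2)`.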
    #[derive(Debug)]
    pub struct Fsync {
        fd: { impl sealed::UseFixed },
        ;;
        flags: types::FsyncFlags = types::FsyncFlags::empty()
    }

    pub const CODE = sys::IORING_OP_FSYNC;

    pub fn build(self) -> Entry {
        let Fsync { fd, flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.__bindgen_anon_3.fsync_flags = flags.bits();
        Entry(sqe)
    }
}

opcode! {
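    /// Read, equivalent to `pread(2)`, into a pre-registered (fixed) buffer identified by `buf_index`.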
    #[derive(Debug)]
    pub struct ReadFixed {
        fd: { impl sealed::UseFixed },
        buf: { *mut u8 },
        len: { u32 },
        buf_index: { u16 },
        ;;
        ioprio: u16 = 0,
        offset: u64 = 0,
        rw_flags: types::RwFlags = 0
    }

    pub const CODE = sys::IORING_OP_READ_FIXED;

    pub fn build(self) -> Entry {
        let ReadFixed {
            fd,
            buf, len, offset,
            buf_index,
            ioprio, rw_flags
        } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.ioprio = ioprio;
        sqe.__bindgen_anon_2.addr = buf as _;
        sqe.len = len;
        sqe.__bindgen_anon_1.off = offset;
        sqe.__bindgen_anon_3.rw_flags = rw_flags;
        sqe.__bindgen_anon_4.buf_index = buf_index;
        Entry(sqe)
    }
}

opcode! {
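    /// Write, equivalent to `pwrite(2)`, from a pre-registered (fixed) buffer identified by `buf_index`.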
    #[derive(Debug)]
    pub struct WriteFixed {
        fd: { impl sealed::UseFixed },
        buf: { *const u8 },
        len: { u32 },
        buf_index: { u16 },
        ;;
        ioprio: u16 = 0,
        offset: u64 = 0,
        rw_flags: types::RwFlags = 0
    }

    pub const CODE = sys::IORING_OP_WRITE_FIXED;

    pub fn build(self) -> Entry {
        let WriteFixed {
            fd,
            buf, len, offset,
            buf_index,
            ioprio, rw_flags
        } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.ioprio = ioprio;
        sqe.__bindgen_anon_2.addr = buf as _;
        sqe.len = len;
        sqe.__bindgen_anon_1.off = offset;
        sqe.__bindgen_anon_3.rw_flags = rw_flags;
        sqe.__bindgen_anon_4.buf_index = buf_index;
        Entry(sqe)
    }
}

opcode! {
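    /// Poll the specified fd for the events given in `flags`.
    ///
    /// One-shot by default; pass `multi(true)` for a multishot poll.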
    #[derive(Debug)]
    pub struct PollAdd {
        fd: { impl sealed::UseFixed },
        flags: { u32 },
        ;;
        multi: bool = false
    }

    pub const CODE = sys::IORING_OP_POLL_ADD;

    pub fn build(self) -> Entry {
        let PollAdd { fd, flags, multi } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        if multi {
            sqe.len = sys::IORING_POLL_ADD_MULTI;
        }

        #[cfg(target_endian = "little")] {
            sqe.__bindgen_anon_3.poll32_events = flags;
        }

        #[cfg(target_endian = "big")] {
            let x = flags << 16;
            let y = flags >> 16;
            let flags = x | y;
            sqe.__bindgen_anon_3.poll32_events = flags;
        }

        Entry(sqe)
    }
}

opcode! {
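    /// Remove an existing poll request identified by its `user_data`.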
    #[derive(Debug)]
    pub struct PollRemove {
        user_data: { u64 }
        ;;
    }

    pub const CODE = sys::IORING_OP_POLL_REMOVE;

    pub fn build(self) -> Entry {
        let PollRemove { user_data } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.fd = -1;
        sqe.__bindgen_anon_2.addr = user_data;
        Entry(sqe)
    }
}

opcode! {
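    /// Sync a file segment with disk, equivalent to `sync_file_range(2)`.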
    #[derive(Debug)]
    pub struct SyncFileRange {
        fd: { impl sealed::UseFixed },
        len: { u32 },
        ;;
        offset: u64 = 0,
        flags: u32 = 0
    }

    pub const CODE = sys::IORING_OP_SYNC_FILE_RANGE;

    pub fn build(self) -> Entry {
        let SyncFileRange {
            fd,
            len, offset,
            flags
        } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.len = len;
        sqe.__bindgen_anon_1.off = offset;
        sqe.__bindgen_anon_3.sync_range_flags = flags;
        Entry(sqe)
    }
}

opcode! {
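    /// Send a message on a socket, equivalent to `sendmsg(2)`.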
    #[derive(Debug)]
    pub struct SendMsg {
        fd: { impl sealed::UseFixed },
        msg: { *const libc::msghdr },
        ;;
        ioprio: u16 = 0,
        flags: u32 = 0
    }

    pub const CODE = sys::IORING_OP_SENDMSG;

    pub fn build(self) -> Entry {
        let SendMsg { fd, msg, ioprio, flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.ioprio = ioprio;
        sqe.__bindgen_anon_2.addr = msg as _;
        sqe.len = 1;
        sqe.__bindgen_anon_3.msg_flags = flags;
        Entry(sqe)
    }
}

opcode! {
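    /// Receive a message on a socket, equivalent to `recvmsg(2)`.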
    #[derive(Debug)]
    pub struct RecvMsg {
        fd: { impl sealed::UseFixed },
        msg: { *mut libc::msghdr },
        ;;
        ioprio: u16 = 0,
        flags: u32 = 0,
        buf_group: u16 = 0
    }

    pub const CODE = sys::IORING_OP_RECVMSG;

    pub fn build(self) -> Entry {
        let RecvMsg { fd, msg, ioprio, flags, buf_group } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.ioprio = ioprio;
        sqe.__bindgen_anon_2.addr = msg as _;
        sqe.len = 1;
        sqe.__bindgen_anon_3.msg_flags = flags;
        sqe.__bindgen_anon_4.buf_group = buf_group;
        Entry(sqe)
    }
}

opcode! {
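    /// Multishot variant of `recvmsg(2)`; buffers are selected from the provided buffer group.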
    #[derive(Debug)]
    pub struct RecvMsgMulti {
        fd: { impl sealed::UseFixed },
        msg: { *const libc::msghdr },
        buf_group: { u16 },
        ;;
        ioprio: u16 = 0,
        flags: u32 = 0
    }

    pub const CODE = sys::IORING_OP_RECVMSG;

    pub fn build(self) -> Entry {
        let RecvMsgMulti { fd, msg, buf_group, ioprio, flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.__bindgen_anon_2.addr = msg as _;
        sqe.len = 1;
        sqe.__bindgen_anon_3.msg_flags = flags;
        sqe.__bindgen_anon_4.buf_group = buf_group;
        sqe.flags |= crate::squeue::Flags::BUFFER_SELECT.bits();
        sqe.ioprio = ioprio | (sys::IORING_RECV_MULTISHOT as u16);
        Entry(sqe)
    }
}

opcode! {
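    /// Register a timeout operation; it completes when the timespec expires or after `count` other requests complete.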
    #[derive(Debug)]
    pub struct Timeout {
        timespec: { *const types::Timespec },
        ;;
        count: u32 = 0,

        flags: types::TimeoutFlags = types::TimeoutFlags::empty()
    }

    pub const CODE = sys::IORING_OP_TIMEOUT;

    pub fn build(self) -> Entry {
        let Timeout { timespec, count, flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.fd = -1;
        sqe.__bindgen_anon_2.addr = timespec as _;
        sqe.len = 1;
        sqe.__bindgen_anon_1.off = count as _;
        sqe.__bindgen_anon_3.timeout_flags = flags.bits();
        Entry(sqe)
    }
}

opcode! {
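    /// Attempt to remove an existing timeout operation identified by its `user_data`.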
    pub struct TimeoutRemove {
        user_data: { u64 },
        ;;
    }

    pub const CODE = sys::IORING_OP_TIMEOUT_REMOVE;

    pub fn build(self) -> Entry {
        let TimeoutRemove { user_data } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.fd = -1;
        sqe.__bindgen_anon_2.addr = user_data;
        Entry(sqe)
    }
}

opcode! {
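    /// Update an existing timeout operation identified by its `user_data` with a new timespec.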
    pub struct TimeoutUpdate {
        user_data: { u64 },
        timespec: { *const types::Timespec },
        ;;
        flags: types::TimeoutFlags = types::TimeoutFlags::empty()
    }

    pub const CODE = sys::IORING_OP_TIMEOUT_REMOVE;

    pub fn build(self) -> Entry {
        let TimeoutUpdate { user_data, timespec, flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.fd = -1;
        sqe.__bindgen_anon_1.off = timespec as _;
        sqe.__bindgen_anon_2.addr = user_data;
        sqe.__bindgen_anon_3.timeout_flags = flags.bits() | sys::IORING_TIMEOUT_UPDATE;
        Entry(sqe)
    }
}

opcode! {
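    /// Accept a new connection on a socket, equivalent to `accept4(2)`.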
    pub struct Accept {
        fd: { impl sealed::UseFixed },
        addr: { *mut libc::sockaddr },
        addrlen: { *mut libc::socklen_t },
        ;;
        file_index: Option<types::DestinationSlot> = None,
        flags: i32 = 0
    }

    pub const CODE = sys::IORING_OP_ACCEPT;

    pub fn build(self) -> Entry {
        let Accept { fd, addr, addrlen, file_index, flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.__bindgen_anon_2.addr = addr as _;
        sqe.__bindgen_anon_1.addr2 = addrlen as _;
        sqe.__bindgen_anon_3.accept_flags = flags as _;
        if let Some(dest) = file_index {
            sqe.__bindgen_anon_5.file_index = dest.kernel_index_arg();
        }
        Entry(sqe)
    }
}

opcode! {
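    /// Set a socket option, equivalent to `setsockopt(2)`, issued as a socket uring command.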
    pub struct SetSockOpt {
        fd: { impl sealed::UseFixed },
        level: { u32 },
        optname: { u32 },
        optval: { *const libc::c_void },
        optlen: { u32 },
        ;;
        flags: u32 = 0
    }

    pub const CODE = sys::IORING_OP_URING_CMD;

    pub fn build(self) -> Entry {
        let SetSockOpt { fd, level, optname, optval, optlen, flags } = self;
        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.__bindgen_anon_1.__bindgen_anon_1.cmd_op = sys::SOCKET_URING_OP_SETSOCKOPT;

        sqe.__bindgen_anon_2.__bindgen_anon_1.level = level;
        sqe.__bindgen_anon_2.__bindgen_anon_1.optname = optname;
        sqe.__bindgen_anon_3.uring_cmd_flags = flags;
        sqe.__bindgen_anon_5.optlen = optlen;
        unsafe { *sqe.__bindgen_anon_6.optval.as_mut() = optval as u64 };
        Entry(sqe)
    }
}

opcode! {
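    /// Attempt to cancel an already issued request identified by its `user_data`.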
    pub struct AsyncCancel {
        user_data: { u64 }
        ;;

    }

    pub const CODE = sys::IORING_OP_ASYNC_CANCEL;

    pub fn build(self) -> Entry {
        let AsyncCancel { user_data } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.fd = -1;
        sqe.__bindgen_anon_2.addr = user_data;
        Entry(sqe)
    }
}

opcode! {
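    /// A timeout linked to the previous SQE in the chain; cancels that request if it does not complete in time.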
    pub struct LinkTimeout {
        timespec: { *const types::Timespec },
        ;;
        flags: types::TimeoutFlags = types::TimeoutFlags::empty()
    }

    pub const CODE = sys::IORING_OP_LINK_TIMEOUT;

    pub fn build(self) -> Entry {
        let LinkTimeout { timespec, flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.fd = -1;
        sqe.__bindgen_anon_2.addr = timespec as _;
        sqe.len = 1;
        sqe.__bindgen_anon_3.timeout_flags = flags.bits();
        Entry(sqe)
    }
}

opcode! {
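    /// Connect a socket, equivalent to `connect(2)`.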
    pub struct Connect {
        fd: { impl sealed::UseFixed },
        addr: { *const libc::sockaddr },
        addrlen: { libc::socklen_t }
        ;;
    }

    pub const CODE = sys::IORING_OP_CONNECT;

    pub fn build(self) -> Entry {
        let Connect { fd, addr, addrlen } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.__bindgen_anon_2.addr = addr as _;
        sqe.__bindgen_anon_1.off = addrlen as _;
        Entry(sqe)
    }
}

opcode! {
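    /// Preallocate or deallocate space in a file, equivalent to `fallocate(2)`.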
    pub struct Fallocate {
        fd: { impl sealed::UseFixed },
        len: { u64 },
        ;;
        offset: u64 = 0,
        mode: i32 = 0
    }

    pub const CODE = sys::IORING_OP_FALLOCATE;

    pub fn build(self) -> Entry {
        let Fallocate { fd, len, offset, mode } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.__bindgen_anon_2.addr = len;
        sqe.len = mode as _;
        sqe.__bindgen_anon_1.off = offset;
        Entry(sqe)
    }
}

opcode! {
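    /// Open a file, equivalent to `openat(2)`.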
    pub struct OpenAt {
        dirfd: { impl sealed::UseFd },
        pathname: { *const libc::c_char },
        ;;
        file_index: Option<types::DestinationSlot> = None,
        flags: i32 = 0,
        mode: libc::mode_t = 0
    }

    pub const CODE = sys::IORING_OP_OPENAT;

    pub fn build(self) -> Entry {
        let OpenAt { dirfd, pathname, file_index, flags, mode } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.fd = dirfd;
        sqe.__bindgen_anon_2.addr = pathname as _;
        sqe.len = mode;
        sqe.__bindgen_anon_3.open_flags = flags as _;
        if let Some(dest) = file_index {
            sqe.__bindgen_anon_5.file_index = dest.kernel_index_arg();
        }
        Entry(sqe)
    }
}

opcode! {
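    /// Close a file descriptor, equivalent to `close(2)`; a fixed target releases the registered file slot instead.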
    pub struct Close {
        fd: { impl sealed::UseFixed },
        ;;
    }

    pub const CODE = sys::IORING_OP_CLOSE;

    pub fn build(self) -> Entry {
        let Close { fd } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        match fd {
            sealed::Target::Fd(fd) => sqe.fd = fd,
            sealed::Target::Fixed(idx) => {
                sqe.fd = 0;
                sqe.__bindgen_anon_5.file_index = idx + 1;
            }
        }
        Entry(sqe)
    }
}

opcode! {
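    /// Update a set of previously registered files in place, asynchronously.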
    pub struct FilesUpdate {
        fds: { *const RawFd },
        len: { u32 },
        ;;
        offset: i32 = 0
    }

    pub const CODE = sys::IORING_OP_FILES_UPDATE;

    pub fn build(self) -> Entry {
        let FilesUpdate { fds, len, offset } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.fd = -1;
        sqe.__bindgen_anon_2.addr = fds as _;
        sqe.len = len;
        sqe.__bindgen_anon_1.off = offset as _;
        Entry(sqe)
    }
}

opcode! {
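    /// Get extended file status, equivalent to `statx(2)`.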
    pub struct Statx {
        dirfd: { impl sealed::UseFd },
        pathname: { *const libc::c_char },
        statxbuf: { *mut types::statx },
        ;;
        flags: i32 = 0,
        mask: u32 = 0
    }

    pub const CODE = sys::IORING_OP_STATX;

    pub fn build(self) -> Entry {
        let Statx {
            dirfd, pathname, statxbuf,
            flags, mask
        } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.fd = dirfd;
        sqe.__bindgen_anon_2.addr = pathname as _;
        sqe.len = mask;
        sqe.__bindgen_anon_1.off = statxbuf as _;
        sqe.__bindgen_anon_3.statx_flags = flags as _;
        Entry(sqe)
    }
}

opcode! {
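    /// Read into a buffer, equivalent to `pread(2)`.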
    pub struct Read {
        fd: { impl sealed::UseFixed },
        buf: { *mut u8 },
        len: { u32 },
        ;;
        offset: u64 = 0,
        ioprio: u16 = 0,
        rw_flags: types::RwFlags = 0,
        buf_group: u16 = 0
    }

    pub const CODE = sys::IORING_OP_READ;

    pub fn build(self) -> Entry {
        let Read {
            fd,
            buf, len, offset,
            ioprio, rw_flags,
            buf_group
        } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.ioprio = ioprio;
        sqe.__bindgen_anon_2.addr = buf as _;
        sqe.len = len;
        sqe.__bindgen_anon_1.off = offset;
        sqe.__bindgen_anon_3.rw_flags = rw_flags;
        sqe.__bindgen_anon_4.buf_group = buf_group;
        Entry(sqe)
    }
}

opcode! {
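    /// Write from a buffer, equivalent to `pwrite(2)`.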
    pub struct Write {
        fd: { impl sealed::UseFixed },
        buf: { *const u8 },
        len: { u32 },
        ;;
        offset: u64 = 0,
        ioprio: u16 = 0,
        rw_flags: types::RwFlags = 0
    }

    pub const CODE = sys::IORING_OP_WRITE;

    pub fn build(self) -> Entry {
        let Write {
            fd,
            buf, len, offset,
            ioprio, rw_flags
        } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.ioprio = ioprio;
        sqe.__bindgen_anon_2.addr = buf as _;
        sqe.len = len;
        sqe.__bindgen_anon_1.off = offset;
        sqe.__bindgen_anon_3.rw_flags = rw_flags;
        Entry(sqe)
    }
}

opcode! {
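    /// Predeclare an access pattern for file data, equivalent to `posix_fadvise(2)`.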
    pub struct Fadvise {
        fd: { impl sealed::UseFixed },
        len: { libc::off_t },
        advice: { i32 },
        ;;
        offset: u64 = 0,
    }

    pub const CODE = sys::IORING_OP_FADVISE;

    pub fn build(self) -> Entry {
        let Fadvise { fd, len, advice, offset } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.len = len as _;
        sqe.__bindgen_anon_1.off = offset;
        sqe.__bindgen_anon_3.fadvise_advice = advice as _;
        Entry(sqe)
    }
}

opcode! {
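    /// Give advice about use of memory, equivalent to `madvise(2)`.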
    pub struct Madvise {
        addr: { *const libc::c_void },
        len: { libc::off_t },
        advice: { i32 },
        ;;
    }

    pub const CODE = sys::IORING_OP_MADVISE;

    pub fn build(self) -> Entry {
        let Madvise { addr, len, advice } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.fd = -1;
        sqe.__bindgen_anon_2.addr = addr as _;
        sqe.len = len as _;
        sqe.__bindgen_anon_3.fadvise_advice = advice as _;
        Entry(sqe)
    }
}

opcode! {
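    /// Send a message on a socket, equivalent to `send(2)` (or `sendto(2)` when a destination address is supplied).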
    pub struct Send {
        fd: { impl sealed::UseFixed },
        buf: { *const u8 },
        len: { u32 },
        ;;
        flags: i32 = 0,

        dest_addr: *const libc::sockaddr = core::ptr::null(),
        dest_addr_len: libc::socklen_t = 0,
    }

    pub const CODE = sys::IORING_OP_SEND;

    pub fn build(self) -> Entry {
        let Send { fd, buf, len, flags, dest_addr, dest_addr_len } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.__bindgen_anon_2.addr = buf as _;
        sqe.__bindgen_anon_1.addr2 = dest_addr as _;
        sqe.__bindgen_anon_5.__bindgen_anon_1.addr_len = dest_addr_len as _;
        sqe.len = len;
        sqe.__bindgen_anon_3.msg_flags = flags as _;
        Entry(sqe)
    }
}

opcode! {
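    /// Receive a message from a socket, equivalent to `recv(2)`.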
    pub struct Recv {
        fd: { impl sealed::UseFixed },
        buf: { *mut u8 },
        len: { u32 },
        ;;
        flags: i32 = 0,
        buf_group: u16 = 0
    }

    pub const CODE = sys::IORING_OP_RECV;

    pub fn build(self) -> Entry {
        let Recv { fd, buf, len, flags, buf_group } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.__bindgen_anon_2.addr = buf as _;
        sqe.len = len;
        sqe.__bindgen_anon_3.msg_flags = flags as _;
        sqe.__bindgen_anon_4.buf_group = buf_group;
        Entry(sqe)
    }
}

opcode! {
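    /// Multishot `recv`; completions keep being posted, with buffers selected from the provided buffer group.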
    pub struct RecvMulti {
        fd: { impl sealed::UseFixed },
        buf_group: { u16 },
        ;;
        flags: i32 = 0,
    }

    pub const CODE = sys::IORING_OP_RECV;

    pub fn build(self) -> Entry {
        let RecvMulti { fd, buf_group, flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.__bindgen_anon_3.msg_flags = flags as _;
        sqe.__bindgen_anon_4.buf_group = buf_group;
        sqe.flags |= crate::squeue::Flags::BUFFER_SELECT.bits();
        sqe.ioprio = sys::IORING_RECV_MULTISHOT as _;
        Entry(sqe)
    }
}

opcode! {
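    /// Open a file, equivalent to `openat2(2)`.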
    pub struct OpenAt2 {
        dirfd: { impl sealed::UseFd },
        pathname: { *const libc::c_char },
        how: { *const types::OpenHow }
        ;;
        file_index: Option<types::DestinationSlot> = None,
    }

    pub const CODE = sys::IORING_OP_OPENAT2;

    pub fn build(self) -> Entry {
        let OpenAt2 { dirfd, pathname, how, file_index } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.fd = dirfd;
        sqe.__bindgen_anon_2.addr = pathname as _;
        sqe.len = mem::size_of::<sys::open_how>() as _;
        sqe.__bindgen_anon_1.off = how as _;
        if let Some(dest) = file_index {
            sqe.__bindgen_anon_5.file_index = dest.kernel_index_arg();
        }
        Entry(sqe)
    }
}

opcode! {
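    /// Modify an epoll interest list, equivalent to `epoll_ctl(2)`.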
    pub struct EpollCtl {
        epfd: { impl sealed::UseFixed },
        fd: { impl sealed::UseFd },
        op: { i32 },
        ev: { *const types::epoll_event },
        ;;
    }

    pub const CODE = sys::IORING_OP_EPOLL_CTL;

    pub fn build(self) -> Entry {
        let EpollCtl { epfd, fd, op, ev } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = epfd);
        sqe.__bindgen_anon_2.addr = ev as _;
        sqe.len = op as _;
        sqe.__bindgen_anon_1.off = fd as _;
        Entry(sqe)
    }
}

opcode! {
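    /// Splice data to/from a pipe, equivalent to `splice(2)`.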
    pub struct Splice {
        fd_in: { impl sealed::UseFixed },
        off_in: { i64 },
        fd_out: { impl sealed::UseFixed },
        off_out: { i64 },
        len: { u32 },
        ;;
        flags: u32 = 0
    }

    pub const CODE = sys::IORING_OP_SPLICE;

    pub fn build(self) -> Entry {
        let Splice { fd_in, off_in, fd_out, off_out, len, mut flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd_out);
        sqe.len = len;
        sqe.__bindgen_anon_1.off = off_out as _;

        sqe.__bindgen_anon_5.splice_fd_in = match fd_in {
            sealed::Target::Fd(fd) => fd,
            sealed::Target::Fixed(idx) => {
                flags |= sys::SPLICE_F_FD_IN_FIXED;
                idx as _
            }
        };

        sqe.__bindgen_anon_2.splice_off_in = off_in as _;
        sqe.__bindgen_anon_3.splice_flags = flags;
        Entry(sqe)
    }
}

opcode! {
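    /// Provide a group of buffers to the kernel for use with buffer selection (`BUFFER_SELECT`).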
    pub struct ProvideBuffers {
        addr: { *mut u8 },
        len: { i32 },
        nbufs: { u16 },
        bgid: { u16 },
        bid: { u16 }
        ;;
    }

    pub const CODE = sys::IORING_OP_PROVIDE_BUFFERS;

    pub fn build(self) -> Entry {
        let ProvideBuffers { addr, len, nbufs, bgid, bid } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.fd = nbufs as _;
        sqe.__bindgen_anon_2.addr = addr as _;
        sqe.len = len as _;
        sqe.__bindgen_anon_1.off = bid as _;
        sqe.__bindgen_anon_4.buf_group = bgid;
        Entry(sqe)
    }
}

opcode! {
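    /// Remove previously provided buffers from buffer group `bgid`.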
    pub struct RemoveBuffers {
        nbufs: { u16 },
        bgid: { u16 }
        ;;
    }

    pub const CODE = sys::IORING_OP_REMOVE_BUFFERS;

    pub fn build(self) -> Entry {
        let RemoveBuffers { nbufs, bgid } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.fd = nbufs as _;
        sqe.__bindgen_anon_4.buf_group = bgid;
        Entry(sqe)
    }
}

opcode! {
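    /// Duplicate pipe content, equivalent to `tee(2)`.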
    pub struct Tee {
        fd_in: { impl sealed::UseFixed },
        fd_out: { impl sealed::UseFixed },
        len: { u32 }
        ;;
        flags: u32 = 0
    }

    pub const CODE = sys::IORING_OP_TEE;

    pub fn build(self) -> Entry {
        let Tee { fd_in, fd_out, len, mut flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;

        assign_fd!(sqe.fd = fd_out);
        sqe.len = len;

        sqe.__bindgen_anon_5.splice_fd_in = match fd_in {
            sealed::Target::Fd(fd) => fd,
            sealed::Target::Fixed(idx) => {
                flags |= sys::SPLICE_F_FD_IN_FIXED;
                idx as _
            }
        };

        sqe.__bindgen_anon_3.splice_flags = flags;

        Entry(sqe)
    }
}

opcode! {
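    /// Shut down all or part of a full-duplex connection, equivalent to `shutdown(2)`.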
    pub struct Shutdown {
        fd: { impl sealed::UseFixed },
        how: { i32 },
        ;;
    }

    pub const CODE = sys::IORING_OP_SHUTDOWN;

    pub fn build(self) -> Entry {
        let Shutdown { fd, how } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.len = how as _;
        Entry(sqe)
    }
}

opcode! {
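    /// Rename a file, equivalent to `renameat2(2)`.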
    pub struct RenameAt {
        olddirfd: { impl sealed::UseFd },
        oldpath: { *const libc::c_char },
        newdirfd: { impl sealed::UseFd },
        newpath: { *const libc::c_char },
        ;;
        flags: u32 = 0
    }

    pub const CODE = sys::IORING_OP_RENAMEAT;

    pub fn build(self) -> Entry {
        let RenameAt {
            olddirfd, oldpath,
            newdirfd, newpath,
            flags
        } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.fd = olddirfd;
        sqe.__bindgen_anon_2.addr = oldpath as _;
        sqe.len = newdirfd as _;
        sqe.__bindgen_anon_1.off = newpath as _;
        sqe.__bindgen_anon_3.rename_flags = flags;
        Entry(sqe)
    }
}

opcode! {
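    /// Remove a directory entry, equivalent to `unlinkat(2)`.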
    pub struct UnlinkAt {
        dirfd: { impl sealed::UseFd },
        pathname: { *const libc::c_char },
        ;;
        flags: i32 = 0
    }

    pub const CODE = sys::IORING_OP_UNLINKAT;

    pub fn build(self) -> Entry {
        let UnlinkAt { dirfd, pathname, flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.fd = dirfd;
        sqe.__bindgen_anon_2.addr = pathname as _;
        sqe.__bindgen_anon_3.unlink_flags = flags as _;
        Entry(sqe)
    }
}

opcode! {
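    /// Create a directory, equivalent to `mkdirat(2)`.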
    pub struct MkDirAt {
        dirfd: { impl sealed::UseFd },
        pathname: { *const libc::c_char },
        ;;
        mode: libc::mode_t = 0
    }

    pub const CODE = sys::IORING_OP_MKDIRAT;

    pub fn build(self) -> Entry {
        let MkDirAt { dirfd, pathname, mode } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.fd = dirfd;
        sqe.__bindgen_anon_2.addr = pathname as _;
        sqe.len = mode;
        Entry(sqe)
    }
}

opcode! {
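    /// Create a symbolic link, equivalent to `symlinkat(2)`.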
    pub struct SymlinkAt {
        newdirfd: { impl sealed::UseFd },
        target: { *const libc::c_char },
        linkpath: { *const libc::c_char },
        ;;
    }

    pub const CODE = sys::IORING_OP_SYMLINKAT;

    pub fn build(self) -> Entry {
        let SymlinkAt { newdirfd, target, linkpath } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.fd = newdirfd;
        sqe.__bindgen_anon_2.addr = target as _;
        sqe.__bindgen_anon_1.addr2 = linkpath as _;
        Entry(sqe)
    }
}

opcode! {
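    /// Create a hard link, equivalent to `linkat(2)`.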
    pub struct LinkAt {
        olddirfd: { impl sealed::UseFd },
        oldpath: { *const libc::c_char },
        newdirfd: { impl sealed::UseFd },
        newpath: { *const libc::c_char },
        ;;
        flags: i32 = 0
    }

    pub const CODE = sys::IORING_OP_LINKAT;

    pub fn build(self) -> Entry {
        let LinkAt { olddirfd, oldpath, newdirfd, newpath, flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.fd = olddirfd as _;
        sqe.__bindgen_anon_2.addr = oldpath as _;
        sqe.len = newdirfd as _;
        sqe.__bindgen_anon_1.addr2 = newpath as _;
        sqe.__bindgen_anon_3.hardlink_flags = flags as _;
        Entry(sqe)
    }
}

opcode! {
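    /// Post a completion carrying `result` and `user_data` onto the completion queue of another ring.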
    pub struct MsgRingData {
        ring_fd: { impl sealed::UseFd },
        result: { i32 },
        user_data: { u64 },
        user_flags: { Option<u32> },
        ;;
        opcode_flags: u32 = 0
    }

    pub const CODE = sys::IORING_OP_MSG_RING;

    pub fn build(self) -> Entry {
        let MsgRingData { ring_fd, result, user_data, user_flags, opcode_flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.__bindgen_anon_2.addr = sys::IORING_MSG_DATA.into();
        sqe.fd = ring_fd;
        sqe.len = result as u32;
        sqe.__bindgen_anon_1.off = user_data;
        sqe.__bindgen_anon_3.msg_ring_flags = opcode_flags;
        if let Some(flags) = user_flags {
            sqe.__bindgen_anon_5.file_index = flags;
            unsafe { sqe.__bindgen_anon_3.msg_ring_flags |= sys::IORING_MSG_RING_FLAGS_PASS };
        }
        Entry(sqe)
    }
}

opcode! {
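    /// Cancel in-flight requests matching the criteria described by the given `CancelBuilder`.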
    pub struct AsyncCancel2 {
        builder: { types::CancelBuilder }
        ;;
    }

    pub const CODE = sys::IORING_OP_ASYNC_CANCEL;

    pub fn build(self) -> Entry {
        let AsyncCancel2 { builder } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.fd = builder.to_fd();
        sqe.__bindgen_anon_2.addr = builder.user_data.unwrap_or(0);
        sqe.__bindgen_anon_3.cancel_flags = builder.flags.bits();
        Entry(sqe)
    }
}

opcode! {
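    /// A file/device-private command with a 16-byte payload, issued through `IORING_OP_URING_CMD`.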
    pub struct UringCmd16 {
        fd: { impl sealed::UseFixed },
        cmd_op: { u32 },
        ;;
        buf_index: Option<u16> = None,
        cmd: [u8; 16] = [0u8; 16]
    }

    pub const CODE = sys::IORING_OP_URING_CMD;

    pub fn build(self) -> Entry {
        let UringCmd16 { fd, cmd_op, cmd, buf_index } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.__bindgen_anon_1.__bindgen_anon_1.cmd_op = cmd_op;
        unsafe { *sqe.__bindgen_anon_6.cmd.as_mut().as_mut_ptr().cast::<[u8; 16]>() = cmd };
        if let Some(buf_index) = buf_index {
            sqe.__bindgen_anon_4.buf_index = buf_index;
            unsafe {
                sqe.__bindgen_anon_3.uring_cmd_flags |= sys::IORING_URING_CMD_FIXED;
            }
        }
        Entry(sqe)
    }
}

opcode! {
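    /// A file/device-private command with an 80-byte payload; builds a 128-byte submission entry (`Entry128`).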
    pub struct UringCmd80 {
        fd: { impl sealed::UseFixed },
        cmd_op: { u32 },
        ;;
        buf_index: Option<u16> = None,
        cmd: [u8; 80] = [0u8; 80]
    }

    pub const CODE = sys::IORING_OP_URING_CMD;

    pub fn build(self) -> Entry128 {
        let UringCmd80 { fd, cmd_op, cmd, buf_index } = self;

        let cmd1 = cmd[..16].try_into().unwrap();
        let cmd2 = cmd[16..].try_into().unwrap();

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.__bindgen_anon_1.__bindgen_anon_1.cmd_op = cmd_op;
        unsafe { *sqe.__bindgen_anon_6.cmd.as_mut().as_mut_ptr().cast::<[u8; 16]>() = cmd1 };
        if let Some(buf_index) = buf_index {
            sqe.__bindgen_anon_4.buf_index = buf_index;
            unsafe {
                sqe.__bindgen_anon_3.uring_cmd_flags |= sys::IORING_URING_CMD_FIXED;
            }
        }
        Entry128(Entry(sqe), cmd2)
    }
}

opcode! {
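    /// Create a socket, equivalent to `socket(2)`.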
    pub struct Socket {
        domain: { i32 },
        socket_type: { i32 },
        protocol: { i32 },
        ;;
        file_index: Option<types::DestinationSlot> = None,
        flags: types::RwFlags = 0,
    }

    pub const CODE = sys::IORING_OP_SOCKET;

    pub fn build(self) -> Entry {
        let Socket { domain, socket_type, protocol, file_index, flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.fd = domain as _;
        sqe.__bindgen_anon_1.off = socket_type as _;
        sqe.len = protocol as _;
        sqe.__bindgen_anon_3.rw_flags = flags;
        if let Some(dest) = file_index {
            sqe.__bindgen_anon_5.file_index = dest.kernel_index_arg();
        }
        Entry(sqe)
    }
}

opcode! {
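    /// Multishot accept; a completion is posted for every incoming connection until the request is cancelled or fails.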
    pub struct AcceptMulti {
        fd: { impl sealed::UseFixed },
        ;;
        allocate_file_index: bool = false,
        flags: i32 = 0
    }

    pub const CODE = sys::IORING_OP_ACCEPT;

    pub fn build(self) -> Entry {
        let AcceptMulti { fd, allocate_file_index, flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.ioprio = sys::IORING_ACCEPT_MULTISHOT as u16;
        sqe.__bindgen_anon_3.accept_flags = flags as _;
        if allocate_file_index {
            sqe.__bindgen_anon_5.file_index = sys::IORING_FILE_INDEX_ALLOC as u32;
        }
        Entry(sqe)
    }
}

opcode! {
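    /// Send a registered file descriptor from this ring to a fixed-file slot of the target ring.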
    pub struct MsgRingSendFd {
        ring_fd: { impl sealed::UseFd },
        fixed_slot_src: { types::Fixed },
        dest_slot_index: { types::DestinationSlot },
        user_data: { u64 },
        ;;
        opcode_flags: u32 = 0
    }

    pub const CODE = sys::IORING_OP_MSG_RING;

    pub fn build(self) -> Entry {
        let MsgRingSendFd { ring_fd, fixed_slot_src, dest_slot_index, user_data, opcode_flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.__bindgen_anon_2.addr = sys::IORING_MSG_SEND_FD.into();
        sqe.fd = ring_fd;
        sqe.__bindgen_anon_1.off = user_data;
        unsafe { sqe.__bindgen_anon_6.__bindgen_anon_1.as_mut().addr3 = fixed_slot_src.0 as u64 };
        sqe.__bindgen_anon_5.file_index = dest_slot_index.kernel_index_arg();
        sqe.__bindgen_anon_3.msg_ring_flags = opcode_flags;
        Entry(sqe)
    }
}

opcode! {
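    /// Zero-copy send on a socket, equivalent to `send(2)`/`sendto(2)` without copying the buffer into kernel space.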
    pub struct SendZc {
        fd: { impl sealed::UseFixed },
        buf: { *const u8 },
        len: { u32 },
        ;;
        buf_index: Option<u16> = None,
        dest_addr: *const libc::sockaddr = core::ptr::null(),
        dest_addr_len: libc::socklen_t = 0,
        flags: i32 = 0,
        zc_flags: u16 = 0,
    }

    pub const CODE = sys::IORING_OP_SEND_ZC;

    pub fn build(self) -> Entry {
        let SendZc { fd, buf, len, buf_index, dest_addr, dest_addr_len, flags, zc_flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.__bindgen_anon_2.addr = buf as _;
        sqe.len = len;
        sqe.__bindgen_anon_3.msg_flags = flags as _;
        sqe.ioprio = zc_flags;
        if let Some(buf_index) = buf_index {
            sqe.__bindgen_anon_4.buf_index = buf_index;
            sqe.ioprio |= sys::IORING_RECVSEND_FIXED_BUF as u16;
        }
        sqe.__bindgen_anon_1.addr2 = dest_addr as _;
        sqe.__bindgen_anon_5.__bindgen_anon_1.addr_len = dest_addr_len as _;
        Entry(sqe)
    }
}

opcode! {
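    /// Zero-copy variant of `sendmsg(2)`.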
    #[derive(Debug)]
    pub struct SendMsgZc {
        fd: { impl sealed::UseFixed },
        msg: { *const libc::msghdr },
        ;;
        ioprio: u16 = 0,
        flags: u32 = 0
    }

    pub const CODE = sys::IORING_OP_SENDMSG_ZC;

    pub fn build(self) -> Entry {
        let SendMsgZc { fd, msg, ioprio, flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.ioprio = ioprio;
        sqe.__bindgen_anon_2.addr = msg as _;
        sqe.len = 1;
        sqe.__bindgen_anon_3.msg_flags = flags;
        Entry(sqe)
    }
}

opcode! {
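    /// Wait on a futex, similar to `futex(2)` with `FUTEX_WAIT`.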
    #[derive(Debug)]
    pub struct FutexWait {
        futex: { *const u32 },
        val: { u64 },
        mask: { u64 },
        futex_flags: { u32 },
        ;;
        flags: u32 = 0
    }

    pub const CODE = sys::IORING_OP_FUTEX_WAIT;

    pub fn build(self) -> Entry {
        let FutexWait { futex, val, mask, futex_flags, flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.fd = futex_flags as _;
        sqe.__bindgen_anon_2.addr = futex as usize as _;
        sqe.__bindgen_anon_1.off = val;
        unsafe { sqe.__bindgen_anon_6.__bindgen_anon_1.as_mut().addr3 = mask };
        sqe.__bindgen_anon_3.futex_flags = flags;
        Entry(sqe)
    }
}

opcode! {
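    /// Wake waiters on a futex, similar to `futex(2)` with `FUTEX_WAKE`.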
    #[derive(Debug)]
    pub struct FutexWake {
        futex: { *const u32 },
        val: { u64 },
        mask: { u64 },
        futex_flags: { u32 },
        ;;
        flags: u32 = 0
    }

    pub const CODE = sys::IORING_OP_FUTEX_WAKE;

    pub fn build(self) -> Entry {
        let FutexWake { futex, val, mask, futex_flags, flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.fd = futex_flags as _;
        sqe.__bindgen_anon_2.addr = futex as usize as _;
        sqe.__bindgen_anon_1.off = val;
        unsafe { sqe.__bindgen_anon_6.__bindgen_anon_1.as_mut().addr3 = mask };
        sqe.__bindgen_anon_3.futex_flags = flags;
        Entry(sqe)
    }
}

opcode! {
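    /// Wait on multiple futexes at once, equivalent to `futex_waitv(2)`.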
    #[derive(Debug)]
    pub struct FutexWaitV {
        futexv: { *const types::FutexWaitV },
        nr_futex: { u32 },
        ;;
        flags: u32 = 0
    }

    pub const CODE = sys::IORING_OP_FUTEX_WAITV;

    pub fn build(self) -> Entry {
        let FutexWaitV { futexv, nr_futex, flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.__bindgen_anon_2.addr = futexv as usize as _;
        sqe.len = nr_futex;
        sqe.__bindgen_anon_3.futex_flags = flags;
        Entry(sqe)
    }
}

opcode! {
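    /// Wait for a child process state change, equivalent to `waitid(2)`.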
    #[derive(Debug)]
    pub struct WaitId {
        idtype: { libc::idtype_t },
        id: { libc::id_t },
        options: { libc::c_int },
        ;;
        infop: *const libc::siginfo_t = std::ptr::null(),
        flags: libc::c_uint = 0,
    }

    pub const CODE = sys::IORING_OP_WAITID;

    pub fn build(self) -> Entry {
        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.fd = self.id as _;
        sqe.len = self.idtype as _;
        sqe.__bindgen_anon_3.waitid_flags = self.flags;
        sqe.__bindgen_anon_5.file_index = self.options as _;
        sqe.__bindgen_anon_1.addr2 = self.infop as _;
        Entry(sqe)
    }
}

opcode! {
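    /// Install a direct (fixed) file descriptor into the process's file descriptor table.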
    #[derive(Debug)]
    pub struct FixedFdInstall {
        fd: { types::Fixed },
        file_flags: { u32 },
        ;;
    }

    pub const CODE = sys::IORING_OP_FIXED_FD_INSTALL;

    pub fn build(self) -> Entry {
        let FixedFdInstall { fd, file_flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.fd = fd.0 as _;
        sqe.flags = crate::squeue::Flags::FIXED_FILE.bits();
        sqe.__bindgen_anon_3.install_fd_flags = file_flags;
        Entry(sqe)
    }
}

opcode! {
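    /// Truncate a file to a given length, equivalent to `ftruncate(2)`.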
    #[derive(Debug)]
    pub struct Ftruncate {
        fd: { impl sealed::UseFixed },
        len: { u64 },
        ;;
    }

    pub const CODE = sys::IORING_OP_FTRUNCATE;

    pub fn build(self) -> Entry {
        let Ftruncate { fd, len } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.__bindgen_anon_1.off = len;
        Entry(sqe)
    }
}

opcode! {
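    /// Send a bundle of buffers from the provided buffer group in a single operation.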
    pub struct SendBundle {
        fd: { impl sealed::UseFixed },
        buf_group: { u16 },
        ;;
        flags: i32 = 0,
        len: u32 = 0
    }

    pub const CODE = sys::IORING_OP_SEND;

    pub fn build(self) -> Entry {
        let SendBundle { fd, len, flags, buf_group } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.len = len;
        sqe.__bindgen_anon_3.msg_flags = flags as _;
        sqe.ioprio |= sys::IORING_RECVSEND_BUNDLE as u16;
        sqe.flags |= crate::squeue::Flags::BUFFER_SELECT.bits();
        sqe.__bindgen_anon_4.buf_group = buf_group;
        Entry(sqe)
    }
}

opcode! {
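    /// Receive into a bundle of buffers selected from the provided buffer group.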
    pub struct RecvBundle {
        fd: { impl sealed::UseFixed },
        buf_group: { u16 },
        ;;
        flags: i32 = 0
    }

    pub const CODE = sys::IORING_OP_RECV;

    pub fn build(self) -> Entry {
        let RecvBundle { fd, buf_group, flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.__bindgen_anon_3.msg_flags = flags as _;
        sqe.__bindgen_anon_4.buf_group = buf_group;
        sqe.flags |= crate::squeue::Flags::BUFFER_SELECT.bits();
        sqe.ioprio |= sys::IORING_RECVSEND_BUNDLE as u16;
        Entry(sqe)
    }
}

opcode! {
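    /// Multishot receive into bundles of buffers selected from the provided buffer group.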
    pub struct RecvMultiBundle {
        fd: { impl sealed::UseFixed },
        buf_group: { u16 },
        ;;
        flags: i32 = 0
    }

    pub const CODE = sys::IORING_OP_RECV;

    pub fn build(self) -> Entry {
        let RecvMultiBundle { fd, buf_group, flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.__bindgen_anon_3.msg_flags = flags as _;
        sqe.__bindgen_anon_4.buf_group = buf_group;
        sqe.flags |= crate::squeue::Flags::BUFFER_SELECT.bits();
        sqe.ioprio = sys::IORING_RECV_MULTISHOT as _;
        sqe.ioprio |= sys::IORING_RECVSEND_BUNDLE as u16;
        Entry(sqe)
    }
}