// io_uring/submit.rs

1use std::os::unix::io::{AsRawFd, RawFd};
2use std::sync::atomic;
3use std::{io, mem, ptr};
4
5use crate::register::{execute, Probe};
6use crate::sys;
7use crate::types::{CancelBuilder, Timespec};
8use crate::util::{cast_ptr, OwnedFd};
9use crate::Parameters;
10
11use crate::register::Restriction;
12
13use crate::types;
14
/// Interface for submitting submission queue events in an io_uring instance to the kernel for
/// executing and registering files or buffers with the instance.
///
/// io_uring supports both directly performing I/O on buffers and file descriptors and registering
/// them beforehand. Registering is slow, but it makes performing the actual I/O much faster.
pub struct Submitter<'a> {
    // Ring file descriptor, borrowed from the owning ring for the lifetime `'a`.
    fd: &'a OwnedFd,
    // Setup flags and kernel-reported features for this ring instance.
    params: &'a Parameters,

    // Raw pointers into the shared (mmap'd) submission queue ring; only ever
    // read through atomic loads (see `sq_len`, `sq_need_wakeup`, `sq_cq_overflow`).
    sq_head: *const atomic::AtomicU32,
    sq_tail: *const atomic::AtomicU32,
    sq_flags: *const atomic::AtomicU32,
}
28
29impl<'a> Submitter<'a> {
30    #[inline]
31    pub(crate) const fn new(
32        fd: &'a OwnedFd,
33        params: &'a Parameters,
34        sq_head: *const atomic::AtomicU32,
35        sq_tail: *const atomic::AtomicU32,
36        sq_flags: *const atomic::AtomicU32,
37    ) -> Submitter<'a> {
38        Submitter {
39            fd,
40            params,
41            sq_head,
42            sq_tail,
43            sq_flags,
44        }
45    }
46
47    #[inline]
48    fn sq_len(&self) -> usize {
49        unsafe {
50            let head = (*self.sq_head).load(atomic::Ordering::Acquire);
51            let tail = (*self.sq_tail).load(atomic::Ordering::Acquire);
52
53            tail.wrapping_sub(head) as usize
54        }
55    }
56
57    /// Whether the kernel thread has gone to sleep because it waited for too long without
58    /// submission queue entries.
59    #[inline]
60    fn sq_need_wakeup(&self) -> bool {
61        unsafe {
62            (*self.sq_flags).load(atomic::Ordering::Relaxed) & sys::IORING_SQ_NEED_WAKEUP != 0
63        }
64    }
65
66    /// CQ ring is overflown
67    fn sq_cq_overflow(&self) -> bool {
68        unsafe {
69            (*self.sq_flags).load(atomic::Ordering::Relaxed) & sys::IORING_SQ_CQ_OVERFLOW != 0
70        }
71    }
72
73    /// Initiate and/or complete asynchronous I/O. This is a low-level wrapper around
74    /// `io_uring_enter` - see `man io_uring_enter` (or [its online
75    /// version](https://manpages.debian.org/unstable/liburing-dev/io_uring_enter.2.en.html) for
76    /// more details.
77    ///
78    /// You will probably want to use a more high-level API such as
79    /// [`submit`](Self::submit) or [`submit_and_wait`](Self::submit_and_wait).
80    ///
81    /// # Safety
82    ///
83    /// This provides a raw interface so developer must ensure that parameters are correct.
84    pub unsafe fn enter<T: Sized>(
85        &self,
86        to_submit: u32,
87        min_complete: u32,
88        flag: u32,
89        arg: Option<&T>,
90    ) -> io::Result<usize> {
91        let arg = arg
92            .map(|arg| cast_ptr(arg).cast())
93            .unwrap_or_else(ptr::null);
94        let size = mem::size_of::<T>();
95        sys::io_uring_enter(
96            self.fd.as_raw_fd(),
97            to_submit,
98            min_complete,
99            flag,
100            arg,
101            size,
102        )
103        .map(|res| res as _)
104    }
105
    /// Submit all queued submission queue events to the kernel.
    ///
    /// Equivalent to [`submit_and_wait`](Self::submit_and_wait) with `want == 0`,
    /// i.e. it does not wait for any completion events.
    #[inline]
    pub fn submit(&self) -> io::Result<usize> {
        self.submit_and_wait(0)
    }
111
    /// Submit all queued submission queue events to the kernel and wait for at least `want`
    /// completion events to complete.
    ///
    /// Returns the number of entries handed to (or, on the SQPOLL fast path that
    /// skips the syscall, already visible to) the kernel.
    pub fn submit_and_wait(&self, want: usize) -> io::Result<usize> {
        let len = self.sq_len();
        let mut flags = 0;

        // This logic suffers from the fact the sq_cq_overflow and sq_need_wakeup
        // each cause an atomic load of the same variable, self.sq_flags.
        // In the hottest paths, when a server is running with sqpoll,
        // this is going to be hit twice, when once would be sufficient.
        // However, consider that the `SeqCst` barrier required for interpreting
        // the IORING_ENTER_SQ_WAKEUP bit is required in all paths where sqpoll
        // is setup when consolidating the reads.

        let sq_cq_overflow = self.sq_cq_overflow();

        // When IORING_FEAT_NODROP is enabled and CQ overflows, the kernel buffers
        // completion events internally but doesn't automatically flush them when
        // CQ space becomes available. We must explicitly call io_uring_enter()
        // to flush these buffered events, even with SQPOLL enabled.
        //
        // Without this, completions remain stuck in kernel's internal buffer
        // after draining CQ, causing missing completion notifications.
        let need_syscall_for_overflow = sq_cq_overflow && self.params.is_feature_nodrop();

        if want > 0 || self.params.is_setup_iopoll() || sq_cq_overflow {
            flags |= sys::IORING_ENTER_GETEVENTS;
        }

        if self.params.is_setup_sqpoll() {
            // See discussion in [`SubmissionQueue::need_wakeup`].
            atomic::fence(atomic::Ordering::SeqCst);
            if self.sq_need_wakeup() {
                flags |= sys::IORING_ENTER_SQ_WAKEUP;
            } else if want == 0 && !need_syscall_for_overflow {
                // The kernel thread is polling and hasn't fallen asleep, so we don't need to tell
                // it to process events or wake it up

                // However, if the CQ ring is overflown, we need to tell the kernel to process events
                // by calling io_uring_enter with the IORING_ENTER_GETEVENTS flag.
                return Ok(len);
            }
        }

        unsafe { self.enter::<libc::sigset_t>(len as _, want as _, flags, None) }
    }
158
159    /// Submit all queued submission queue events to the kernel and wait for at least `want`
160    /// completion events to complete with additional options
161    ///
162    /// You can specify a set of signals to mask and a timeout for operation, see
163    /// [`SubmitArgs`](types::SubmitArgs) for more details
164    pub fn submit_with_args(
165        &self,
166        want: usize,
167        args: &types::SubmitArgs<'_, '_>,
168    ) -> io::Result<usize> {
169        let len = self.sq_len();
170        let mut flags = sys::IORING_ENTER_EXT_ARG;
171
172        let sq_cq_overflow = self.sq_cq_overflow();
173        let need_syscall = sq_cq_overflow & self.params.is_feature_nodrop();
174
175        if want > 0 || self.params.is_setup_iopoll() || sq_cq_overflow {
176            flags |= sys::IORING_ENTER_GETEVENTS;
177        }
178
179        if self.params.is_setup_sqpoll() {
180            // See discussion in [`SubmissionQueue::need_wakeup`].
181            atomic::fence(atomic::Ordering::SeqCst);
182            if self.sq_need_wakeup() {
183                flags |= sys::IORING_ENTER_SQ_WAKEUP;
184            } else if want == 0 && !need_syscall {
185                // The kernel thread is polling and hasn't fallen asleep, so we don't need to tell
186                // it to process events or wake it up
187                return Ok(len);
188            }
189        }
190
191        unsafe { self.enter(len as _, want as _, flags, Some(&args.args)) }
192    }
193
    /// Wait for the submission queue to have free entries.
    ///
    /// Issues `io_uring_enter` with `IORING_ENTER_SQ_WAIT`, submitting nothing and
    /// waiting for no completions.
    pub fn squeue_wait(&self) -> io::Result<usize> {
        unsafe { self.enter::<libc::sigset_t>(0, 0, sys::IORING_ENTER_SQ_WAIT, None) }
    }
198
199    /// Register in-memory fixed buffers for I/O with the kernel. You can use these buffers with the
200    /// [`ReadFixed`](crate::opcode::ReadFixed) and [`WriteFixed`](crate::opcode::WriteFixed)
201    /// operations.
202    ///
203    /// # Safety
204    ///
205    /// Developers must ensure that the `iov_base` and `iov_len` values are valid and will
206    /// be valid until buffers are unregistered or the ring destroyed, otherwise undefined
207    /// behaviour may occur.
208    pub unsafe fn register_buffers(&self, bufs: &[libc::iovec]) -> io::Result<()> {
209        execute(
210            self.fd.as_raw_fd(),
211            sys::IORING_REGISTER_BUFFERS,
212            bufs.as_ptr().cast(),
213            bufs.len() as _,
214        )
215        .map(drop)
216    }
217
218    /// Update a range of fixed buffers starting at `offset`.
219    ///
220    /// This is required to use buffers registered using
221    /// [`register_buffers_sparse`](Self::register_buffers_sparse),
222    /// although it can be also be used with [`register_buffers`](Self::register_buffers).
223    ///
224    /// See [`register_buffers2`](Self::register_buffers2)
225    /// for more information about resource tagging.
226    ///
227    /// Available since Linux 5.13.
228    ///
229    /// # Safety
230    ///
231    /// Developers must ensure that the `iov_base` and `iov_len` values are valid and will
232    /// be valid until buffers are unregistered or the ring destroyed, otherwise undefined
233    /// behaviour may occur.
234    pub unsafe fn register_buffers_update(
235        &self,
236        offset: u32,
237        bufs: &[libc::iovec],
238        tags: Option<&[u64]>,
239    ) -> io::Result<()> {
240        let nr = tags
241            .as_ref()
242            .map_or(bufs.len(), |tags| bufs.len().min(tags.len()));
243
244        let rr = sys::io_uring_rsrc_update2 {
245            nr: nr as _,
246            data: bufs.as_ptr() as _,
247            tags: tags.map(|tags| tags.as_ptr() as _).unwrap_or(0),
248            offset,
249            ..Default::default()
250        };
251
252        execute(
253            self.fd.as_raw_fd(),
254            sys::IORING_REGISTER_BUFFERS_UPDATE,
255            cast_ptr::<sys::io_uring_rsrc_update2>(&rr).cast(),
256            std::mem::size_of::<sys::io_uring_rsrc_update2>() as _,
257        )
258        .map(drop)
259    }
260
261    /// Variant of [`register_buffers`](Self::register_buffers)
262    /// with resource tagging.
263    ///
264    /// `tags` should be the same length as `bufs` and contain the
265    /// tag value corresponding to the buffer at the same index.
266    ///
267    /// If a tag is zero, then tagging for this particular resource
268    /// (a buffer in this case) is disabled. Otherwise, after the
269    /// resource had been unregistered and it's not used anymore,
270    /// a CQE will be posted with `user_data` set to the specified
271    /// tag and all other fields zeroed.
272    ///
273    /// Available since Linux 5.13.
274    ///
275    /// # Safety
276    ///
277    /// Developers must ensure that the `iov_base` and `iov_len` values are valid and will
278    /// be valid until buffers are unregistered or the ring destroyed, otherwise undefined
279    /// behaviour may occur.
280    pub unsafe fn register_buffers2(&self, bufs: &[libc::iovec], tags: &[u64]) -> io::Result<()> {
281        let rr = sys::io_uring_rsrc_register {
282            nr: bufs.len().min(tags.len()) as _,
283            data: bufs.as_ptr() as _,
284            tags: tags.as_ptr() as _,
285            ..Default::default()
286        };
287        execute(
288            self.fd.as_raw_fd(),
289            sys::IORING_REGISTER_BUFFERS2,
290            cast_ptr::<sys::io_uring_rsrc_register>(&rr).cast(),
291            std::mem::size_of::<sys::io_uring_rsrc_register>() as _,
292        )
293        .map(drop)
294    }
295
296    /// Registers an empty table of nr fixed buffers buffers.
297    ///
298    /// These must be updated before use, using eg.
299    /// [`register_buffers_update`](Self::register_buffers_update).
300    ///
301    /// See [`register_buffers`](Self::register_buffers)
302    /// for more information about fixed buffers.
303    ///
304    /// Available since Linux 5.13.
305    pub fn register_buffers_sparse(&self, nr: u32) -> io::Result<()> {
306        let rr = sys::io_uring_rsrc_register {
307            nr,
308            flags: sys::IORING_RSRC_REGISTER_SPARSE,
309            ..Default::default()
310        };
311        execute(
312            self.fd.as_raw_fd(),
313            sys::IORING_REGISTER_BUFFERS2,
314            cast_ptr::<sys::io_uring_rsrc_register>(&rr).cast(),
315            std::mem::size_of::<sys::io_uring_rsrc_register>() as _,
316        )
317        .map(drop)
318    }
319
320    /// Registers an empty file table of nr_files number of file descriptors. The sparse variant is
321    /// available in kernels 5.19 and later.
322    ///
323    /// Registering a file table is a prerequisite for using any request that
324    /// uses direct descriptors.
325    pub fn register_files_sparse(&self, nr: u32) -> io::Result<()> {
326        let rr = sys::io_uring_rsrc_register {
327            nr,
328            flags: sys::IORING_RSRC_REGISTER_SPARSE,
329            resv2: 0,
330            data: 0,
331            tags: 0,
332        };
333        execute(
334            self.fd.as_raw_fd(),
335            sys::IORING_REGISTER_FILES2,
336            cast_ptr::<sys::io_uring_rsrc_register>(&rr).cast(),
337            mem::size_of::<sys::io_uring_rsrc_register>() as _,
338        )
339        .map(drop)
340    }
341
342    /// Register files for I/O. You can use the registered files with
343    /// [`Fixed`](crate::types::Fixed).
344    ///
345    /// Each fd may be -1, in which case it is considered "sparse", and can be filled in later with
346    /// [`register_files_update`](Self::register_files_update).
347    ///
348    /// Note that this will wait for the ring to idle; it will only return once all active requests
349    /// are complete. Use [`register_files_update`](Self::register_files_update) to avoid this.
350    pub fn register_files(&self, fds: &[RawFd]) -> io::Result<()> {
351        execute(
352            self.fd.as_raw_fd(),
353            sys::IORING_REGISTER_FILES,
354            fds.as_ptr().cast(),
355            fds.len() as _,
356        )
357        .map(drop)
358    }
359
360    /// This operation replaces existing files in the registered file set with new ones,
361    /// either turning a sparse entry (one where fd is equal to -1) into a real one, removing an existing entry (new one is set to -1),
362    /// or replacing an existing entry with a new existing entry. The `offset` parameter specifies
363    /// the offset into the list of registered files at which to start updating files.
364    ///
365    /// You can also perform this asynchronously with the
366    /// [`FilesUpdate`](crate::opcode::FilesUpdate) opcode.
367    pub fn register_files_update(&self, offset: u32, fds: &[RawFd]) -> io::Result<usize> {
368        let fu = sys::io_uring_files_update {
369            offset,
370            resv: 0,
371            fds: fds.as_ptr() as _,
372        };
373        let ret = execute(
374            self.fd.as_raw_fd(),
375            sys::IORING_REGISTER_FILES_UPDATE,
376            cast_ptr::<sys::io_uring_files_update>(&fu).cast(),
377            fds.len() as _,
378        )?;
379        Ok(ret as _)
380    }
381
382    /// Register an eventfd created by [`eventfd`](libc::eventfd) with the io_uring instance.
383    pub fn register_eventfd(&self, eventfd: RawFd) -> io::Result<()> {
384        execute(
385            self.fd.as_raw_fd(),
386            sys::IORING_REGISTER_EVENTFD,
387            cast_ptr::<RawFd>(&eventfd).cast(),
388            1,
389        )
390        .map(drop)
391    }
392
393    /// This works just like [`register_eventfd`](Self::register_eventfd), except notifications are
394    /// only posted for events that complete in an async manner, so requests that complete
395    /// immediately will not cause a notification.
396    pub fn register_eventfd_async(&self, eventfd: RawFd) -> io::Result<()> {
397        execute(
398            self.fd.as_raw_fd(),
399            sys::IORING_REGISTER_EVENTFD_ASYNC,
400            cast_ptr::<RawFd>(&eventfd).cast(),
401            1,
402        )
403        .map(drop)
404    }
405
406    /// Fill in the given [`Probe`] with information about the opcodes supported by io_uring on the
407    /// running kernel.
408    ///
409    /// # Examples
410    ///
411    // This is marked no_run as it is only available from Linux 5.6+, however the latest Ubuntu (on
412    // which CI runs) only has Linux 5.4.
413    /// ```no_run
414    /// # fn main() -> Result<(), Box<dyn std::error::Error>> {
415    /// let io_uring = io_uring::IoUring::new(1)?;
416    /// let mut probe = io_uring::Probe::new();
417    /// io_uring.submitter().register_probe(&mut probe)?;
418    ///
419    /// if probe.is_supported(io_uring::opcode::Read::CODE) {
420    ///     println!("Reading is supported!");
421    /// }
422    /// # Ok(())
423    /// # }
424    /// ```
425    pub fn register_probe(&self, probe: &mut Probe) -> io::Result<()> {
426        execute(
427            self.fd.as_raw_fd(),
428            sys::IORING_REGISTER_PROBE,
429            probe.as_mut_ptr() as *const _,
430            Probe::COUNT as _,
431        )
432        .map(drop)
433    }
434
435    /// Register credentials of the running application with io_uring, and get an id associated with
436    /// these credentials. This ID can then be [passed](crate::squeue::Entry::personality) into
437    /// submission queue entries to issue the request with this process' credentials.
438    ///
439    /// By default, if [`Parameters::is_feature_cur_personality`] is set then requests will use the
440    /// credentials of the task that called [`Submitter::enter`], otherwise they will use the
441    /// credentials of the task that originally registered the io_uring.
442    ///
443    /// [`Parameters::is_feature_cur_personality`]: crate::Parameters::is_feature_cur_personality
444    pub fn register_personality(&self) -> io::Result<u16> {
445        let id = execute(
446            self.fd.as_raw_fd(),
447            sys::IORING_REGISTER_PERSONALITY,
448            ptr::null(),
449            0,
450        )?;
451        Ok(id as u16)
452    }
453
454    /// Unregister all previously registered buffers.
455    ///
456    /// You do not need to explicitly call this before dropping the [`IoUring`](crate::IoUring), as
457    /// it will be cleaned up by the kernel automatically.
458    ///
459    /// Available since Linux 5.1.
460    pub fn unregister_buffers(&self) -> io::Result<()> {
461        execute(
462            self.fd.as_raw_fd(),
463            sys::IORING_UNREGISTER_BUFFERS,
464            ptr::null(),
465            0,
466        )
467        .map(drop)
468    }
469
470    /// Unregister all previously registered files.
471    ///
472    /// You do not need to explicitly call this before dropping the [`IoUring`](crate::IoUring), as
473    /// it will be cleaned up by the kernel automatically.
474    pub fn unregister_files(&self) -> io::Result<()> {
475        execute(
476            self.fd.as_raw_fd(),
477            sys::IORING_UNREGISTER_FILES,
478            ptr::null(),
479            0,
480        )
481        .map(drop)
482    }
483
484    /// Unregister an eventfd file descriptor to stop notifications.
485    pub fn unregister_eventfd(&self) -> io::Result<()> {
486        execute(
487            self.fd.as_raw_fd(),
488            sys::IORING_UNREGISTER_EVENTFD,
489            ptr::null(),
490            0,
491        )
492        .map(drop)
493    }
494
495    /// Unregister a previously registered personality.
496    pub fn unregister_personality(&self, personality: u16) -> io::Result<()> {
497        execute(
498            self.fd.as_raw_fd(),
499            sys::IORING_UNREGISTER_PERSONALITY,
500            ptr::null(),
501            personality as _,
502        )
503        .map(drop)
504    }
505
506    /// Permanently install a feature allowlist. Once this has been called, attempting to perform
507    /// an operation not on the allowlist will fail with `-EACCES`.
508    ///
509    /// This can only be called once, to prevent untrusted code from removing restrictions.
510    pub fn register_restrictions(&self, res: &mut [Restriction]) -> io::Result<()> {
511        execute(
512            self.fd.as_raw_fd(),
513            sys::IORING_REGISTER_RESTRICTIONS,
514            res.as_mut_ptr().cast(),
515            res.len() as _,
516        )
517        .map(drop)
518    }
519
520    /// Enable the rings of the io_uring instance if they have been disabled with
521    /// [`setup_r_disabled`](crate::Builder::setup_r_disabled).
522    pub fn register_enable_rings(&self) -> io::Result<()> {
523        execute(
524            self.fd.as_raw_fd(),
525            sys::IORING_REGISTER_ENABLE_RINGS,
526            ptr::null(),
527            0,
528        )
529        .map(drop)
530    }
531
532    /// Tell io_uring on what CPUs the async workers can run. By default, async workers
533    /// created by io_uring will inherit the CPU mask of its parent. This is usually
534    /// all the CPUs in the system, unless the parent is being run with a limited set.
535    pub fn register_iowq_aff(&self, cpu_set: &libc::cpu_set_t) -> io::Result<()> {
536        execute(
537            self.fd.as_raw_fd(),
538            sys::IORING_REGISTER_IOWQ_AFF,
539            cpu_set as *const _ as *const libc::c_void,
540            mem::size_of::<libc::cpu_set_t>() as u32,
541        )
542        .map(drop)
543    }
544
545    /// Undoes a CPU mask previously set with register_iowq_aff
546    pub fn unregister_iowq_aff(&self) -> io::Result<()> {
547        execute(
548            self.fd.as_raw_fd(),
549            sys::IORING_UNREGISTER_IOWQ_AFF,
550            ptr::null(),
551            0,
552        )
553        .map(drop)
554    }
555
556    /// Get and/or set the limit for number of io_uring worker threads per NUMA
557    /// node. `max[0]` holds the limit for bounded workers, which process I/O
558    /// operations expected to be bound in time, that is I/O on regular files or
559    /// block devices. While `max[1]` holds the limit for unbounded workers,
560    /// which carry out I/O operations that can never complete, for instance I/O
561    /// on sockets. Passing `0` does not change the current limit. Returns
562    /// previous limits on success.
563    pub fn register_iowq_max_workers(&self, max: &mut [u32; 2]) -> io::Result<()> {
564        execute(
565            self.fd.as_raw_fd(),
566            sys::IORING_REGISTER_IOWQ_MAX_WORKERS,
567            max.as_mut_ptr().cast(),
568            max.len() as _,
569        )
570        .map(drop)
571    }
572
    /// Register buffer ring for provided buffers.
    ///
    /// Details can be found in the io_uring_register_buf_ring.3 man page.
    ///
    /// If the register command is not supported, or the ring_entries value exceeds
    /// 32768, the InvalidInput error is returned.
    ///
    /// Available since 5.19.
    ///
    /// # Safety
    ///
    /// Developers must ensure that the `ring_addr` and its length represented by `ring_entries`
    /// are valid and will be valid until the bgid is unregistered or the ring destroyed,
    /// otherwise undefined behaviour may occur.
    #[deprecated(note = "please use `register_buf_ring_with_flags` instead")]
    pub unsafe fn register_buf_ring(
        &self,
        ring_addr: u64,
        ring_entries: u16,
        bgid: u16,
    ) -> io::Result<()> {
        // Delegates with `flags == 0`, the historical behavior of this method.
        self.register_buf_ring_with_flags(ring_addr, ring_entries, bgid, 0)
    }
596
597    /// Register buffer ring for provided buffers.
598    ///
599    /// Details can be found in the io_uring_register_buf_ring.3 man page.
600    ///
601    /// If the register command is not supported, or the ring_entries value exceeds
602    /// 32768, the InvalidInput error is returned.
603    ///
604    /// Available since 5.19.
605    ///
606    /// # Safety
607    ///
608    /// Developers must ensure that the `ring_addr` and its length represented by `ring_entries`
609    /// are valid and will be valid until the bgid is unregistered or the ring destroyed,
610    /// otherwise undefined behaviour may occur.
611    pub unsafe fn register_buf_ring_with_flags(
612        &self,
613        ring_addr: u64,
614        ring_entries: u16,
615        bgid: u16,
616        flags: u16,
617    ) -> io::Result<()> {
618        // The interface type for ring_entries is u32 but the same interface only allows a u16 for
619        // the tail to be specified, so to try and avoid further confusion, we limit the
620        // ring_entries to u16 here too. The value is actually limited to 2^15 (32768) but we can
621        // let the kernel enforce that.
622        let arg = sys::io_uring_buf_reg {
623            ring_addr,
624            ring_entries: ring_entries as _,
625            bgid,
626            flags,
627            ..Default::default()
628        };
629        execute(
630            self.fd.as_raw_fd(),
631            sys::IORING_REGISTER_PBUF_RING,
632            cast_ptr::<sys::io_uring_buf_reg>(&arg).cast(),
633            1,
634        )
635        .map(drop)
636    }
637
638    /// Unregister a previously registered buffer ring.
639    ///
640    /// Available since 5.19.
641    pub fn unregister_buf_ring(&self, bgid: u16) -> io::Result<()> {
642        let arg = sys::io_uring_buf_reg {
643            ring_addr: 0,
644            ring_entries: 0,
645            bgid,
646            ..Default::default()
647        };
648        execute(
649            self.fd.as_raw_fd(),
650            sys::IORING_UNREGISTER_PBUF_RING,
651            cast_ptr::<sys::io_uring_buf_reg>(&arg).cast(),
652            1,
653        )
654        .map(drop)
655    }
656
657    /// Performs a synchronous cancellation request, similar to [AsyncCancel](crate::opcode::AsyncCancel),
658    /// except that it completes synchronously.
659    ///
660    /// Cancellation can target a specific request, or all requests matching some criteria. The
661    /// [`CancelBuilder`] builder supports describing the match criteria for cancellation.
662    ///
663    /// An optional `timeout` can be provided to specify how long to wait for matched requests to be
664    /// canceled. If no timeout is provided, the default is to wait indefinitely.
665    ///
666    /// ### Errors
667    ///
668    /// If no requests are matched, returns:
669    ///
670    /// [io::ErrorKind::NotFound]: `No such file or directory (os error 2)`
671    ///
672    /// If a timeout is supplied, and the timeout elapses prior to all requests being canceled, returns:
673    ///
674    /// [io::ErrorKind::Uncategorized]: `Timer expired (os error 62)`
675    ///
676    /// ### Notes
677    ///
678    /// Only requests which have been submitted to the ring will be considered for cancellation. Requests
679    /// which have been written to the SQ, but not submitted, will not be canceled.
680    ///
681    /// Available since 6.0.
682    pub fn register_sync_cancel(
683        &self,
684        timeout: Option<Timespec>,
685        builder: CancelBuilder,
686    ) -> io::Result<()> {
687        let timespec = timeout.map(|ts| ts.0).unwrap_or(sys::__kernel_timespec {
688            tv_sec: -1,
689            tv_nsec: -1,
690        });
691        let user_data = builder.user_data.unwrap_or(0);
692        let flags = builder.flags.bits();
693        let fd = builder.to_fd();
694
695        let arg = sys::io_uring_sync_cancel_reg {
696            addr: user_data,
697            fd,
698            flags,
699            timeout: timespec,
700            ..Default::default()
701        };
702
703        execute(
704            self.fd.as_raw_fd(),
705            sys::IORING_REGISTER_SYNC_CANCEL,
706            cast_ptr::<sys::io_uring_sync_cancel_reg>(&arg).cast(),
707            1,
708        )
709        .map(drop)
710    }
711
712    /// Register a netdev hw rx queue for zerocopy.
713    ///
714    /// Available since 6.15.
715    pub fn register_ifq(&self, reg: &sys::io_uring_zcrx_ifq_reg) -> io::Result<()> {
716        execute(
717            self.fd.as_raw_fd(),
718            sys::IORING_REGISTER_ZCRX_IFQ,
719            cast_ptr::<sys::io_uring_zcrx_ifq_reg>(reg) as _,
720            1,
721        )
722        .map(drop)
723    }
724}