// rustix_uring/submit.rs

use core::sync::atomic;
use core::{ffi, mem, ptr};

use rustix::fd::{OwnedFd, RawFd};
use rustix::io;

use crate::register::{execute, Probe};
use crate::sys;
use crate::types::{CancelBuilder, Timespec};
use crate::util::cast_ptr;
use crate::Parameters;

use crate::register::Restriction;

use crate::types;

/// Interface for submitting submission queue events to the kernel for execution and for
/// registering files or buffers with an io_uring instance.
///
/// io_uring supports both directly performing I/O on buffers and file descriptors and registering
/// them beforehand. Registering is slow, but it makes performing the actual I/O much faster.
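///
/// # Examples
///
/// A minimal sketch: create a ring, take its `Submitter`, and submit any queued
/// entries (here, none).
///
/// ```no_run
/// # fn main() -> Result<(), Box<dyn std::error::Error>> {
/// let ring = rustix_uring::IoUring::new(8)?;
/// let submitter = ring.submitter();
/// // Nothing has been queued yet, so this submits zero entries.
/// submitter.submit()?;
/// # Ok(())
/// # }
/// ```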
pub struct Submitter<'a> {
    fd: &'a OwnedFd,
    params: &'a Parameters,

    sq_head: *const atomic::AtomicU32,
    sq_tail: *const atomic::AtomicU32,
    sq_flags: *const atomic::AtomicU32,
}

impl<'a> Submitter<'a> {
    #[inline]
    pub(crate) const fn new(
        fd: &'a OwnedFd,
        params: &'a Parameters,
        sq_head: *const atomic::AtomicU32,
        sq_tail: *const atomic::AtomicU32,
        sq_flags: *const atomic::AtomicU32,
    ) -> Submitter<'a> {
        Submitter {
            fd,
            params,
            sq_head,
            sq_tail,
            sq_flags,
        }
    }

    #[inline]
    fn sq_len(&self) -> usize {
        unsafe {
            let head = (*self.sq_head).load(atomic::Ordering::Acquire);
            let tail = (*self.sq_tail).load(atomic::Ordering::Acquire);

            tail.wrapping_sub(head) as usize
        }
    }

    /// Whether the kernel thread has gone to sleep because it waited too long without seeing new
    /// submission queue entries.
    #[inline]
    fn sq_need_wakeup(&self) -> bool {
        unsafe {
            (*self.sq_flags).load(atomic::Ordering::Relaxed)
                & sys::IoringSqFlags::NEED_WAKEUP.bits()
                != 0
        }
    }

    /// Whether the completion queue ring has overflowed.
    fn sq_cq_overflow(&self) -> bool {
        unsafe {
            (*self.sq_flags).load(atomic::Ordering::Relaxed)
                & sys::IoringSqFlags::CQ_OVERFLOW.bits()
                != 0
        }
    }

    /// Initiate and/or complete asynchronous I/O. This is a low-level wrapper around
    /// `io_uring_enter` - see `man io_uring_enter` (or [its online
    /// version](https://manpages.debian.org/unstable/liburing-dev/io_uring_enter.2.en.html)) for
    /// more details.
    ///
    /// You will probably want to use a more high-level API such as
    /// [`submit`](Self::submit) or [`submit_and_wait`](Self::submit_and_wait).
    ///
    /// # Safety
    ///
    /// This provides a raw interface, so the developer must ensure that the parameters are
    /// correct.
    pub unsafe fn enter_sigmask(
        &self,
        to_submit: u32,
        min_complete: u32,
        flag: sys::IoringEnterFlags,
        arg: Option<&sys::KernelSigSet>,
    ) -> io::Result<usize> {
        let result = sys::io_uring_enter_sigmask(self.fd, to_submit, min_complete, flag, arg)?;
        Ok(result as _)
    }

    /// Initiate and/or complete asynchronous I/O. This is a low-level wrapper around
    /// `io_uring_enter` - see `man io_uring_enter` (or [its online
    /// version](https://manpages.debian.org/unstable/liburing-dev/io_uring_enter.2.en.html)) for
    /// more details.
    ///
    /// You will probably want to use a more high-level API such as
    /// [`submit`](Self::submit) or [`submit_and_wait`](Self::submit_and_wait).
    ///
    /// # Safety
    ///
    /// This provides a raw interface, so the developer must ensure that the parameters are
    /// correct.
    pub unsafe fn enter_arg(
        &self,
        to_submit: u32,
        min_complete: u32,
        flag: sys::IoringEnterFlags,
        arg: Option<&sys::io_uring_getevents_arg>,
    ) -> io::Result<usize> {
        let result = sys::io_uring_enter_arg(self.fd, to_submit, min_complete, flag, arg)?;
        Ok(result as _)
    }

    /// Submit all queued submission queue events to the kernel.
    #[inline]
    pub fn submit(&self) -> io::Result<usize> {
        self.submit_and_wait(0)
    }

    /// Submit all queued submission queue events to the kernel and wait for at least `want`
    /// completion events to complete.
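    ///
    /// # Examples
    ///
    /// A minimal sketch: after entries have been pushed onto the submission queue,
    /// submit them and block until at least one completion is available.
    ///
    /// ```no_run
    /// # fn main() -> Result<(), Box<dyn std::error::Error>> {
    /// let ring = rustix_uring::IoUring::new(8)?;
    /// // ... push entries via the ring's submission queue here ...
    /// let submitted = ring.submitter().submit_and_wait(1)?;
    /// println!("submitted {} entries", submitted);
    /// # Ok(())
    /// # }
    /// ```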
    pub fn submit_and_wait(&self, want: usize) -> io::Result<usize> {
        let len = self.sq_len();
        let mut flags = sys::IoringEnterFlags::empty();

        // This logic suffers from the fact that sq_cq_overflow and sq_need_wakeup each
        // perform an atomic load of the same variable, self.sq_flags. In the hottest
        // paths, when a server is running with sqpoll, this flag is therefore loaded
        // twice when once would be sufficient. However, consolidating the reads would
        // not help much: the `SeqCst` barrier required for correctly interpreting the
        // NEED_WAKEUP bit is still required in all paths where sqpoll is set up.

        if want > 0 || self.params.is_setup_iopoll() || self.sq_cq_overflow() {
            flags |= sys::IoringEnterFlags::GETEVENTS;
        }

        if self.params.is_setup_sqpoll() {
            // See discussion in [`SubmissionQueue::need_wakeup`].
            atomic::fence(atomic::Ordering::SeqCst);
            if self.sq_need_wakeup() {
                flags |= sys::IoringEnterFlags::SQ_WAKEUP;
            } else if want == 0 {
                // The kernel thread is polling and hasn't fallen asleep, so we don't need to tell
                // it to process events or wake it up
                return Ok(len);
            }
        }

        unsafe { self.enter_sigmask(len as _, want as _, flags, None) }
    }

    /// Submit all queued submission queue events to the kernel and wait for at least `want`
    /// completion events to complete, passing extra arguments (such as a timeout) via
    /// [`SubmitArgs`](types::SubmitArgs).
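    ///
    /// # Examples
    ///
    /// A minimal sketch, assuming the `SubmitArgs`/`Timespec` builder methods shown
    /// below: wait for one completion, but give up after one second.
    ///
    /// ```no_run
    /// use rustix_uring::types;
    ///
    /// # fn main() -> Result<(), Box<dyn std::error::Error>> {
    /// let ring = rustix_uring::IoUring::new(8)?;
    /// // Assumed builder API: attach a one-second timeout to the wait.
    /// let timeout = types::Timespec::new().sec(1);
    /// let args = types::SubmitArgs::new().timespec(&timeout);
    /// ring.submitter().submit_with_args(1, &args)?;
    /// # Ok(())
    /// # }
    /// ```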
    pub fn submit_with_args(
        &self,
        want: usize,
        args: &types::SubmitArgs<'_, '_>,
    ) -> io::Result<usize> {
        let len = self.sq_len();
        let mut flags = sys::IoringEnterFlags::EXT_ARG;

        if want > 0 || self.params.is_setup_iopoll() || self.sq_cq_overflow() {
            flags |= sys::IoringEnterFlags::GETEVENTS;
        }

        if self.params.is_setup_sqpoll() {
            // See discussion in [`SubmissionQueue::need_wakeup`].
            atomic::fence(atomic::Ordering::SeqCst);
            if self.sq_need_wakeup() {
                flags |= sys::IoringEnterFlags::SQ_WAKEUP;
            } else if want == 0 {
                // The kernel thread is polling and hasn't fallen asleep, so we don't need to tell
                // it to process events or wake it up
                return Ok(len);
            }
        }

        unsafe { self.enter_arg(len as _, want as _, flags, Some(&args.args)) }
    }

    /// Wait for the submission queue to have free entries.
    pub fn squeue_wait(&self) -> io::Result<usize> {
        unsafe { self.enter_sigmask(0, 0, sys::IoringEnterFlags::SQ_WAIT, None) }
    }

    /// Register in-memory fixed buffers for I/O with the kernel. You can use these buffers with the
    /// [`ReadFixed`](crate::opcode::ReadFixed) and [`WriteFixed`](crate::opcode::WriteFixed)
    /// operations.
    ///
    /// # Safety
    ///
    /// Developers must ensure that the `iov_base` and `iov_len` values are valid and will
    /// be valid until buffers are unregistered or the ring destroyed, otherwise undefined
    /// behaviour may occur.
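    ///
    /// # Examples
    ///
    /// A minimal sketch, assuming `sys::iovec` here is the raw `iovec` re-exported via
    /// `rustix::io_uring` with the usual `iov_base`/`iov_len` layout.
    ///
    /// ```no_run
    /// # fn main() -> Result<(), Box<dyn std::error::Error>> {
    /// let ring = rustix_uring::IoUring::new(8)?;
    /// let mut buf = vec![0u8; 4096];
    /// // Assumed iovec type and field layout.
    /// let iov = rustix::io_uring::iovec {
    ///     iov_base: buf.as_mut_ptr().cast(),
    ///     iov_len: buf.len(),
    /// };
    /// // Safety: `buf` stays alive until the buffers are unregistered.
    /// unsafe { ring.submitter().register_buffers(&[iov])? };
    /// # Ok(())
    /// # }
    /// ```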
    pub unsafe fn register_buffers(&self, bufs: &[sys::iovec]) -> io::Result<()> {
        execute(
            self.fd,
            sys::IoringRegisterOp::RegisterBuffers,
            bufs.as_ptr().cast(),
            bufs.len() as _,
        )
    }

    /// Update a range of fixed buffers starting at `offset`.
    ///
    /// This is required to use buffers registered using
    /// [`register_buffers_sparse`](Self::register_buffers_sparse),
    /// although it can also be used with [`register_buffers`](Self::register_buffers).
    ///
    /// See [`register_buffers2`](Self::register_buffers2)
    /// for more information about resource tagging.
    ///
    /// Available since Linux 5.13.
    ///
    /// # Safety
    ///
    /// Developers must ensure that the `iov_base` and `iov_len` values are valid and will
    /// be valid until buffers are unregistered or the ring destroyed, otherwise undefined
    /// behaviour may occur.
    pub unsafe fn register_buffers_update(
        &self,
        offset: u32,
        bufs: &[sys::iovec],
        tags: Option<&[u64]>,
    ) -> io::Result<()> {
        let nr = tags
            .as_ref()
            .map_or(bufs.len(), |tags| bufs.len().min(tags.len()));

        let mut rr = sys::io_uring_rsrc_update2::default();
        rr.nr = nr as _;
        rr.data = sys::io_uring_ptr::new(bufs.as_ptr() as _);
        rr.tags = tags
            .map(|tags| sys::io_uring_ptr::new(tags.as_ptr() as _))
            .unwrap_or(sys::io_uring_ptr::null());
        rr.offset = offset;

        execute(
            self.fd,
            sys::IoringRegisterOp::RegisterBuffersUpdate,
            cast_ptr::<sys::io_uring_rsrc_update2>(&rr).cast(),
            mem::size_of::<sys::io_uring_rsrc_update2>() as _,
        )
        .map(drop)
    }

    /// Variant of [`register_buffers`](Self::register_buffers)
    /// with resource tagging.
    ///
    /// `tags` should be the same length as `bufs` and contain the
    /// tag value corresponding to the buffer at the same index.
    ///
    /// If a tag is zero, then tagging for this particular resource
    /// (a buffer in this case) is disabled. Otherwise, after the
    /// resource has been unregistered and is no longer in use,
    /// a CQE will be posted with `user_data` set to the specified
    /// tag and all other fields zeroed.
    ///
    /// Available since Linux 5.13.
    ///
    /// # Safety
    ///
    /// Developers must ensure that the `iov_base` and `iov_len` values are valid and will
    /// be valid until buffers are unregistered or the ring destroyed, otherwise undefined
    /// behaviour may occur.
    pub unsafe fn register_buffers2(&self, bufs: &[sys::iovec], tags: &[u64]) -> io::Result<()> {
        let mut rr = sys::io_uring_rsrc_register::default();
        rr.nr = bufs.len().min(tags.len()) as _;
        rr.data = sys::io_uring_ptr::new(bufs.as_ptr() as _);
        rr.tags = sys::io_uring_ptr::new(tags.as_ptr() as _);
        execute(
            self.fd,
            sys::IoringRegisterOp::RegisterBuffers2,
            cast_ptr::<sys::io_uring_rsrc_register>(&rr).cast(),
            mem::size_of::<sys::io_uring_rsrc_register>() as _,
        )
        .map(drop)
    }

    /// Registers an empty table of `nr` fixed buffers.
    ///
    /// These must be updated before use, using e.g.
    /// [`register_buffers_update`](Self::register_buffers_update).
    ///
    /// See [`register_buffers`](Self::register_buffers)
    /// for more information about fixed buffers.
    ///
    /// Available since Linux 5.13.
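    ///
    /// # Examples
    ///
    /// A minimal sketch: reserve eight sparse buffer slots, to be filled in later with
    /// [`register_buffers_update`](Self::register_buffers_update).
    ///
    /// ```no_run
    /// # fn main() -> Result<(), Box<dyn std::error::Error>> {
    /// let ring = rustix_uring::IoUring::new(8)?;
    /// ring.submitter().register_buffers_sparse(8)?;
    /// # Ok(())
    /// # }
    /// ```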
    pub fn register_buffers_sparse(&self, nr: u32) -> io::Result<()> {
        let mut rr = sys::io_uring_rsrc_register::default();
        rr.nr = nr;
        rr.flags = sys::IoringRsrcFlags::REGISTER_SPARSE;
        execute(
            self.fd,
            sys::IoringRegisterOp::RegisterBuffers2,
            cast_ptr::<sys::io_uring_rsrc_register>(&rr).cast(),
            mem::size_of::<sys::io_uring_rsrc_register>() as _,
        )
        .map(drop)
    }

    /// Registers an empty file table of `nr` file descriptors. The sparse variant is
    /// available in kernels 5.19 and later.
    ///
    /// Registering a file table is a prerequisite for using any request that
    /// uses direct descriptors.
    pub fn register_files_sparse(&self, nr: u32) -> io::Result<()> {
        let mut rr = sys::io_uring_rsrc_register::default();
        rr.nr = nr;
        rr.flags = sys::IoringRsrcFlags::REGISTER_SPARSE;
        rr.data = sys::io_uring_ptr::null();
        rr.tags = sys::io_uring_ptr::null();
        execute(
            self.fd,
            sys::IoringRegisterOp::RegisterFiles2,
            cast_ptr::<sys::io_uring_rsrc_register>(&rr).cast(),
            mem::size_of::<sys::io_uring_rsrc_register>() as _,
        )
    }

    /// Register files for I/O. You can use the registered files with
    /// [`Fixed`](crate::types::Fixed).
    ///
    /// Each fd may be -1, in which case it is considered "sparse", and can be filled in later with
    /// [`register_files_update`](Self::register_files_update).
    ///
    /// Note that this will wait for the ring to idle; it will only return once all active requests
    /// are complete. Use [`register_files_update`](Self::register_files_update) to avoid this.
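    ///
    /// # Examples
    ///
    /// A minimal sketch: register one open file so that it can be referenced as
    /// [`Fixed`](crate::types::Fixed) index 0.
    ///
    /// ```no_run
    /// use std::os::unix::io::AsRawFd;
    ///
    /// # fn main() -> Result<(), Box<dyn std::error::Error>> {
    /// let ring = rustix_uring::IoUring::new(8)?;
    /// let file = std::fs::File::open("/dev/null")?;
    /// // The file must remain open while it is registered.
    /// ring.submitter().register_files(&[file.as_raw_fd()])?;
    /// # Ok(())
    /// # }
    /// ```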
    pub fn register_files(&self, fds: &[RawFd]) -> io::Result<()> {
        execute(
            self.fd,
            sys::IoringRegisterOp::RegisterFiles,
            fds.as_ptr().cast(),
            fds.len() as _,
        )
    }

    /// This operation replaces existing files in the registered file set with new ones:
    /// it can turn a sparse entry (one where fd is equal to -1) into a real one, remove
    /// an existing entry (the new one is set to -1), or replace an existing entry with a
    /// new existing entry. The `offset` parameter specifies the offset into the list of
    /// registered files at which to start updating files.
    ///
    /// You can also perform this asynchronously with the
    /// [`FilesUpdate`](crate::opcode::FilesUpdate) opcode.
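    ///
    /// # Examples
    ///
    /// A minimal sketch: fill slot 0 of a sparse file table with a real descriptor.
    ///
    /// ```no_run
    /// use std::os::unix::io::AsRawFd;
    ///
    /// # fn main() -> Result<(), Box<dyn std::error::Error>> {
    /// let ring = rustix_uring::IoUring::new(8)?;
    /// ring.submitter().register_files_sparse(4)?;
    /// let file = std::fs::File::open("/dev/null")?;
    /// ring.submitter().register_files_update(0, &[file.as_raw_fd()])?;
    /// # Ok(())
    /// # }
    /// ```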
    pub fn register_files_update(&self, offset: u32, fds: &[RawFd]) -> io::Result<()> {
        let mut fu = sys::io_uring_files_update::default();
        fu.offset = offset;
        fu.fds = sys::io_uring_ptr::new(fds.as_ptr() as _);
        execute(
            self.fd,
            sys::IoringRegisterOp::RegisterFilesUpdate,
            cast_ptr::<sys::io_uring_files_update>(&fu).cast(),
            fds.len() as _,
        )
    }

    /// Register an eventfd created by [`eventfd`](rustix::event::eventfd) with the io_uring instance.
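    ///
    /// # Examples
    ///
    /// A minimal sketch, assuming `rustix::event::eventfd` for creating the eventfd;
    /// completions on the ring are then signalled through it.
    ///
    /// ```no_run
    /// use rustix::event::{eventfd, EventfdFlags};
    /// use rustix::fd::AsRawFd;
    ///
    /// # fn main() -> Result<(), Box<dyn std::error::Error>> {
    /// let ring = rustix_uring::IoUring::new(8)?;
    /// let efd = eventfd(0, EventfdFlags::empty())?;
    /// ring.submitter().register_eventfd(efd.as_raw_fd())?;
    /// # Ok(())
    /// # }
    /// ```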
    pub fn register_eventfd(&self, eventfd: RawFd) -> io::Result<()> {
        execute(
            self.fd,
            sys::IoringRegisterOp::RegisterEventfd,
            cast_ptr::<RawFd>(&eventfd).cast(),
            1,
        )
    }

    /// This works just like [`register_eventfd`](Self::register_eventfd), except notifications are
    /// only posted for events that complete in an async manner, so requests that complete
    /// immediately will not cause a notification.
    pub fn register_eventfd_async(&self, eventfd: RawFd) -> io::Result<()> {
        execute(
            self.fd,
            sys::IoringRegisterOp::RegisterEventfdAsync,
            cast_ptr::<RawFd>(&eventfd).cast(),
            1,
        )
    }

386
387    /// Fill in the given [`Probe`] with information about the opcodes supported by io_uring on the
388    /// running kernel.
389    ///
390    /// # Examples
391    ///
392    // This is marked no_run as it is only available from Linux 5.6+, however the latest Ubuntu (on
393    // which CI runs) only has Linux 5.4.
394    /// ```no_run
395    /// # fn main() -> Result<(), Box<dyn std::error::Error>> {
396    /// let io_uring = rustix_uring::IoUring::new(1)?;
397    /// let mut probe = rustix_uring::Probe::new();
398    /// io_uring.submitter().register_probe(&mut probe)?;
399    ///
400    /// if probe.is_supported(rustix_uring::opcode::Read::CODE) {
401    ///     println!("Reading is supported!");
402    /// }
403    /// # Ok(())
404    /// # }
405    /// ```
406    pub fn register_probe(&self, probe: &mut Probe) -> io::Result<()> {
407        execute(
408            self.fd,
409            sys::IoringRegisterOp::RegisterProbe,
410            probe.as_mut_ptr() as *const _,
411            Probe::COUNT as _,
412        )
413    }
414
    /// Register credentials of the running application with io_uring, and get an id associated with
    /// these credentials. This ID can then be [passed](crate::squeue::Entry::personality) into
    /// submission queue entries to issue the request with this process' credentials.
    ///
    /// By default, if [`Parameters::is_feature_cur_personality`] is set then requests will use the
    /// credentials of the task that called [`enter_sigmask`](Self::enter_sigmask) or
    /// [`enter_arg`](Self::enter_arg), otherwise they will use the credentials of the task that
    /// originally registered the io_uring.
    ///
    /// [`Parameters::is_feature_cur_personality`]: crate::Parameters::is_feature_cur_personality
    pub fn register_personality(&self) -> io::Result<()> {
        execute(
            self.fd,
            sys::IoringRegisterOp::RegisterPersonality,
            ptr::null(),
            0,
        )?;
        Ok(())
    }

    /// Unregister all previously registered buffers.
    ///
    /// You do not need to explicitly call this before dropping the [`IoUring`](crate::IoUring), as
    /// it will be cleaned up by the kernel automatically.
    ///
    /// Available since Linux 5.1.
    pub fn unregister_buffers(&self) -> io::Result<()> {
        execute(
            self.fd,
            sys::IoringRegisterOp::UnregisterBuffers,
            ptr::null(),
            0,
        )
    }

    /// Unregister all previously registered files.
    ///
    /// You do not need to explicitly call this before dropping the [`IoUring`](crate::IoUring), as
    /// it will be cleaned up by the kernel automatically.
    pub fn unregister_files(&self) -> io::Result<()> {
        execute(
            self.fd,
            sys::IoringRegisterOp::UnregisterFiles,
            ptr::null(),
            0,
        )
    }

    /// Unregister an eventfd file descriptor to stop notifications.
    pub fn unregister_eventfd(&self) -> io::Result<()> {
        execute(
            self.fd,
            sys::IoringRegisterOp::UnregisterEventfd,
            ptr::null(),
            0,
        )
    }

    /// Unregister a previously registered personality.
    pub fn unregister_personality(&self, personality: u16) -> io::Result<()> {
        execute(
            self.fd,
            sys::IoringRegisterOp::UnregisterPersonality,
            ptr::null(),
            personality as _,
        )
    }

    /// Permanently install a feature allowlist. Once this has been called, attempting to perform
    /// an operation not on the allowlist will fail with `-EACCES`.
    ///
    /// This can only be called once, to prevent untrusted code from removing restrictions.
    pub fn register_restrictions(&self, res: &mut [Restriction]) -> io::Result<()> {
        execute(
            self.fd,
            sys::IoringRegisterOp::RegisterRestrictions,
            res.as_mut_ptr().cast(),
            res.len() as _,
        )
    }

    /// Enable the rings of the io_uring instance if they have been disabled with
    /// [`setup_r_disabled`](crate::Builder::setup_r_disabled).
    pub fn register_enable_rings(&self) -> io::Result<()> {
        execute(
            self.fd,
            sys::IoringRegisterOp::RegisterEnableRings,
            ptr::null(),
            0,
        )
    }

    /// Tell io_uring on what CPUs the async workers can run. By default, async workers
    /// created by io_uring will inherit the CPU mask of their parent. This is usually
    /// all the CPUs in the system, unless the parent is being run with a limited set.
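    ///
    /// # Examples
    ///
    /// A minimal sketch, assuming `rustix::thread::CpuSet`'s `new`/`set` methods:
    /// restrict async workers to CPU 0.
    ///
    /// ```no_run
    /// # fn main() -> Result<(), Box<dyn std::error::Error>> {
    /// let ring = rustix_uring::IoUring::new(8)?;
    /// let mut cpu_set = rustix::thread::CpuSet::new();
    /// cpu_set.set(0);
    /// ring.submitter().register_iowq_aff(&cpu_set)?;
    /// # Ok(())
    /// # }
    /// ```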
    pub fn register_iowq_aff(&self, cpu_set: &rustix::thread::CpuSet) -> io::Result<()> {
        execute(
            self.fd,
            sys::IoringRegisterOp::RegisterIowqAff,
            (cpu_set as *const rustix::thread::CpuSet).cast(),
            mem::size_of::<rustix::thread::CpuSet>() as u32,
        )
        .map(drop)
    }

    /// Undoes a CPU mask previously set with [`register_iowq_aff`](Self::register_iowq_aff).
    pub fn unregister_iowq_aff(&self) -> io::Result<()> {
        execute(
            self.fd,
            sys::IoringRegisterOp::UnregisterIowqAff,
            ptr::null(),
            0,
        )
        .map(drop)
    }

    /// Get and/or set the limit for number of io_uring worker threads per NUMA
    /// node. `max[0]` holds the limit for bounded workers, which process I/O
    /// operations expected to be bounded in time, that is, I/O on regular files or
    /// block devices, while `max[1]` holds the limit for unbounded workers, which
    /// carry out I/O operations that may never complete, for instance I/O on
    /// sockets. Passing `0` for an entry leaves the current limit unchanged. On
    /// success, the previous limits are written back into `max`.
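    ///
    /// # Examples
    ///
    /// A minimal sketch: cap bounded workers at 4, leave the unbounded limit
    /// unchanged, and read back the previous limits.
    ///
    /// ```no_run
    /// # fn main() -> Result<(), Box<dyn std::error::Error>> {
    /// let ring = rustix_uring::IoUring::new(8)?;
    /// let mut max = [4u32, 0];
    /// ring.submitter().register_iowq_max_workers(&mut max)?;
    /// println!("previous limits: bounded={}, unbounded={}", max[0], max[1]);
    /// # Ok(())
    /// # }
    /// ```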
    pub fn register_iowq_max_workers(&self, max: &mut [u32; 2]) -> io::Result<()> {
        execute(
            self.fd,
            sys::IoringRegisterOp::RegisterIowqMaxWorkers,
            max.as_mut_ptr().cast(),
            max.len() as _,
        )
    }

    /// Register a buffer ring for provided buffers.
    ///
    /// Details can be found in the io_uring_register_buf_ring.3 man page.
    ///
    /// If the register command is not supported, or the `ring_entries` value exceeds
    /// 32768, an `InvalidInput` error is returned.
    ///
    /// Available since 5.19.
    ///
    /// # Safety
    ///
    /// Developers must ensure that the `ring_addr` and its length represented by `ring_entries`
    /// are valid and will be valid until the bgid is unregistered or the ring destroyed,
    /// otherwise undefined behaviour may occur.
    pub unsafe fn register_buf_ring(
        &self,
        ring_addr: *mut ffi::c_void,
        ring_entries: u16,
        bgid: u16,
    ) -> io::Result<()> {
        // The interface type for ring_entries is u32 but the same interface only allows a u16 for
        // the tail to be specified, so to try and avoid further confusion, we limit the
        // ring_entries to u16 here too. The value is actually limited to 2^15 (32768) but we can
        // let the kernel enforce that.
        let mut arg = sys::io_uring_buf_reg::default();
        arg.ring_addr = ring_addr.into();
        arg.ring_entries = ring_entries as _;
        arg.bgid = bgid;
        execute(
            self.fd,
            sys::IoringRegisterOp::RegisterPbufRing,
            cast_ptr::<sys::io_uring_buf_reg>(&arg).cast(),
            1,
        )
    }

    /// Unregister a previously registered buffer ring.
    ///
    /// Available since 5.19.
    pub fn unregister_buf_ring(&self, bgid: u16) -> io::Result<()> {
        let mut arg = sys::io_uring_buf_reg::default();
        arg.ring_addr = sys::io_uring_ptr::null();
        arg.ring_entries = 0;
        arg.bgid = bgid;
        execute(
            self.fd,
            sys::IoringRegisterOp::UnregisterPbufRing,
            cast_ptr::<sys::io_uring_buf_reg>(&arg).cast(),
            1,
        )
    }

    /// Performs a cancellation request similar to [`AsyncCancel`](crate::opcode::AsyncCancel),
    /// except that it completes synchronously rather than posting a completion event.
    ///
    /// Cancellation can target a specific request, or all requests matching some criteria. The
    /// [`CancelBuilder`] builder supports describing the match criteria for cancellation.
    ///
    /// An optional `timeout` can be provided to specify how long to wait for matched requests to be
    /// canceled. If no timeout is provided, the default is to wait indefinitely.
    ///
    /// ### Errors
    ///
    /// If no requests are matched, returns:
    ///
    /// [io::ErrorKind::NotFound]: `No such file or directory (os error 2)`
    ///
    /// If a timeout is supplied, and the timeout elapses prior to all requests being canceled, returns:
    ///
    /// [io::ErrorKind::Uncategorized]: `Timer expired (os error 62)`
    ///
    /// ### Notes
    ///
    /// Only requests which have been submitted to the ring will be considered for cancellation. Requests
    /// which have been written to the SQ, but not submitted, will not be canceled.
    ///
    /// Available since 6.0.
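    ///
    /// # Examples
    ///
    /// A minimal sketch, assuming the `CancelBuilder::user_data` constructor and the
    /// `Timespec` builder: cancel the request submitted with user data 42, waiting up
    /// to one second for it to be canceled.
    ///
    /// ```no_run
    /// use rustix_uring::types::{CancelBuilder, Timespec};
    ///
    /// # fn main() -> Result<(), Box<dyn std::error::Error>> {
    /// let ring = rustix_uring::IoUring::new(8)?;
    /// let timeout = Timespec::new().sec(1);
    /// ring.submitter()
    ///     .register_sync_cancel(Some(timeout), CancelBuilder::user_data(42))?;
    /// # Ok(())
    /// # }
    /// ```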
    pub fn register_sync_cancel(
        &self,
        timeout: Option<Timespec>,
        builder: CancelBuilder,
    ) -> io::Result<()> {
        let timespec = timeout.map(|ts| ts.0).unwrap_or(sys::Timespec {
            tv_sec: -1,
            tv_nsec: -1,
        });
        let user_data = builder.user_data;
        let flags = sys::IoringAsyncCancelFlags::from_bits_retain(builder.flags.bits());
        let fd = builder.to_fd();

        let mut arg = sys::io_uring_sync_cancel_reg::default();
        arg.addr = user_data;
        arg.fd = fd;
        arg.flags = flags;
        arg.timeout = timespec;

        execute(
            self.fd,
            sys::IoringRegisterOp::RegisterSyncCancel,
            cast_ptr::<sys::io_uring_sync_cancel_reg>(&arg).cast(),
            1,
        )
    }
}