rustix_uring/
types.rs

1//! Common Linux types not provided by libc.
2
pub(crate) mod sealed {
    use super::{Fd, Fixed};
    use rustix::fd::RawFd;

    /// Either a raw file descriptor or an index into the ring's
    /// registered-files table.
    #[derive(Debug)]
    pub enum Target {
        Fd(RawFd),
        Fixed(u32),
    }

    /// Conversion trait for opcodes that accept only a raw file descriptor.
    ///
    /// Lives in a `pub(crate)` module, so downstream crates can see the
    /// bound but cannot implement it (sealed-trait pattern).
    pub trait UseFd: Sized {
        fn into(self) -> RawFd;
    }

    /// Conversion trait for opcodes that accept either a raw file
    /// descriptor or a registered (fixed) file index. Sealed like [`UseFd`].
    pub trait UseFixed: Sized {
        fn into(self) -> Target;
    }

    impl UseFd for Fd {
        #[inline]
        fn into(self) -> RawFd {
            self.0
        }
    }

    impl UseFixed for Fd {
        #[inline]
        fn into(self) -> Target {
            Target::Fd(self.0)
        }
    }

    impl UseFixed for Fixed {
        #[inline]
        fn into(self) -> Target {
            Target::Fixed(self.0)
        }
    }
}
42
43use crate::sys;
44use crate::util::{cast_ptr, unwrap_nonzero, unwrap_u32};
45use bitflags::bitflags;
46use core::convert::TryFrom;
47use core::marker::PhantomData;
48use core::num::NonZeroU32;
49use rustix::fd::RawFd;
50
51pub use sys::ReadWriteFlags as RwFlags;
52pub use sys::{
53    iovec, Advice, AtFlags, EpollEvent, Mode, MsgHdr, OFlags, RenameFlags, ResolveFlags,
54    SocketAddrLen, SocketAddrOpaque, Statx, StatxFlags,
55};
56
/// A file descriptor that has not been registered with io_uring.
#[derive(Debug, Clone, Copy)]
#[repr(transparent)] // same layout as `RawFd`
pub struct Fd(pub RawFd);
61
/// A file descriptor that has been registered with io_uring using
/// [`Submitter::register_files`](crate::Submitter::register_files) or [`Submitter::register_files_sparse`](crate::Submitter::register_files_sparse).
/// This can reduce overhead compared to using [`Fd`] in some cases.
#[derive(Debug, Clone, Copy)]
#[repr(transparent)] // same layout as `u32` (index into the registered-files table)
pub struct Fixed(pub u32);
68
bitflags! {
    /// Options for [`Timeout`](super::Timeout).
    ///
    /// The default behavior is to treat the timespec as a relative time interval. `flags` may
    /// contain [`types::TimeoutFlags::ABS`] to indicate the timespec represents an absolute
    /// time. When an absolute time is being specified, the kernel will use its monotonic clock
    /// unless one of the following flags is set (they may not both be set):
    /// [`types::TimeoutFlags::BOOTTIME`] or [`types::TimeoutFlags::REALTIME`].
    ///
    /// The default behavior when the timeout expires is to return a CQE with -libc::ETIME in
    /// the res field. To change this behavior to have zero returned, include
    /// [`types::TimeoutFlags::ETIME_SUCCESS`].
    #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
    pub struct TimeoutFlags: u32 {
        /// Interpret the timespec as an absolute time rather than a relative interval.
        const ABS = sys::IoringTimeoutFlags::ABS.bits();

        /// With [`ABS`](Self::ABS), use the boot-time clock instead of the monotonic clock.
        const BOOTTIME = sys::IoringTimeoutFlags::BOOTTIME.bits();

        /// With [`ABS`](Self::ABS), use the realtime clock instead of the monotonic clock.
        const REALTIME = sys::IoringTimeoutFlags::REALTIME.bits();

        /// Update an existing linked timeout; maps to `IoringTimeoutFlags::UPDATE`.
        const LINK_TIMEOUT_UPDATE = sys::IoringTimeoutFlags::UPDATE.bits();

        /// Report timeout expiration as success (res = 0) instead of `-ETIME`.
        const ETIME_SUCCESS = sys::IoringTimeoutFlags::ETIME_SUCCESS.bits();
    }
}
94
bitflags! {
    /// Options for [`Fsync`](super::Fsync).
    #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
    pub struct FsyncFlags: u32 {
        /// Sync data and only the metadata needed to retrieve it
        /// (cf. `fdatasync(2)`).
        const DATASYNC = sys::IoringFsyncFlags::DATASYNC.bits();
    }
}
102
bitflags! {
    /// Options for [`AsyncCancel`](super::AsyncCancel) and
    /// [`Submitter::register_sync_cancel`](super::Submitter::register_sync_cancel).
    ///
    /// Constructed internally via [`CancelBuilder`].
    #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
    pub(crate) struct AsyncCancelFlags: u32 {
        /// Cancel all requests that match the given criteria, rather
        /// than just canceling the first one found.
        ///
        /// Available since 5.19.
        const ALL = sys::IoringAsyncCancelFlags::ALL.bits();

        /// Match based on the file descriptor used in the original
        /// request rather than the user_data.
        ///
        /// Available since 5.19.
        const FD = sys::IoringAsyncCancelFlags::FD.bits();

        /// Match any request in the ring, regardless of user_data or
        /// file descriptor.  Can be used to cancel any pending
        /// request in the ring.
        ///
        /// Available since 5.19.
        const ANY = sys::IoringAsyncCancelFlags::ANY.bits();

        /// Match based on the fixed file descriptor used in the original
        /// request rather than the user_data.
        ///
        /// Available since 6.0.
        const FD_FIXED = sys::IoringAsyncCancelFlags::FD_FIXED.bits();
    }
}
134
/// Wrapper around `open_how` as used in [the `openat2(2)` system
/// call](https://man7.org/linux/man-pages/man2/openat2.2.html).
#[derive(Default, Debug, Clone, Copy)]
#[repr(transparent)] // same layout as the raw `open_how` struct
pub struct OpenHow(sys::open_how);
140
impl OpenHow {
    /// Create a zeroed `open_how`: no flags, mode 0, no resolve restrictions.
    pub const fn new() -> Self {
        OpenHow(sys::open_how::zeroed())
    }

    /// Set the open flags (`O_*`); see `openat2(2)`.
    pub const fn flags(mut self, flags: OFlags) -> Self {
        self.0.flags = flags.bits() as _;
        self
    }

    /// Set the file mode; see `openat2(2)`.
    pub const fn mode(mut self, mode: Mode) -> Self {
        self.0.mode = mode.bits() as _;
        self
    }

    /// Set `RESOLVE_*` path-resolution restrictions; see `openat2(2)`.
    pub const fn resolve(mut self, resolve: ResolveFlags) -> Self {
        self.0.resolve = resolve;
        self
    }
}
161
/// Wrapper around the kernel `timespec` type (seconds plus nanoseconds).
#[derive(Debug, Clone, Copy)]
#[repr(transparent)]
pub struct Timespec(pub(crate) sys::Timespec);
165
166impl Default for Timespec {
167    fn default() -> Self {
168        Self(sys::Timespec {
169            tv_sec: 0,
170            tv_nsec: 0,
171        })
172    }
173}
174
impl Timespec {
    /// Create a zeroed timespec (0 seconds, 0 nanoseconds).
    #[inline]
    pub const fn new() -> Self {
        Timespec(sys::Timespec {
            tv_sec: 0,
            tv_nsec: 0,
        })
    }

    /// Set the whole-seconds component.
    #[inline]
    pub const fn sec(mut self, sec: u64) -> Self {
        // `as _` converts to whatever integer type the kernel's tv_sec uses.
        self.0.tv_sec = sec as _;
        self
    }

    /// Set the nanoseconds component.
    #[inline]
    pub const fn nsec(mut self, nsec: u32) -> Self {
        self.0.tv_nsec = nsec as _;
        self
    }
}
196
197impl From<core::time::Duration> for Timespec {
198    fn from(value: core::time::Duration) -> Self {
199        Timespec::new()
200            .sec(value.as_secs())
201            .nsec(value.subsec_nanos())
202    }
203}
204
205/// Submit arguments
206///
207/// Note that arguments that exceed their lifetime will fail to compile.
208///
209/// ```compile_fail
210/// use rustix_uring::types::{ SubmitArgs, Timespec };
211///
212/// let sigmask: libc::sigset_t = unsafe { std::mem::zeroed() };
213///
214/// let mut args = SubmitArgs::new();
215///
216/// {
217///     let ts = Timespec::new();
218///     args = args.timespec(&ts);
219///     args = args.sigmask(&sigmask);
220/// }
221///
222/// drop(args);
223/// ```
#[derive(Default, Debug, Clone, Copy)]
pub struct SubmitArgs<'prev: 'now, 'now> {
    // Raw kernel argument block; stores raw pointers into borrowed data.
    pub(crate) args: sys::io_uring_getevents_arg,
    // These markers tie the two most recently stored borrows to `args`,
    // so the raw pointers inside cannot outlive the data they point at.
    prev: PhantomData<&'prev ()>,
    now: PhantomData<&'now ()>,
}
230
impl<'prev, 'now> SubmitArgs<'prev, 'now> {
    /// Create an empty argument set: no signal mask and no timeout.
    #[inline]
    pub const fn new() -> SubmitArgs<'static, 'static> {
        let args = sys::io_uring_getevents_arg {
            sigmask: sys::io_uring_ptr::null(),
            sigmask_sz: 0,
            min_wait_usec: 0,
            ts: sys::io_uring_ptr::null(),
        };

        SubmitArgs {
            args,
            prev: PhantomData,
            now: PhantomData,
        }
    }

    /// Store a pointer to a signal mask in the argument block.
    ///
    /// The returned value borrows `sigmask` for `'new`; the previous `'now`
    /// borrow shifts into the `'prev` slot so both stay alive as long as
    /// the arguments do.
    #[inline]
    pub fn sigmask<'new>(mut self, sigmask: &'new sys::KernelSigSet) -> SubmitArgs<'now, 'new> {
        self.args.sigmask = sys::io_uring_ptr::new(cast_ptr(sigmask) as _);
        self.args.sigmask_sz = core::mem::size_of::<sys::KernelSigSet>() as _;

        SubmitArgs {
            args: self.args,
            prev: self.now,
            now: PhantomData,
        }
    }

    /// Store a pointer to a timeout in the argument block.
    ///
    /// Lifetime bookkeeping matches [`Self::sigmask`]: the new borrow
    /// becomes `'now` and the previous one is kept as `'prev`.
    #[inline]
    pub fn timespec<'new>(mut self, timespec: &'new Timespec) -> SubmitArgs<'now, 'new> {
        self.args.ts = sys::io_uring_ptr::new(cast_ptr(timespec) as _);

        SubmitArgs {
            args: self.args,
            prev: self.now,
            now: PhantomData,
        }
    }
}
271
/// A single `io_uring_buf` entry in a kernel buffer ring.
#[repr(transparent)]
pub struct BufRingEntry(sys::io_uring_buf);
274
/// An entry in a buf_ring that allows setting the address, length and buffer id.
#[allow(clippy::len_without_is_empty)]
impl BufRingEntry {
    /// Sets the entry addr.
    pub fn set_addr(&mut self, addr: *mut core::ffi::c_void) {
        self.0.addr.ptr = addr;
    }

    /// Returns the entry addr.
    pub fn addr(&self) -> *mut core::ffi::c_void {
        self.0.addr.ptr
    }

    /// Sets the entry len.
    pub fn set_len(&mut self, len: u32) {
        self.0.len = len;
    }

    /// Returns the entry len.
    pub fn len(&self) -> u32 {
        self.0.len
    }

    /// Sets the entry bid.
    pub fn set_bid(&mut self, bid: u16) {
        self.0.bid = bid;
    }

    /// Returns the entry bid.
    pub fn bid(&self) -> u16 {
        self.0.bid
    }

    /// The offset to the ring's tail field given the ring's base address.
    ///
    /// The caller should ensure the ring's base address is aligned with the system's page size,
    /// per the uring interface requirements.
    ///
    /// # Safety
    ///
    /// The ptr will be dereferenced in order to determine the address of the tail field,
    /// so the caller is responsible for passing in a valid pointer. And not just
    /// a valid pointer type, but also the argument must be the address to the first entry
    /// of the buf_ring for the tail field to even be considered the tail field of the ring.
    /// The entry must also be properly initialized.
    pub unsafe fn tail(ring_base: *const BufRingEntry) -> *const u16 {
        // `io_uring_buf_ring` overlays the first buffer entry: its tail
        // counter lives in the `tail_or_bufs` union, so reinterpret the
        // first entry and project to the tail field without creating a
        // reference (`addr_of!` keeps this a raw-pointer operation).
        core::ptr::addr_of!(
            (*ring_base.cast::<sys::io_uring_buf_ring>())
                .tail_or_bufs
                .tail
                .as_ref()
                .tail
        )
    }
}
330
/// A destination slot for sending fixed resources
/// (e.g. [`opcode::MsgRingSendFd`](crate::opcode::MsgRingSendFd)).
#[derive(Debug, Clone, Copy)]
pub struct DestinationSlot {
    /// Fixed slot as indexed by the kernel (target+1).
    ///
    /// Always >= 1: the kernel-facing value is the user-facing slot plus one.
    dest: NonZeroU32,
}
338
339impl DestinationSlot {
340    // SAFETY: kernel constant, `IORING_FILE_INDEX_ALLOC` is always > 0.
341    const AUTO_ALLOC: NonZeroU32 =
342        unwrap_nonzero(NonZeroU32::new(sys::IORING_FILE_INDEX_ALLOC as u32));
343
344    /// Use an automatically allocated target slot.
345    pub const fn auto_target() -> Self {
346        Self {
347            dest: DestinationSlot::AUTO_ALLOC,
348        }
349    }
350
351    /// Try to use a given target slot.
352    ///
353    /// Valid slots are in the range from `0` to `u32::MAX - 2` inclusive.
354    pub fn try_from_slot_target(target: u32) -> Result<Self, u32> {
355        // SAFETY: kernel constant, `IORING_FILE_INDEX_ALLOC` is always >= 2.
356        const MAX_INDEX: u32 = unwrap_u32(DestinationSlot::AUTO_ALLOC.get().checked_sub(2));
357
358        if target > MAX_INDEX {
359            return Err(target);
360        }
361
362        let kernel_index = target.saturating_add(1);
363        // SAFETY: by construction, always clamped between 1 and IORING_FILE_INDEX_ALLOC-1.
364        debug_assert!(0 < kernel_index && kernel_index < DestinationSlot::AUTO_ALLOC.get());
365        let dest = NonZeroU32::new(kernel_index).unwrap();
366
367        Ok(Self { dest })
368    }
369
370    pub(crate) fn kernel_index_arg(&self) -> u32 {
371        self.dest.get()
372    }
373}
374
/// Helper structure for parsing the result of a multishot [`opcode::RecvMsg`](crate::opcode::RecvMsg).
#[derive(Debug)]
pub struct RecvMsgOut<'buf> {
    // Header copied (possibly unaligned) from the front of the result buffer.
    header: sys::io_uring_recvmsg_out,
    /// The fixed length of the name field, in bytes.
    ///
    /// If the incoming name data is larger than this, it gets truncated to this.
    /// If it is smaller, it gets 0-padded to fill the whole field. In either case,
    /// this fixed amount of space is reserved in the result buffer.
    msghdr_name_len: usize,

    // Borrowed sub-slices of the result buffer, in buffer-layout order:
    // name, then control, then payload.
    name_data: &'buf [u8],
    control_data: &'buf [u8],
    payload_data: &'buf [u8],
}
390
impl<'buf> RecvMsgOut<'buf> {
    // The fixed-size header precedes all variable-length data in the buffer.
    const DATA_START: usize = core::mem::size_of::<sys::io_uring_recvmsg_out>();

    /// Parse the data buffered upon completion of a `RecvMsg` multishot operation.
    ///
    /// `buffer` is the whole buffer previously provided to the ring, while `msghdr`
    /// is the same content provided as input to the corresponding SQE
    /// (only `msg_namelen` and `msg_controllen` fields are relevant).
    #[allow(clippy::result_unit_err)]
    #[allow(clippy::useless_conversion)]
    pub fn parse(buffer: &'buf [u8], msghdr: &MsgHdr) -> Result<Self, ()> {
        let msghdr_name_len = usize::try_from(msghdr.msg_namelen).unwrap();
        let msghdr_control_len = usize::try_from(msghdr.msg_controllen).unwrap();

        // Reject the buffer if it cannot hold header + name field + control
        // field (or if that total overflows `usize`).
        if Self::DATA_START
            .checked_add(msghdr_name_len)
            .and_then(|acc| acc.checked_add(msghdr_control_len))
            .map(|header_len| buffer.len() < header_len)
            .unwrap_or(true)
        {
            return Err(());
        }
        // SAFETY: buffer (minimum) length is checked here above.
        let header = unsafe {
            buffer
                .as_ptr()
                .cast::<sys::io_uring_recvmsg_out>()
                .read_unaligned()
        };

        // min is used because the header may indicate the true size of the data
        // while what we received was truncated.
        let (name_data, control_start) = {
            let name_start = Self::DATA_START;
            let name_data_end =
                name_start + usize::min(usize::try_from(header.namelen).unwrap(), msghdr_name_len);
            let name_field_end = name_start + msghdr_name_len;
            (&buffer[name_start..name_data_end], name_field_end)
        };
        let (control_data, payload_start) = {
            let control_data_end = control_start
                + usize::min(
                    usize::try_from(header.controllen).unwrap(),
                    msghdr_control_len,
                );
            let control_field_end = control_start + msghdr_control_len;
            (&buffer[control_start..control_data_end], control_field_end)
        };
        let payload_data = {
            // The payload occupies the rest of the buffer, clamped to what
            // the header says was actually received.
            let payload_data_end = payload_start
                + usize::min(
                    usize::try_from(header.payloadlen).unwrap(),
                    buffer.len() - payload_start,
                );
            &buffer[payload_start..payload_data_end]
        };

        Ok(Self {
            header,
            msghdr_name_len,
            name_data,
            control_data,
            payload_data,
        })
    }

    /// Return the length of the incoming `name` data.
    ///
    /// This may be larger than the size of the content returned by
    /// `name_data()`, if the kernel could not fit all the incoming
    /// data in the provided buffer size. In that case, name data in
    /// the result buffer gets truncated.
    pub fn incoming_name_len(&self) -> u32 {
        self.header.namelen
    }

    /// Return whether the incoming name data was larger than the provided limit/buffer.
    ///
    /// When `true`, data returned by `name_data()` is truncated and
    /// incomplete.
    pub fn is_name_data_truncated(&self) -> bool {
        self.header.namelen as usize > self.msghdr_name_len
    }

    /// Message name data, with the same semantics as `msghdr.msg_name`.
    pub fn name_data(&self) -> &[u8] {
        self.name_data
    }

    /// Return the length of the incoming `control` data.
    ///
    /// This may be larger than the size of the content returned by
    /// `control_data()`, if the kernel could not fit all the incoming
    /// data in the provided buffer size. In that case, control data in
    /// the result buffer gets truncated.
    pub fn incoming_control_len(&self) -> u32 {
        self.header.controllen
    }

    /// Return whether the incoming control data was larger than the provided limit/buffer.
    ///
    /// When `true`, data returned by `control_data()` is truncated and
    /// incomplete.
    pub fn is_control_data_truncated(&self) -> bool {
        self.header.flags.contains(sys::RecvmsgOutFlags::CTRUNC)
    }

    /// Message control data, with the same semantics as `msghdr.msg_control`.
    pub fn control_data(&self) -> &[u8] {
        self.control_data
    }

    /// Return whether the incoming payload was larger than the provided limit/buffer.
    ///
    /// When `true`, data returned by `payload_data()` is truncated and
    /// incomplete.
    pub fn is_payload_truncated(&self) -> bool {
        self.header.flags.contains(sys::RecvmsgOutFlags::TRUNC)
    }

    /// Message payload, as buffered by the kernel.
    pub fn payload_data(&self) -> &[u8] {
        self.payload_data
    }

    /// Return the length of the incoming `payload` data.
    ///
    /// This may be larger than the size of the content returned by
    /// `payload_data()`, if the kernel could not fit all the incoming
    /// data in the provided buffer size. In that case, payload data in
    /// the result buffer gets truncated.
    pub fn incoming_payload_len(&self) -> u32 {
        self.header.payloadlen
    }

    /// Message flags, with the same semantics as `msghdr.msg_flags`.
    pub fn flags(&self) -> sys::RecvmsgOutFlags {
        self.header.flags
    }
}
531
/// [CancelBuilder] constructs match criteria for request cancellation.
///
/// The [CancelBuilder] can be used to selectively cancel one or more requests
/// by user_data, fd, fixed fd, or unconditionally.
///
/// ### Examples
///
/// ```
/// use rustix_uring::types::{CancelBuilder, Fd, Fixed};
///
/// // Match all in-flight requests.
/// CancelBuilder::any();
///
/// // Match a single request with user_data = 42.
/// CancelBuilder::user_data(42);
///
/// // Match a single request with fd = 42.
/// CancelBuilder::fd(Fd(42));
///
/// // Match a single request with fixed fd = 42.
/// CancelBuilder::fd(Fixed(42));
///
/// // Match all in-flight requests with user_data = 42.
/// CancelBuilder::user_data(42).all();
/// ```
#[derive(Debug)]
pub struct CancelBuilder {
    // Flags describing which criteria below are active.
    pub(crate) flags: AsyncCancelFlags,
    // user_data to match; zeroed when matching by fd or unconditionally.
    pub(crate) user_data: sys::io_uring_user_data,
    // Raw or fixed fd to match; `None` when matching by user_data or any.
    pub(crate) fd: Option<sealed::Target>,
}
563
564impl CancelBuilder {
565    /// Create a new [CancelBuilder] which will match any in-flight request.
566    ///
567    /// This will cancel every in-flight request in the ring.
568    ///
569    /// Async cancellation matching any requests is only available since 5.19.
570    pub const fn any() -> Self {
571        Self {
572            flags: AsyncCancelFlags::ANY,
573            user_data: sys::io_uring_user_data::zeroed(),
574            fd: None,
575        }
576    }
577
578    /// Create a new [CancelBuilder] which will match in-flight requests
579    /// with the given `user_data` value.
580    ///
581    /// The first request with the given `user_data` value will be canceled.
582    /// [CancelBuilder::all](#method.all) can be called to instead match every
583    /// request with the provided `user_data` value.
584    pub fn user_data(user_data: impl Into<sys::io_uring_user_data>) -> Self {
585        Self {
586            flags: AsyncCancelFlags::empty(),
587            user_data: user_data.into(),
588            fd: None,
589        }
590    }
591
592    /// A `const` version of [`Self::user_data`] for `u64`s.
593    pub const fn user_data_u64(u64_: u64) -> Self {
594        Self {
595            flags: AsyncCancelFlags::empty(),
596            user_data: sys::io_uring_user_data::from_u64(u64_),
597            fd: None,
598        }
599    }
600
601    /// A `const` version of [`Self::user_data`] for `*mut c_void`s.
602    pub const fn user_data_ptr(ptr: *mut core::ffi::c_void) -> Self {
603        Self {
604            flags: AsyncCancelFlags::empty(),
605            user_data: sys::io_uring_user_data::from_ptr(ptr),
606            fd: None,
607        }
608    }
609
610    /// Create a new [CancelBuilder] which will match in-flight requests with
611    /// the given `fd` value.
612    ///
613    /// The first request with the given `fd` value will be canceled. [CancelBuilder::all](#method.all)
614    /// can be called to instead match every request with the provided `fd` value.
615    ///
616    /// FD async cancellation is only available since 5.19.
617    pub fn fd(fd: impl sealed::UseFixed) -> Self {
618        let mut flags = AsyncCancelFlags::FD;
619        let target = fd.into();
620        if matches!(target, sealed::Target::Fixed(_)) {
621            flags.insert(AsyncCancelFlags::FD_FIXED);
622        }
623        Self {
624            flags,
625            user_data: sys::io_uring_user_data::default(),
626            fd: Some(target),
627        }
628    }
629
630    /// Modify the [CancelBuilder] match criteria to match all in-flight requests
631    /// rather than just the first one.
632    ///
633    /// This has no effect when combined with [CancelBuilder::any](#method.any).
634    ///
635    /// Async cancellation matching all requests is only available since 5.19.
636    pub fn all(mut self) -> Self {
637        self.flags.insert(AsyncCancelFlags::ALL);
638        self
639    }
640
641    pub(crate) fn to_fd(&self) -> i32 {
642        self.fd
643            .as_ref()
644            .map(|target| match *target {
645                sealed::Target::Fd(fd) => fd,
646                sealed::Target::Fixed(idx) => idx as i32,
647            })
648            .unwrap_or(-1)
649    }
650}
651
/// Wrapper around `futex_waitv` as used in [`futex_waitv` system
/// call](https://www.kernel.org/doc/html/latest/userspace-api/futex2.html).
#[derive(Default, Debug, Clone, Copy)]
#[repr(transparent)] // same layout as the raw futex wait struct
pub struct FutexWaitV(sys::FutexWait);
657
impl FutexWaitV {
    /// Create a zeroed `futex_waitv` entry.
    pub const fn new() -> Self {
        Self(sys::FutexWait::new())
    }

    /// Set the expected futex value (see `futex_waitv` in the futex2 docs).
    pub const fn val(mut self, val: u64) -> Self {
        self.0.val = val;
        self
    }

    /// Set the user-space address of the futex word.
    pub const fn uaddr(mut self, uaddr: *mut core::ffi::c_void) -> Self {
        self.0.uaddr = sys::FutexWaitPtr::new(uaddr);
        self
    }

    /// Set the futex wait flags.
    pub const fn flags(mut self, flags: sys::FutexWaitFlags) -> Self {
        self.0.flags = flags;
        self
    }
}
678
#[cfg(test)]
mod tests {
    use core::time::Duration;

    use crate::types::sealed::Target;

    use super::*;

    #[test]
    fn timespec_from_duration_converts_correctly() {
        let duration = Duration::new(2, 500);
        let timespec = Timespec::from(duration);

        assert_eq!(timespec.0.tv_sec as u64, duration.as_secs());
        assert_eq!(timespec.0.tv_nsec as u32, duration.subsec_nanos());
    }

    #[test]
    fn test_cancel_builder_flags() {
        // `any()` sets exactly the ANY flag.
        let cb = CancelBuilder::any();
        assert_eq!(cb.flags, AsyncCancelFlags::ANY);

        // user_data matching starts with no flags; `all()` adds ALL.
        let mut cb = CancelBuilder::user_data(42);
        assert_eq!(cb.flags, AsyncCancelFlags::empty());
        assert_eq!(cb.user_data, sys::io_uring_user_data::from_u64(42));
        assert!(cb.fd.is_none());
        cb = cb.all();
        assert_eq!(cb.flags, AsyncCancelFlags::ALL);

        // Raw fd matching sets FD only.
        let mut cb = CancelBuilder::fd(Fd(42));
        assert_eq!(cb.flags, AsyncCancelFlags::FD);
        assert!(matches!(cb.fd, Some(Target::Fd(42))));
        assert_eq!(cb.user_data, Default::default());
        cb = cb.all();
        assert_eq!(cb.flags, AsyncCancelFlags::FD | AsyncCancelFlags::ALL);

        // Fixed fd matching sets FD and FD_FIXED.
        let mut cb = CancelBuilder::fd(Fixed(42));
        assert_eq!(cb.flags, AsyncCancelFlags::FD | AsyncCancelFlags::FD_FIXED);
        assert!(matches!(cb.fd, Some(Target::Fixed(42))));
        assert_eq!(cb.user_data, Default::default());
        cb = cb.all();
        assert_eq!(
            cb.flags,
            AsyncCancelFlags::FD | AsyncCancelFlags::FD_FIXED | AsyncCancelFlags::ALL
        );
    }
}