// io_uring/types.rs

1//! Common Linux types not provided by libc.
2
3pub(crate) mod sealed {
4    use super::{Fd, Fixed};
5    use std::os::unix::io::RawFd;
6
7    #[derive(Debug)]
8    pub enum Target {
9        Fd(RawFd),
10        Fixed(u32),
11    }
12
13    pub trait UseFd: Sized {
14        fn into(self) -> RawFd;
15    }
16
17    pub trait UseFixed: Sized {
18        fn into(self) -> Target;
19    }
20
21    impl UseFd for Fd {
22        #[inline]
23        fn into(self) -> RawFd {
24            self.0
25        }
26    }
27
28    impl UseFixed for Fd {
29        #[inline]
30        fn into(self) -> Target {
31            Target::Fd(self.0)
32        }
33    }
34
35    impl UseFixed for Fixed {
36        #[inline]
37        fn into(self) -> Target {
38            Target::Fixed(self.0)
39        }
40    }
41}
42
43use crate::sys;
44use crate::util::{cast_ptr, unwrap_nonzero, unwrap_u32};
45use bitflags::bitflags;
46use std::convert::TryFrom;
47use std::marker::PhantomData;
48use std::num::NonZeroU32;
49use std::os::unix::io::RawFd;
50
51pub use sys::__kernel_rwf_t as RwFlags;
52pub use sys::{
53    io_uring_region_desc, io_uring_zcrx_area_reg, io_uring_zcrx_cqe, io_uring_zcrx_ifq_reg,
54    io_uring_zcrx_rqe, IORING_MEM_REGION_TYPE_USER, IORING_ZCRX_AREA_SHIFT, IOU_PBUF_RING_INC,
55    IOU_PBUF_RING_MMAP,
56};
57
// From linux/io_uring.h
//
// NOTE: bindgen skips this due to the expression so we define it manually.
/// Mask that clears the low `IORING_ZCRX_AREA_SHIFT` bits of a zero-copy
/// receive token, keeping only the bits above the shift.
pub const IORING_ZCRX_AREA_MASK: u64 = !((1u64 << IORING_ZCRX_AREA_SHIFT) - 1);
62
/// Opaque type; use [`statx`](struct@libc::statx) instead.
///
/// Exists only so the name can appear in opcode signatures; the private
/// zero-sized field prevents construction outside this module.
#[repr(C)]
#[allow(non_camel_case_types)]
pub struct statx {
    // Private marker field: makes the type unconstructible by users.
    _priv: (),
}
69
/// Opaque type; use [`epoll_event`](libc::epoll_event) instead.
///
/// Exists only so the name can appear in opcode signatures; the private
/// zero-sized field prevents construction outside this module.
#[repr(C)]
#[allow(non_camel_case_types)]
pub struct epoll_event {
    // Private marker field: makes the type unconstructible by users.
    _priv: (),
}
76
/// A file descriptor that has not been registered with io_uring.
// `repr(transparent)` keeps the layout identical to `RawFd`.
#[derive(Debug, Clone, Copy)]
#[repr(transparent)]
pub struct Fd(pub RawFd);
81
/// A file descriptor that has been registered with io_uring using
/// [`Submitter::register_files`](crate::Submitter::register_files) or [`Submitter::register_files_sparse`](crate::Submitter::register_files_sparse).
/// This can reduce overhead compared to using [`Fd`] in some cases.
// `repr(transparent)` keeps the layout identical to `u32` (the slot index).
#[derive(Debug, Clone, Copy)]
#[repr(transparent)]
pub struct Fixed(pub u32);
88
bitflags! {
    /// Options for [`Timeout`](super::Timeout).
    ///
    /// The default behavior is to treat the timespec as a relative time interval. `flags` may
    /// contain [`TimeoutFlags::ABS`] to indicate the timespec represents an absolute
    /// time. When an absolute time is being specified, the kernel will use its monotonic clock
    /// unless one of the following flags is set (they may not both be set):
    /// [`TimeoutFlags::BOOTTIME`] or [`TimeoutFlags::REALTIME`].
    ///
    /// The default behavior when the timeout expires is to sever dependent links, as a failed
    /// request normally would. To keep the links untouched include [`TimeoutFlags::ETIME_SUCCESS`].
    /// CQE will still contain -libc::ETIME in the res field.
    #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
    pub struct TimeoutFlags: u32 {
        /// Interpret the timespec as an absolute time instead of a relative interval.
        const ABS = sys::IORING_TIMEOUT_ABS;

        /// With [`TimeoutFlags::ABS`], measure against the boottime clock.
        const BOOTTIME = sys::IORING_TIMEOUT_BOOTTIME;

        /// With [`TimeoutFlags::ABS`], measure against the realtime clock.
        const REALTIME = sys::IORING_TIMEOUT_REALTIME;

        /// Maps to the kernel's `IORING_LINK_TIMEOUT_UPDATE` flag, used when
        /// updating an existing linked timeout.
        const LINK_TIMEOUT_UPDATE = sys::IORING_LINK_TIMEOUT_UPDATE;

        /// Treat expiration as success so dependent links are not severed;
        /// the CQE still carries `-ETIME` in its res field.
        const ETIME_SUCCESS = sys::IORING_TIMEOUT_ETIME_SUCCESS;

        /// Maps to the kernel's `IORING_TIMEOUT_MULTISHOT` flag (timeout
        /// posts completions repeatedly rather than once).
        const MULTISHOT = sys::IORING_TIMEOUT_MULTISHOT;
    }
}
116
bitflags! {
    /// Options for [`Fsync`](super::Fsync).
    #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
    pub struct FsyncFlags: u32 {
        /// Sync data only, analogous to `fdatasync(2)` (maps to the kernel's
        /// `IORING_FSYNC_DATASYNC` flag).
        const DATASYNC = sys::IORING_FSYNC_DATASYNC;
    }
}
124
bitflags! {
    /// Options for [`AsyncCancel`](super::AsyncCancel) and
    /// [`Submitter::register_sync_cancel`](super::Submitter::register_sync_cancel).
    ///
    /// Each constant maps directly to the kernel's `IORING_ASYNC_CANCEL_*`
    /// flags.
    #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
    pub(crate) struct AsyncCancelFlags: u32 {
        /// Cancel all requests that match the given criteria, rather
        /// than just canceling the first one found.
        ///
        /// Available since 5.19.
        const ALL = sys::IORING_ASYNC_CANCEL_ALL;

        /// Match based on the file descriptor used in the original
        /// request rather than the user_data.
        ///
        /// Available since 5.19.
        const FD = sys::IORING_ASYNC_CANCEL_FD;

        /// Match any request in the ring, regardless of user_data or
        /// file descriptor.  Can be used to cancel any pending
        /// request in the ring.
        ///
        /// Available since 5.19.
        const ANY = sys::IORING_ASYNC_CANCEL_ANY;

        /// Match based on the fixed file descriptor used in the original
        /// request rather than the user_data.
        ///
        /// Available since 6.0.
        const FD_FIXED = sys::IORING_ASYNC_CANCEL_FD_FIXED;
    }
}
156
/// Wrapper around `open_how` as used in [the `openat2(2)` system
/// call](https://man7.org/linux/man-pages/man2/openat2.2.html).
// `repr(transparent)` keeps the layout identical to the kernel struct.
#[derive(Default, Debug, Clone, Copy)]
#[repr(transparent)]
pub struct OpenHow(sys::open_how);
162
163impl OpenHow {
164    pub const fn new() -> Self {
165        OpenHow(sys::open_how {
166            flags: 0,
167            mode: 0,
168            resolve: 0,
169        })
170    }
171
172    pub const fn flags(mut self, flags: u64) -> Self {
173        self.0.flags = flags;
174        self
175    }
176
177    pub const fn mode(mut self, mode: u64) -> Self {
178        self.0.mode = mode;
179        self
180    }
181
182    pub const fn resolve(mut self, resolve: u64) -> Self {
183        self.0.resolve = resolve;
184        self
185    }
186}
187
/// Wrapper around the kernel's `__kernel_timespec`; built via the
/// `sec`/`nsec` builder methods or converted from [`std::time::Duration`].
// `repr(transparent)` keeps the layout identical to the kernel struct.
#[derive(Default, Debug, Clone, Copy)]
#[repr(transparent)]
pub struct Timespec(pub(crate) sys::__kernel_timespec);
191
192impl Timespec {
193    #[inline]
194    pub const fn new() -> Self {
195        Timespec(sys::__kernel_timespec {
196            tv_sec: 0,
197            tv_nsec: 0,
198        })
199    }
200
201    #[inline]
202    pub const fn sec(mut self, sec: u64) -> Self {
203        self.0.tv_sec = sec as _;
204        self
205    }
206
207    #[inline]
208    pub const fn nsec(mut self, nsec: u32) -> Self {
209        self.0.tv_nsec = nsec as _;
210        self
211    }
212}
213
214impl From<std::time::Duration> for Timespec {
215    fn from(value: std::time::Duration) -> Self {
216        Timespec::new()
217            .sec(value.as_secs())
218            .nsec(value.subsec_nanos())
219    }
220}
221
/// Submit arguments
///
/// Note that arguments that exceed their lifetime will fail to compile.
///
/// ```compile_fail
/// use io_uring::types::{ SubmitArgs, Timespec };
///
/// let sigmask: libc::sigset_t = unsafe { std::mem::zeroed() };
///
/// let mut args = SubmitArgs::new();
///
/// {
///     let ts = Timespec::new();
///     args = args.timespec(&ts);
///     args = args.sigmask(&sigmask);
/// }
///
/// drop(args);
/// ```
#[derive(Default, Debug, Clone, Copy)]
pub struct SubmitArgs<'prev: 'now, 'now> {
    // Raw kernel argument block; holds the pointers set by the builders.
    pub(crate) args: sys::io_uring_getevents_arg,
    // Marker lifetimes: `'prev` keeps an earlier-stored pointer alive while
    // `'now` tracks the most recently supplied reference.
    prev: PhantomData<&'prev ()>,
    now: PhantomData<&'now ()>,
}
247
248impl<'prev, 'now> SubmitArgs<'prev, 'now> {
249    #[inline]
250    pub const fn new() -> SubmitArgs<'static, 'static> {
251        let args = sys::io_uring_getevents_arg {
252            sigmask: 0,
253            sigmask_sz: 0,
254            min_wait_usec: 0,
255            ts: 0,
256        };
257
258        SubmitArgs {
259            args,
260            prev: PhantomData,
261            now: PhantomData,
262        }
263    }
264
265    #[inline]
266    /// Signals to mask during waiting for the result
267    ///
268    /// Masked signals will be restored after submit operation returns
269    pub fn sigmask<'new>(mut self, sigmask: &'new libc::sigset_t) -> SubmitArgs<'now, 'new> {
270        self.args.sigmask = cast_ptr(sigmask) as _;
271        self.args.sigmask_sz = std::mem::size_of::<libc::sigset_t>() as _;
272
273        SubmitArgs {
274            args: self.args,
275            prev: self.now,
276            now: PhantomData,
277        }
278    }
279
280    #[inline]
281    /// Timeout for submit operation
282    pub fn timespec<'new>(mut self, timespec: &'new Timespec) -> SubmitArgs<'now, 'new> {
283        self.args.ts = cast_ptr(timespec) as _;
284
285        SubmitArgs {
286            args: self.args,
287            prev: self.now,
288            now: PhantomData,
289        }
290    }
291}
292
/// A single entry of a provided-buffer ring, wrapping the kernel's
/// `io_uring_buf` (layout-identical via `repr(transparent)`).
#[repr(transparent)]
pub struct BufRingEntry(sys::io_uring_buf);
295
296/// An entry in a buf_ring that allows setting the address, length and buffer id.
297#[allow(clippy::len_without_is_empty)]
298impl BufRingEntry {
299    /// Sets the entry addr.
300    pub fn set_addr(&mut self, addr: u64) {
301        self.0.addr = addr;
302    }
303
304    /// Returns the entry addr.
305    pub fn addr(&self) -> u64 {
306        self.0.addr
307    }
308
309    /// Sets the entry len.
310    pub fn set_len(&mut self, len: u32) {
311        self.0.len = len;
312    }
313
314    /// Returns the entry len.
315    pub fn len(&self) -> u32 {
316        self.0.len
317    }
318
319    /// Sets the entry bid.
320    pub fn set_bid(&mut self, bid: u16) {
321        self.0.bid = bid;
322    }
323
324    /// Returns the entry bid.
325    pub fn bid(&self) -> u16 {
326        self.0.bid
327    }
328
329    /// The offset to the ring's tail field given the ring's base address.
330    ///
331    /// The caller should ensure the ring's base address is aligned with the system's page size,
332    /// per the uring interface requirements.
333    ///
334    /// # Safety
335    ///
336    /// The ptr will be dereferenced in order to determine the address of the resv field,
337    /// so the caller is responsible for passing in a valid pointer. And not just
338    /// a valid pointer type, but also the argument must be the address to the first entry
339    /// of the buf_ring for the resv field to even be considered the tail field of the ring.
340    /// The entry must also be properly initialized.
341    pub unsafe fn tail(ring_base: *const BufRingEntry) -> *const u16 {
342        std::ptr::addr_of!((*ring_base).0.resv)
343    }
344}
345
/// A destination slot for sending fixed resources
/// (e.g. [`opcode::MsgRingSendFd`](crate::opcode::MsgRingSendFd)).
#[derive(Debug, Clone, Copy)]
pub struct DestinationSlot {
    /// Fixed slot as indexed by the kernel (target+1); `NonZeroU32` makes
    /// `Option<DestinationSlot>` free of overhead.
    dest: NonZeroU32,
}
353
354impl DestinationSlot {
355    // SAFETY: kernel constant, `IORING_FILE_INDEX_ALLOC` is always > 0.
356    const AUTO_ALLOC: NonZeroU32 =
357        unwrap_nonzero(NonZeroU32::new(sys::IORING_FILE_INDEX_ALLOC as u32));
358
359    /// Use an automatically allocated target slot.
360    pub const fn auto_target() -> Self {
361        Self {
362            dest: DestinationSlot::AUTO_ALLOC,
363        }
364    }
365
366    /// Try to use a given target slot.
367    ///
368    /// Valid slots are in the range from `0` to `u32::MAX - 2` inclusive.
369    pub fn try_from_slot_target(target: u32) -> Result<Self, u32> {
370        // SAFETY: kernel constant, `IORING_FILE_INDEX_ALLOC` is always >= 2.
371        const MAX_INDEX: u32 = unwrap_u32(DestinationSlot::AUTO_ALLOC.get().checked_sub(2));
372
373        if target > MAX_INDEX {
374            return Err(target);
375        }
376
377        let kernel_index = target.saturating_add(1);
378        // SAFETY: by construction, always clamped between 1 and IORING_FILE_INDEX_ALLOC-1.
379        debug_assert!(0 < kernel_index && kernel_index < DestinationSlot::AUTO_ALLOC.get());
380        let dest = NonZeroU32::new(kernel_index).unwrap();
381
382        Ok(Self { dest })
383    }
384
385    pub(crate) fn kernel_index_arg(&self) -> u32 {
386        self.dest.get()
387    }
388}
389
/// Helper structure for parsing the result of a multishot [`opcode::RecvMsg`](crate::opcode::RecvMsg).
#[derive(Debug)]
pub struct RecvMsgOut<'buf> {
    // Header copied out of the front of the result buffer.
    header: sys::io_uring_recvmsg_out,
    /// The fixed length of the name field, in bytes.
    ///
    /// If the incoming name data is larger than this, it gets truncated to this.
    /// If it is smaller, it gets 0-padded to fill the whole field. In either case,
    /// this fixed amount of space is reserved in the result buffer.
    msghdr_name_len: usize,

    // Borrowed sub-slices of the result buffer, in on-wire order.
    name_data: &'buf [u8],
    control_data: &'buf [u8],
    payload_data: &'buf [u8],
}
405
impl<'buf> RecvMsgOut<'buf> {
    // Byte offset where per-message data begins: the buffer starts with a
    // fixed-size `io_uring_recvmsg_out` header.
    const DATA_START: usize = std::mem::size_of::<sys::io_uring_recvmsg_out>();

    /// Parse the data buffered upon completion of a `RecvMsg` multishot operation.
    ///
    /// `buffer` is the whole buffer previously provided to the ring, while `msghdr`
    /// is the same content provided as input to the corresponding SQE
    /// (only `msg_namelen` and `msg_controllen` fields are relevant).
    ///
    /// Returns `Err(())` when `buffer` is too small to hold the header plus
    /// the name and control regions implied by `msghdr`.
    #[allow(clippy::result_unit_err)]
    #[allow(clippy::useless_conversion)]
    pub fn parse(buffer: &'buf [u8], msghdr: &libc::msghdr) -> Result<Self, ()> {
        let msghdr_name_len = usize::try_from(msghdr.msg_namelen).unwrap();
        let msghdr_control_len = usize::try_from(msghdr.msg_controllen).unwrap();

        // Checked arithmetic: an overflowing size is rejected exactly like a
        // too-short buffer.
        if Self::DATA_START
            .checked_add(msghdr_name_len)
            .and_then(|acc| acc.checked_add(msghdr_control_len))
            .map(|header_len| buffer.len() < header_len)
            .unwrap_or(true)
        {
            return Err(());
        }
        // SAFETY: buffer (minimum) length is checked here above.
        let header = unsafe {
            buffer
                .as_ptr()
                .cast::<sys::io_uring_recvmsg_out>()
                .read_unaligned()
        };

        // min is used because the header may indicate the true size of the data
        // while what we received was truncated.
        let (name_data, control_start) = {
            let name_start = Self::DATA_START;
            let name_data_end =
                name_start + usize::min(usize::try_from(header.namelen).unwrap(), msghdr_name_len);
            // The name region always occupies its full reserved size, so the
            // next region starts after it even when the data was shorter.
            let name_field_end = name_start + msghdr_name_len;
            (&buffer[name_start..name_data_end], name_field_end)
        };
        let (control_data, payload_start) = {
            let control_data_end = control_start
                + usize::min(
                    usize::try_from(header.controllen).unwrap(),
                    msghdr_control_len,
                );
            // Same fixed-size reservation logic as the name region.
            let control_field_end = control_start + msghdr_control_len;
            (&buffer[control_start..control_data_end], control_field_end)
        };
        let payload_data = {
            // The payload takes whatever remains of the buffer, capped at the
            // length the kernel reported.
            let payload_data_end = payload_start
                + usize::min(
                    usize::try_from(header.payloadlen).unwrap(),
                    buffer.len() - payload_start,
                );
            &buffer[payload_start..payload_data_end]
        };

        Ok(Self {
            header,
            msghdr_name_len,
            name_data,
            control_data,
            payload_data,
        })
    }

    /// Return the length of the incoming `name` data.
    ///
    /// This may be larger than the size of the content returned by
    /// `name_data()`, if the kernel could not fit all the incoming
    /// data in the provided buffer size. In that case, name data in
    /// the result buffer gets truncated.
    pub fn incoming_name_len(&self) -> u32 {
        self.header.namelen
    }

    /// Return whether the incoming name data was larger than the provided limit/buffer.
    ///
    /// When `true`, data returned by `name_data()` is truncated and
    /// incomplete.
    pub fn is_name_data_truncated(&self) -> bool {
        self.header.namelen as usize > self.msghdr_name_len
    }

    /// Message name data, with the same semantics as `msghdr.msg_name`.
    pub fn name_data(&self) -> &[u8] {
        self.name_data
    }

    /// Return the length of the incoming `control` data.
    ///
    /// This may be larger than the size of the content returned by
    /// `control_data()`, if the kernel could not fit all the incoming
    /// data in the provided buffer size. In that case, control data in
    /// the result buffer gets truncated.
    pub fn incoming_control_len(&self) -> u32 {
        self.header.controllen
    }

    /// Return whether the incoming control data was larger than the provided limit/buffer.
    ///
    /// When `true`, data returned by `control_data()` is truncated and
    /// incomplete.
    pub fn is_control_data_truncated(&self) -> bool {
        (self.header.flags & u32::try_from(libc::MSG_CTRUNC).unwrap()) != 0
    }

    /// Message control data, with the same semantics as `msghdr.msg_control`.
    pub fn control_data(&self) -> &[u8] {
        self.control_data
    }

    /// Return whether the incoming payload was larger than the provided limit/buffer.
    ///
    /// When `true`, data returned by `payload_data()` is truncated and
    /// incomplete.
    pub fn is_payload_truncated(&self) -> bool {
        (self.header.flags & u32::try_from(libc::MSG_TRUNC).unwrap()) != 0
    }

    /// Message payload, as buffered by the kernel.
    pub fn payload_data(&self) -> &[u8] {
        self.payload_data
    }

    /// Return the length of the incoming `payload` data.
    ///
    /// This may be larger than the size of the content returned by
    /// `payload_data()`, if the kernel could not fit all the incoming
    /// data in the provided buffer size. In that case, payload data in
    /// the result buffer gets truncated.
    pub fn incoming_payload_len(&self) -> u32 {
        self.header.payloadlen
    }

    /// Message flags, with the same semantics as `msghdr.msg_flags`.
    pub fn flags(&self) -> u32 {
        self.header.flags
    }
}
546
/// [CancelBuilder] constructs match criteria for request cancellation.
///
/// The [CancelBuilder] can be used to selectively cancel one or more requests
/// by user_data, fd, fixed fd, or unconditionally.
///
/// ### Examples
///
/// ```
/// use io_uring::types::{CancelBuilder, Fd, Fixed};
///
/// // Match all in-flight requests.
/// CancelBuilder::any();
///
/// // Match a single request with user_data = 42.
/// CancelBuilder::user_data(42);
///
/// // Match a single request with fd = 42.
/// CancelBuilder::fd(Fd(42));
///
/// // Match a single request with fixed fd = 42.
/// CancelBuilder::fd(Fixed(42));
///
/// // Match all in-flight requests with user_data = 42.
/// CancelBuilder::user_data(42).all();
/// ```
#[derive(Debug)]
pub struct CancelBuilder {
    // Cancellation flags forwarded to the kernel with the cancel request.
    pub(crate) flags: AsyncCancelFlags,
    // Match criterion: user_data value, when set.
    pub(crate) user_data: Option<u64>,
    // Match criterion: raw or fixed fd, when set.
    pub(crate) fd: Option<sealed::Target>,
}
578
579impl CancelBuilder {
580    /// Create a new [CancelBuilder] which will match any in-flight request.
581    ///
582    /// This will cancel every in-flight request in the ring.
583    ///
584    /// Async cancellation matching any requests is only available since 5.19.
585    pub const fn any() -> Self {
586        Self {
587            flags: AsyncCancelFlags::ANY,
588            user_data: None,
589            fd: None,
590        }
591    }
592
593    /// Create a new [CancelBuilder] which will match in-flight requests
594    /// with the given `user_data` value.
595    ///
596    /// The first request with the given `user_data` value will be canceled.
597    /// [CancelBuilder::all](#method.all) can be called to instead match every
598    /// request with the provided `user_data` value.
599    pub const fn user_data(user_data: u64) -> Self {
600        Self {
601            flags: AsyncCancelFlags::empty(),
602            user_data: Some(user_data),
603            fd: None,
604        }
605    }
606
607    /// Create a new [CancelBuilder] which will match in-flight requests with
608    /// the given `fd` value.
609    ///
610    /// The first request with the given `fd` value will be canceled. [CancelBuilder::all](#method.all)
611    /// can be called to instead match every request with the provided `fd` value.
612    ///
613    /// FD async cancellation is only available since 5.19.
614    pub fn fd(fd: impl sealed::UseFixed) -> Self {
615        let mut flags = AsyncCancelFlags::FD;
616        let target = fd.into();
617        if matches!(target, sealed::Target::Fixed(_)) {
618            flags.insert(AsyncCancelFlags::FD_FIXED);
619        }
620        Self {
621            flags,
622            user_data: None,
623            fd: Some(target),
624        }
625    }
626
627    /// Modify the [CancelBuilder] match criteria to match all in-flight requests
628    /// rather than just the first one.
629    ///
630    /// This has no effect when combined with [CancelBuilder::any](#method.any).
631    ///
632    /// Async cancellation matching all requests is only available since 5.19.
633    pub fn all(mut self) -> Self {
634        self.flags.insert(AsyncCancelFlags::ALL);
635        self
636    }
637
638    pub(crate) fn to_fd(&self) -> i32 {
639        self.fd
640            .as_ref()
641            .map(|target| match *target {
642                sealed::Target::Fd(fd) => fd,
643                sealed::Target::Fixed(idx) => idx as i32,
644            })
645            .unwrap_or(-1)
646    }
647}
648
/// Wrapper around `futex_waitv` as used in [`futex_waitv` system
/// call](https://www.kernel.org/doc/html/latest/userspace-api/futex2.html).
// `repr(transparent)` keeps the layout identical to the kernel struct.
#[derive(Default, Debug, Clone, Copy)]
#[repr(transparent)]
pub struct FutexWaitV(sys::futex_waitv);
654
655impl FutexWaitV {
656    pub const fn new() -> Self {
657        Self(sys::futex_waitv {
658            val: 0,
659            uaddr: 0,
660            flags: 0,
661            __reserved: 0,
662        })
663    }
664
665    pub const fn val(mut self, val: u64) -> Self {
666        self.0.val = val;
667        self
668    }
669
670    pub const fn uaddr(mut self, uaddr: u64) -> Self {
671        self.0.uaddr = uaddr;
672        self
673    }
674
675    pub const fn flags(mut self, flags: u32) -> Self {
676        self.0.flags = flags;
677        self
678    }
679}
680
#[cfg(test)]
mod tests {
    use std::time::Duration;

    use crate::types::sealed::Target;

    use super::*;

    #[test]
    fn timespec_from_duration_converts_correctly() {
        let duration = Duration::new(2, 500);
        let converted = Timespec::from(duration);

        assert_eq!(converted.0.tv_sec as u64, duration.as_secs());
        assert_eq!(converted.0.tv_nsec as u32, duration.subsec_nanos());
    }

    #[test]
    fn test_cancel_builder_flags() {
        // `any` matches everything: only the ANY flag, no criteria.
        let builder = CancelBuilder::any();
        assert_eq!(builder.flags, AsyncCancelFlags::ANY);

        // user_data criterion, then widened to all matching requests.
        let mut builder = CancelBuilder::user_data(42);
        assert_eq!(builder.flags, AsyncCancelFlags::empty());
        assert_eq!(builder.user_data, Some(42));
        assert!(builder.fd.is_none());
        builder = builder.all();
        assert_eq!(builder.flags, AsyncCancelFlags::ALL);

        // Raw fd criterion sets only the FD flag.
        let mut builder = CancelBuilder::fd(Fd(42));
        assert_eq!(builder.flags, AsyncCancelFlags::FD);
        assert!(matches!(builder.fd, Some(Target::Fd(42))));
        assert!(builder.user_data.is_none());
        builder = builder.all();
        assert_eq!(builder.flags, AsyncCancelFlags::FD | AsyncCancelFlags::ALL);

        // Fixed-slot criterion additionally sets FD_FIXED.
        let mut builder = CancelBuilder::fd(Fixed(42));
        assert_eq!(
            builder.flags,
            AsyncCancelFlags::FD | AsyncCancelFlags::FD_FIXED
        );
        assert!(matches!(builder.fd, Some(Target::Fixed(42))));
        assert!(builder.user_data.is_none());
        builder = builder.all();
        assert_eq!(
            builder.flags,
            AsyncCancelFlags::FD | AsyncCancelFlags::FD_FIXED | AsyncCancelFlags::ALL
        );
    }
}