io_uring/types.rs
1//! Common Linux types not provided by libc.
2
pub(crate) mod sealed {
    use super::{Fd, Fixed};
    use std::os::unix::io::RawFd;

    /// Internal representation of an operation target: either a plain file
    /// descriptor or an index into the io_uring registered-files table.
    #[derive(Debug)]
    pub enum Target {
        Fd(RawFd),
        Fixed(u32),
    }

    /// Sealed conversion trait for opcodes that accept only a raw [`Fd`].
    pub trait UseFd: Sized {
        fn into(self) -> RawFd;
    }

    /// Sealed conversion trait for opcodes that accept either a raw [`Fd`]
    /// or a registered [`Fixed`] descriptor.
    pub trait UseFixed: Sized {
        fn into(self) -> Target;
    }

    impl UseFd for Fd {
        #[inline]
        fn into(self) -> RawFd {
            self.0
        }
    }

    impl UseFixed for Fd {
        #[inline]
        fn into(self) -> Target {
            Target::Fd(self.0)
        }
    }

    impl UseFixed for Fixed {
        #[inline]
        fn into(self) -> Target {
            Target::Fixed(self.0)
        }
    }
}
42
43use crate::sys;
44use crate::util::{cast_ptr, unwrap_nonzero, unwrap_u32};
45use bitflags::bitflags;
46use std::convert::TryFrom;
47use std::marker::PhantomData;
48use std::num::NonZeroU32;
49use std::os::unix::io::RawFd;
50
/// Deprecated alias kept for backward compatibility; use `u32` directly.
#[deprecated]
pub type RwFlags = u32;
53pub use sys::{
54 io_uring_region_desc, io_uring_zcrx_area_reg, io_uring_zcrx_cqe, io_uring_zcrx_ifq_reg,
55 io_uring_zcrx_rqe, IORING_MEM_REGION_TYPE_USER, IORING_ZCRX_AREA_SHIFT, IOU_PBUF_RING_INC,
56 IOU_PBUF_RING_MMAP,
57};
58
// From linux/io_uring.h
//
// NOTE: bindgen skips this due to the expression so we define it manually.
/// Mask selecting the bits at and above `IORING_ZCRX_AREA_SHIFT`
/// (i.e. clearing the low offset bits of a zero-copy RX area token).
pub const IORING_ZCRX_AREA_MASK: u64 = !((1u64 << IORING_ZCRX_AREA_SHIFT) - 1);
63
/// Opaque types, you should use [`statx`](struct@libc::statx) instead.
///
/// Deliberately unconstructable (`_priv`): it only exists so the name can
/// appear in this crate's API surface.
#[repr(C)]
#[allow(non_camel_case_types)]
pub struct statx {
    _priv: (),
}
70
/// Opaque types, you should use [`epoll_event`](libc::epoll_event) instead.
///
/// Deliberately unconstructable (`_priv`): it only exists so the name can
/// appear in this crate's API surface.
#[repr(C)]
#[allow(non_camel_case_types)]
pub struct epoll_event {
    _priv: (),
}
77
/// A file descriptor that has not been registered with io_uring.
///
/// Transparent newtype over a raw `RawFd`.
#[derive(Debug, Clone, Copy)]
#[repr(transparent)]
pub struct Fd(pub RawFd);
82
/// A file descriptor that has been registered with io_uring using
/// [`Submitter::register_files`](crate::Submitter::register_files) or [`Submitter::register_files_sparse`](crate::Submitter::register_files_sparse).
/// This can reduce overhead compared to using [`Fd`] in some cases.
///
/// The wrapped `u32` is the index of the file in the registration table.
#[derive(Debug, Clone, Copy)]
#[repr(transparent)]
pub struct Fixed(pub u32);
89
bitflags! {
    /// Options for [`Timeout`](super::Timeout).
    ///
    /// The default behavior is to treat the timespec as a relative time interval. `flags` may
    /// contain [`TimeoutFlags::ABS`] to indicate the timespec represents an absolute
    /// time. When an absolute time is being specified, the kernel will use its monotonic clock
    /// unless one of the following flags is set (they may not both be set):
    /// [`TimeoutFlags::BOOTTIME`] or [`TimeoutFlags::REALTIME`].
    ///
    /// The default behavior when the timeout expires is to sever dependent links, as a failed
    /// request normally would. To keep the links untouched include [`TimeoutFlags::ETIME_SUCCESS`].
    /// CQE will still contain -libc::ETIME in the res field
    #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
    pub struct TimeoutFlags: u32 {
        /// Interpret the timespec as an absolute point in time rather than
        /// a relative interval.
        const ABS = sys::IORING_TIMEOUT_ABS;

        /// Use the boottime clock for an absolute timeout.
        const BOOTTIME = sys::IORING_TIMEOUT_BOOTTIME;

        /// Use the realtime clock for an absolute timeout.
        const REALTIME = sys::IORING_TIMEOUT_REALTIME;

        /// Target a *linked* timeout when performing a timeout update
        /// operation (as opposed to a normal timeout).
        const LINK_TIMEOUT_UPDATE = sys::IORING_LINK_TIMEOUT_UPDATE;

        /// Treat expiration as success so dependent links are not severed;
        /// the CQE `res` field still carries `-ETIME`.
        const ETIME_SUCCESS = sys::IORING_TIMEOUT_ETIME_SUCCESS;

        /// Fire repeatedly, posting a completion on every expiry instead of
        /// completing just once.
        const MULTISHOT = sys::IORING_TIMEOUT_MULTISHOT;
    }
}
117
bitflags! {
    /// Options for [`Fsync`](super::Fsync).
    #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
    pub struct FsyncFlags: u32 {
        /// Synchronize data only, in the manner of `fdatasync(2)`, rather
        /// than data and all metadata.
        const DATASYNC = sys::IORING_FSYNC_DATASYNC;
    }
}
125
bitflags! {
    /// Options for [`AsyncCancel`](super::AsyncCancel) and
    /// [`Submitter::register_sync_cancel`](super::Submitter::register_sync_cancel).
    ///
    /// Crate-internal: users select these indirectly through [`CancelBuilder`].
    #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
    pub(crate) struct AsyncCancelFlags: u32 {
        /// Cancel all requests that match the given criteria, rather
        /// than just canceling the first one found.
        ///
        /// Available since 5.19.
        const ALL = sys::IORING_ASYNC_CANCEL_ALL;

        /// Match based on the file descriptor used in the original
        /// request rather than the user_data.
        ///
        /// Available since 5.19.
        const FD = sys::IORING_ASYNC_CANCEL_FD;

        /// Match any request in the ring, regardless of user_data or
        /// file descriptor. Can be used to cancel any pending
        /// request in the ring.
        ///
        /// Available since 5.19.
        const ANY = sys::IORING_ASYNC_CANCEL_ANY;

        /// Match based on the fixed file descriptor used in the original
        /// request rather than the user_data.
        ///
        /// Available since 6.0.
        const FD_FIXED = sys::IORING_ASYNC_CANCEL_FD_FIXED;
    }
}
157
/// Wrapper around `open_how` as used in [the `openat2(2)` system
/// call](https://man7.org/linux/man-pages/man2/openat2.2.html).
///
/// Construct with [`OpenHow::new`], then set fields with the builder-style
/// `flags`/`mode`/`resolve` methods.
#[derive(Default, Debug, Clone, Copy)]
#[repr(transparent)]
pub struct OpenHow(sys::open_how);
163
164impl OpenHow {
165 pub const fn new() -> Self {
166 OpenHow(sys::open_how {
167 flags: 0,
168 mode: 0,
169 resolve: 0,
170 })
171 }
172
173 pub const fn flags(mut self, flags: u64) -> Self {
174 self.0.flags = flags;
175 self
176 }
177
178 pub const fn mode(mut self, mode: u64) -> Self {
179 self.0.mode = mode;
180 self
181 }
182
183 pub const fn resolve(mut self, resolve: u64) -> Self {
184 self.0.resolve = resolve;
185 self
186 }
187}
188
/// A kernel timespec (`__kernel_timespec`) — whole seconds plus nanoseconds —
/// as used by io_uring timeout operations.
#[derive(Default, Debug, Clone, Copy)]
#[repr(transparent)]
pub struct Timespec(pub(crate) sys::__kernel_timespec);
192
193impl Timespec {
194 #[inline]
195 pub const fn new() -> Self {
196 Timespec(sys::__kernel_timespec {
197 tv_sec: 0,
198 tv_nsec: 0,
199 })
200 }
201
202 #[inline]
203 pub const fn sec(mut self, sec: u64) -> Self {
204 self.0.tv_sec = sec as _;
205 self
206 }
207
208 #[inline]
209 pub const fn nsec(mut self, nsec: u32) -> Self {
210 self.0.tv_nsec = nsec as _;
211 self
212 }
213}
214
215impl From<std::time::Duration> for Timespec {
216 fn from(value: std::time::Duration) -> Self {
217 Timespec::new()
218 .sec(value.as_secs())
219 .nsec(value.subsec_nanos())
220 }
221}
222
/// Submit arguments
///
/// Note that arguments that exceed their lifetime will fail to compile.
///
/// ```compile_fail
/// use io_uring::types::{ SubmitArgs, Timespec };
///
/// let sigmask: libc::sigset_t = unsafe { std::mem::zeroed() };
///
/// let mut args = SubmitArgs::new();
///
/// {
///     let ts = Timespec::new();
///     args = args.timespec(&ts);
///     args = args.sigmask(&sigmask);
/// }
///
/// drop(args);
/// ```
#[repr(transparent)]
#[derive(Default, Debug, Clone, Copy)]
pub struct SubmitArgs<'prev: 'now, 'now> {
    // Raw argument struct handed to the kernel; `sigmask`/`ts` hold borrowed
    // pointers stored as plain integers.
    pub(crate) args: sys::io_uring_getevents_arg,
    // These two lifetimes pin the borrows behind the raw pointers in `args`:
    // `'now` is the most recently stored borrow, `'prev` the one before it.
    prev: PhantomData<&'prev ()>,
    now: PhantomData<&'now ()>,
}
249
impl<'prev, 'now> SubmitArgs<'prev, 'now> {
    /// Create an empty argument set: no sigmask, no timeout, no minimum wait.
    #[inline]
    pub const fn new() -> SubmitArgs<'static, 'static> {
        let args = sys::io_uring_getevents_arg {
            sigmask: 0,
            sigmask_sz: 0,
            min_wait_usec: 0,
            ts: 0,
        };

        SubmitArgs {
            args,
            prev: PhantomData,
            now: PhantomData,
        }
    }

    #[inline]
    /// Signals to mask during waiting for the result
    ///
    /// Masked signals will be restored after submit operation returns
    pub fn sigmask<'new>(mut self, sigmask: &'new libc::sigset_t) -> SubmitArgs<'now, 'new> {
        self.args.sigmask = cast_ptr(sigmask) as _;
        self.args.sigmask_sz = std::mem::size_of::<libc::sigset_t>() as _;

        // Shift the lifetimes: the previous `'now` borrow becomes `'prev`,
        // and the fresh `'new` borrow becomes `'now`, so the returned value
        // cannot outlive either pointer stored in `args`.
        SubmitArgs {
            args: self.args,
            prev: self.now,
            now: PhantomData,
        }
    }

    /// Sets a timeout in microseconds to start waiting for a minimum of a single completion.
    ///
    /// Once the timeout expires, the kernel will return when a single completion has been received
    /// instead of waiting for the minimum amount of completions specified by the `want` parameter
    /// in the call to [`Submitter::submit_and_wait`](crate::Submitter::submit_and_wait) or
    /// [`Submitter::submit_with_args`](crate::Submitter::submit_with_args).
    ///
    /// Available since 6.12. Use the
    /// [`Parameters::is_feature_min_timeout`](crate::Parameters::is_feature_min_timeout) method to
    /// check for availability.
    #[inline]
    pub fn min_wait_usec(mut self, min_wait_usec: u32) -> Self {
        // Plain value, no borrow involved: the lifetimes stay unchanged.
        self.args.min_wait_usec = min_wait_usec;
        self
    }

    #[inline]
    /// Timeout for submit operation
    pub fn timespec<'new>(mut self, timespec: &'new Timespec) -> SubmitArgs<'now, 'new> {
        self.args.ts = cast_ptr(timespec) as _;

        // Same lifetime shift as in `sigmask` above.
        SubmitArgs {
            args: self.args,
            prev: self.now,
            now: PhantomData,
        }
    }
}
310
/// An entry in a buf_ring, wrapping the kernel's `io_uring_buf` descriptor.
#[repr(transparent)]
pub struct BufRingEntry(sys::io_uring_buf);
313
/// An entry in a buf_ring that allows setting the address, length and buffer id.
// `len()` without `is_empty()` is intentional: `len` mirrors the kernel field.
#[allow(clippy::len_without_is_empty)]
impl BufRingEntry {
    /// Sets the entry addr.
    pub fn set_addr(&mut self, addr: u64) {
        self.0.addr = addr;
    }

    /// Returns the entry addr.
    pub fn addr(&self) -> u64 {
        self.0.addr
    }

    /// Sets the entry len.
    pub fn set_len(&mut self, len: u32) {
        self.0.len = len;
    }

    /// Returns the entry len.
    pub fn len(&self) -> u32 {
        self.0.len
    }

    /// Sets the entry bid.
    pub fn set_bid(&mut self, bid: u16) {
        self.0.bid = bid;
    }

    /// Returns the entry bid.
    pub fn bid(&self) -> u16 {
        self.0.bid
    }

    /// The offset to the ring's tail field given the ring's base address.
    ///
    /// The caller should ensure the ring's base address is aligned with the system's page size,
    /// per the uring interface requirements.
    ///
    /// # Safety
    ///
    /// The ptr will be dereferenced in order to determine the address of the resv field,
    /// so the caller is responsible for passing in a valid pointer. And not just
    /// a valid pointer type, but also the argument must be the address to the first entry
    /// of the buf_ring for the resv field to even be considered the tail field of the ring.
    /// The entry must also be properly initialized.
    pub unsafe fn tail(ring_base: *const BufRingEntry) -> *const u16 {
        // The kernel reuses the first entry's `resv` field as the ring tail.
        std::ptr::addr_of!((*ring_base).0.resv)
    }
}
363
/// A destination slot for sending fixed resources
/// (e.g. [`opcode::MsgRingSendFd`](crate::opcode::MsgRingSendFd)).
///
/// Construct via [`DestinationSlot::auto_target`] or
/// [`DestinationSlot::try_from_slot_target`].
#[derive(Debug, Clone, Copy)]
pub struct DestinationSlot {
    /// Fixed slot as indexed by the kernel (target+1).
    dest: NonZeroU32,
}
371
impl DestinationSlot {
    // Sentinel kernel index meaning "let the kernel pick a free slot".
    // SAFETY: kernel constant, `IORING_FILE_INDEX_ALLOC` is always > 0.
    const AUTO_ALLOC: NonZeroU32 =
        unwrap_nonzero(NonZeroU32::new(sys::IORING_FILE_INDEX_ALLOC as u32));

    /// Use an automatically allocated target slot.
    pub const fn auto_target() -> Self {
        Self {
            dest: DestinationSlot::AUTO_ALLOC,
        }
    }

    /// Try to use a given target slot.
    ///
    /// Valid slots are in the range from `0` to `u32::MAX - 2` inclusive.
    /// Returns the rejected `target` back as the error value when it is out
    /// of range.
    pub fn try_from_slot_target(target: u32) -> Result<Self, u32> {
        // SAFETY: kernel constant, `IORING_FILE_INDEX_ALLOC` is always >= 2.
        const MAX_INDEX: u32 = unwrap_u32(DestinationSlot::AUTO_ALLOC.get().checked_sub(2));

        if target > MAX_INDEX {
            return Err(target);
        }

        // The kernel numbers slots from 1; 0 means "no fixed slot".
        let kernel_index = target.saturating_add(1);
        // SAFETY: by construction, always clamped between 1 and IORING_FILE_INDEX_ALLOC-1.
        debug_assert!(0 < kernel_index && kernel_index < DestinationSlot::AUTO_ALLOC.get());
        let dest = NonZeroU32::new(kernel_index).unwrap();

        Ok(Self { dest })
    }

    /// The index value to place in the SQE, in the kernel's 1-based numbering.
    pub(crate) fn kernel_index_arg(&self) -> u32 {
        self.dest.get()
    }
}
407
/// Helper structure for parsing the result of a multishot [`opcode::RecvMsg`](crate::opcode::RecvMsg).
#[derive(Debug)]
pub struct RecvMsgOut<'buf> {
    // Fixed-size header copied (unaligned) from the front of the buffer.
    header: sys::io_uring_recvmsg_out,
    /// The fixed length of the name field, in bytes.
    ///
    /// If the incoming name data is larger than this, it gets truncated to this.
    /// If it is smaller, it gets 0-padded to fill the whole field. In either case,
    /// this fixed amount of space is reserved in the result buffer.
    msghdr_name_len: usize,

    // Borrowed sub-slices of the input buffer, already limited to the bytes
    // actually received (or the msghdr-reserved sizes, whichever is smaller).
    name_data: &'buf [u8],
    control_data: &'buf [u8],
    payload_data: &'buf [u8],
}
423
impl<'buf> RecvMsgOut<'buf> {
    // Byte offset where the variable-length regions begin, i.e. the size of
    // the fixed `io_uring_recvmsg_out` header at the front of the buffer.
    const DATA_START: usize = std::mem::size_of::<sys::io_uring_recvmsg_out>();

    /// Parse the data buffered upon completion of a `RecvMsg` multishot operation.
    ///
    /// `buffer` is the whole buffer previously provided to the ring, while `msghdr`
    /// is the same content provided as input to the corresponding SQE
    /// (only `msg_namelen` and `msg_controllen` fields are relevant).
    ///
    /// Returns `Err(())` if the buffer is too small to hold the header plus
    /// the name and control regions reserved by `msghdr`.
    #[allow(clippy::result_unit_err)]
    #[allow(clippy::useless_conversion)]
    pub fn parse(buffer: &'buf [u8], msghdr: &libc::msghdr) -> Result<Self, ()> {
        let msghdr_name_len = usize::try_from(msghdr.msg_namelen).unwrap();
        let msghdr_control_len = usize::try_from(msghdr.msg_controllen).unwrap();

        // Reject buffers too short for header + name field + control field;
        // `checked_add` also guards against overflow of the sum itself.
        if Self::DATA_START
            .checked_add(msghdr_name_len)
            .and_then(|acc| acc.checked_add(msghdr_control_len))
            .map(|header_len| buffer.len() < header_len)
            .unwrap_or(true)
        {
            return Err(());
        }
        // SAFETY: buffer (minimum) length is checked here above.
        let header = unsafe {
            buffer
                .as_ptr()
                .cast::<sys::io_uring_recvmsg_out>()
                .read_unaligned()
        };

        // min is used because the header may indicate the true size of the data
        // while what we received was truncated.
        let (name_data, control_start) = {
            let name_start = Self::DATA_START;
            let name_data_end =
                name_start + usize::min(usize::try_from(header.namelen).unwrap(), msghdr_name_len);
            // The name *field* always spans msghdr_name_len bytes, even when
            // the received name data is shorter (0-padded).
            let name_field_end = name_start + msghdr_name_len;
            (&buffer[name_start..name_data_end], name_field_end)
        };
        let (control_data, payload_start) = {
            let control_data_end = control_start
                + usize::min(
                    usize::try_from(header.controllen).unwrap(),
                    msghdr_control_len,
                );
            let control_field_end = control_start + msghdr_control_len;
            (&buffer[control_start..control_data_end], control_field_end)
        };
        let payload_data = {
            // The payload is capped by what physically fits in the buffer.
            let payload_data_end = payload_start
                + usize::min(
                    usize::try_from(header.payloadlen).unwrap(),
                    buffer.len() - payload_start,
                );
            &buffer[payload_start..payload_data_end]
        };

        Ok(Self {
            header,
            msghdr_name_len,
            name_data,
            control_data,
            payload_data,
        })
    }

    /// Return the length of the incoming `name` data.
    ///
    /// This may be larger than the size of the content returned by
    /// `name_data()`, if the kernel could not fit all the incoming
    /// data in the provided buffer size. In that case, name data in
    /// the result buffer gets truncated.
    pub fn incoming_name_len(&self) -> u32 {
        self.header.namelen
    }

    /// Return whether the incoming name data was larger than the provided limit/buffer.
    ///
    /// When `true`, data returned by `name_data()` is truncated and
    /// incomplete.
    pub fn is_name_data_truncated(&self) -> bool {
        self.header.namelen as usize > self.msghdr_name_len
    }

    /// Message name data, with the same semantics as `msghdr.msg_name`.
    pub fn name_data(&self) -> &[u8] {
        self.name_data
    }

    /// Return the length of the incoming `control` data.
    ///
    /// This may be larger than the size of the content returned by
    /// `control_data()`, if the kernel could not fit all the incoming
    /// data in the provided buffer size. In that case, control data in
    /// the result buffer gets truncated.
    pub fn incoming_control_len(&self) -> u32 {
        self.header.controllen
    }

    /// Return whether the incoming control data was larger than the provided limit/buffer.
    ///
    /// When `true`, data returned by `control_data()` is truncated and
    /// incomplete.
    pub fn is_control_data_truncated(&self) -> bool {
        (self.header.flags & u32::try_from(libc::MSG_CTRUNC).unwrap()) != 0
    }

    /// Message control data, with the same semantics as `msghdr.msg_control`.
    pub fn control_data(&self) -> &[u8] {
        self.control_data
    }

    /// Return whether the incoming payload was larger than the provided limit/buffer.
    ///
    /// When `true`, data returned by `payload_data()` is truncated and
    /// incomplete.
    pub fn is_payload_truncated(&self) -> bool {
        (self.header.flags & u32::try_from(libc::MSG_TRUNC).unwrap()) != 0
    }

    /// Message payload, as buffered by the kernel.
    pub fn payload_data(&self) -> &[u8] {
        self.payload_data
    }

    /// Return the length of the incoming `payload` data.
    ///
    /// This may be larger than the size of the content returned by
    /// `payload_data()`, if the kernel could not fit all the incoming
    /// data in the provided buffer size. In that case, payload data in
    /// the result buffer gets truncated.
    pub fn incoming_payload_len(&self) -> u32 {
        self.header.payloadlen
    }

    /// Message flags, with the same semantics as `msghdr.msg_flags`.
    pub fn flags(&self) -> u32 {
        self.header.flags
    }
}
564
/// [CancelBuilder] constructs match criteria for request cancellation.
///
/// The [CancelBuilder] can be used to selectively cancel one or more requests
/// by user_data, fd, fixed fd, or unconditionally.
///
/// ### Examples
///
/// ```
/// use io_uring::types::{CancelBuilder, Fd, Fixed};
///
/// // Match all in-flight requests.
/// CancelBuilder::any();
///
/// // Match a single request with user_data = 42.
/// CancelBuilder::user_data(42);
///
/// // Match a single request with fd = 42.
/// CancelBuilder::fd(Fd(42));
///
/// // Match a single request with fixed fd = 42.
/// CancelBuilder::fd(Fixed(42));
///
/// // Match all in-flight requests with user_data = 42.
/// CancelBuilder::user_data(42).all();
/// ```
#[derive(Debug)]
pub struct CancelBuilder {
    // Match mode flags (ANY / FD / FD_FIXED, optionally ALL).
    pub(crate) flags: AsyncCancelFlags,
    // Match criterion; at most one of `user_data` / `fd` is Some.
    pub(crate) user_data: Option<u64>,
    pub(crate) fd: Option<sealed::Target>,
}
596
597impl CancelBuilder {
598 /// Create a new [CancelBuilder] which will match any in-flight request.
599 ///
600 /// This will cancel every in-flight request in the ring.
601 ///
602 /// Async cancellation matching any requests is only available since 5.19.
603 pub const fn any() -> Self {
604 Self {
605 flags: AsyncCancelFlags::ANY,
606 user_data: None,
607 fd: None,
608 }
609 }
610
611 /// Create a new [CancelBuilder] which will match in-flight requests
612 /// with the given `user_data` value.
613 ///
614 /// The first request with the given `user_data` value will be canceled.
615 /// [CancelBuilder::all](#method.all) can be called to instead match every
616 /// request with the provided `user_data` value.
617 pub const fn user_data(user_data: u64) -> Self {
618 Self {
619 flags: AsyncCancelFlags::empty(),
620 user_data: Some(user_data),
621 fd: None,
622 }
623 }
624
625 /// Create a new [CancelBuilder] which will match in-flight requests with
626 /// the given `fd` value.
627 ///
628 /// The first request with the given `fd` value will be canceled. [CancelBuilder::all](#method.all)
629 /// can be called to instead match every request with the provided `fd` value.
630 ///
631 /// FD async cancellation is only available since 5.19.
632 pub fn fd(fd: impl sealed::UseFixed) -> Self {
633 let mut flags = AsyncCancelFlags::FD;
634 let target = fd.into();
635 if matches!(target, sealed::Target::Fixed(_)) {
636 flags.insert(AsyncCancelFlags::FD_FIXED);
637 }
638 Self {
639 flags,
640 user_data: None,
641 fd: Some(target),
642 }
643 }
644
645 /// Modify the [CancelBuilder] match criteria to match all in-flight requests
646 /// rather than just the first one.
647 ///
648 /// This has no effect when combined with [CancelBuilder::any](#method.any).
649 ///
650 /// Async cancellation matching all requests is only available since 5.19.
651 pub fn all(mut self) -> Self {
652 self.flags.insert(AsyncCancelFlags::ALL);
653 self
654 }
655
656 pub(crate) fn to_fd(&self) -> i32 {
657 self.fd
658 .as_ref()
659 .map(|target| match *target {
660 sealed::Target::Fd(fd) => fd,
661 sealed::Target::Fixed(idx) => idx as i32,
662 })
663 .unwrap_or(-1)
664 }
665}
666
/// Wrapper around `futex_waitv` as used in [`futex_waitv` system
/// call](https://www.kernel.org/doc/html/latest/userspace-api/futex2.html).
///
/// Construct with [`FutexWaitV::new`], then set fields with the
/// builder-style `val`/`uaddr`/`flags` methods.
#[derive(Default, Debug, Clone, Copy)]
#[repr(transparent)]
pub struct FutexWaitV(sys::futex_waitv);
672
673impl FutexWaitV {
674 pub const fn new() -> Self {
675 Self(sys::futex_waitv {
676 val: 0,
677 uaddr: 0,
678 flags: 0,
679 __reserved: 0,
680 })
681 }
682
683 pub const fn val(mut self, val: u64) -> Self {
684 self.0.val = val;
685 self
686 }
687
688 pub const fn uaddr(mut self, uaddr: u64) -> Self {
689 self.0.uaddr = uaddr;
690 self
691 }
692
693 pub const fn flags(mut self, flags: u32) -> Self {
694 self.0.flags = flags;
695 self
696 }
697}
698
#[cfg(test)]
mod tests {
    use std::time::Duration;

    use crate::types::sealed::Target;

    use super::*;

    // `Duration` converts losslessly into the kernel timespec representation.
    #[test]
    fn timespec_from_duration_converts_correctly() {
        let duration = Duration::new(2, 500);
        let timespec = Timespec::from(duration);

        assert_eq!(timespec.0.tv_sec as u64, duration.as_secs());
        assert_eq!(timespec.0.tv_nsec as u32, duration.subsec_nanos());
    }

    // Each constructor sets the expected flags and criterion, and `all()`
    // adds the ALL flag on top of whatever was already set.
    #[test]
    fn test_cancel_builder_flags() {
        let cb = CancelBuilder::any();
        assert_eq!(cb.flags, AsyncCancelFlags::ANY);

        let mut cb = CancelBuilder::user_data(42);
        assert_eq!(cb.flags, AsyncCancelFlags::empty());
        assert_eq!(cb.user_data, Some(42));
        assert!(cb.fd.is_none());
        cb = cb.all();
        assert_eq!(cb.flags, AsyncCancelFlags::ALL);

        let mut cb = CancelBuilder::fd(Fd(42));
        assert_eq!(cb.flags, AsyncCancelFlags::FD);
        assert!(matches!(cb.fd, Some(Target::Fd(42))));
        assert!(cb.user_data.is_none());
        cb = cb.all();
        assert_eq!(cb.flags, AsyncCancelFlags::FD | AsyncCancelFlags::ALL);

        let mut cb = CancelBuilder::fd(Fixed(42));
        assert_eq!(cb.flags, AsyncCancelFlags::FD | AsyncCancelFlags::FD_FIXED);
        assert!(matches!(cb.fd, Some(Target::Fixed(42))));
        assert!(cb.user_data.is_none());
        cb = cb.all();
        assert_eq!(
            cb.flags,
            AsyncCancelFlags::FD | AsyncCancelFlags::FD_FIXED | AsyncCancelFlags::ALL
        );
    }
}
745}