zsh 0.8.13

Zsh interpreter and parser in Rust
Documentation
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
use crate::fd_readable_set::{FdReadableSet, Timeout};
use crate::flog::flog;
use crate::portable_atomic::AtomicU64;
use crate::threads::assert_is_background_thread;
use crate::wutil::perror_nix;
use cfg_if::cfg_if;
use errno::errno;
use fish_common::exit_without_destructors;
use fish_util::perror;
use libc::{EAGAIN, EINTR, EWOULDBLOCK};
use std::collections::HashMap;
use std::os::unix::prelude::*;
use std::sync::atomic::Ordering;
use std::sync::{Arc, Mutex};
use std::time::Duration;

cfg_if!(
    if #[cfg(have_eventfd)] {
        use libc::{EFD_CLOEXEC, EFD_NONBLOCK};
    } else {
        use crate::fds::{make_autoclose_pipes, make_fd_nonblocking};
    }
);

/// An event signaller implemented using a file descriptor, so it can plug into
/// [`select()`](libc::select).
///
/// This is like a binary semaphore. A call to [`post()`](FdEventSignaller::post) will
/// signal an event, making the fd readable.  Multiple calls to `post()` may be coalesced.
/// On Linux this uses eventfd, on other systems this uses a pipe.
/// [`try_consume()`](FdEventSignaller::try_consume) may be used to consume the event.
/// Importantly this is async signal safe. Of course it is `CLOEXEC` as well.
pub struct FdEventSignaller {
    // Always the read end of the fd; maybe the write end as well.
    // With eventfd a single fd serves both roles; with pipes this is the read end
    // and the `write` field below holds the write end.
    fd: OwnedFd,
    #[cfg(not(have_eventfd))]
    write: OwnedFd,
}

impl FdEventSignaller {
    /// The default constructor will abort on failure (fd exhaustion).
    /// This should only be used during startup.
    pub fn new() -> Self {
        cfg_if! {
            if #[cfg(have_eventfd)] {
                // Note we do not want to use EFD_SEMAPHORE because we are binary (not counting) semaphore.
                let fd = unsafe { libc::eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK) };
                if fd < 0 {
                    perror("eventfd");
                    exit_without_destructors(1);
                }
                Self {
                    // SAFETY: eventfd() returned a valid fd we own (checked >= 0 above).
                    fd: unsafe { OwnedFd::from_raw_fd(fd) },
                }
            } else {
                // Implementation using pipes.
                let Ok(pipes) = make_autoclose_pipes() else {
                    exit_without_destructors(1);
                };
                // Both ends must be non-blocking: post() must not block when the pipe
                // buffer is full, and try_consume() must not block when it is empty.
                make_fd_nonblocking(pipes.read.as_raw_fd()).unwrap();
                make_fd_nonblocking(pipes.write.as_raw_fd()).unwrap();
                Self {
                    fd: pipes.read,
                    write: pipes.write,
                }
            }
        }
    }

    /// Return the fd to read from, for notification.
    pub fn read_fd(&self) -> RawFd {
        self.fd.as_raw_fd()
    }

    /// If an event is signalled, consume it; otherwise return.
    /// This does not block.
    /// This retries on EINTR.
    /// Returns true if an event was consumed (the read returned data).
    pub fn try_consume(&self) -> bool {
        // If we are using eventfd, we want to read a single uint64.
        // If we are using pipes, read a lot; note this may leave data on the pipe if post has been
        // called many more times. In no case do we care about the data which is read.
        cfg_if!(
            if #[cfg(have_eventfd)] {
                let mut buff = [0_u64; 1];
            } else {
                let mut buff = [0_u8; 1024];
            }
        );
        let mut ret;
        loop {
            ret =
                unsafe { libc::read(self.read_fd(), buff.as_mut_ptr().cast(), size_of_val(&buff)) };
            if ret >= 0 || errno().0 != EINTR {
                break;
            }
        }
        // The fd is non-blocking, so EAGAIN/EWOULDBLOCK simply mean no event was
        // pending; anything else is a surprising error worth reporting.
        if ret < 0 && ![EAGAIN, EWOULDBLOCK].contains(&errno().0) {
            perror("read");
        }
        ret > 0
    }

    /// Mark that an event has been received. This may be coalesced.
    /// This retries on EINTR.
    pub fn post(&self) {
        // eventfd writes uint64; pipes write 1 byte.
        cfg_if!(
            if #[cfg(have_eventfd)] {
                let c = 1_u64;
            } else {
                let c = 1_u8;
            }
        );
        let mut ret;
        loop {
            // Native-endian bytes: for eventfd the kernel expects a host-order u64.
            let bytes = c.to_ne_bytes();
            ret = nix::unistd::write(unsafe { BorrowedFd::borrow_raw(self.write_fd()) }, &bytes);

            match ret {
                Ok(_) => break,
                Err(nix::Error::EINTR) => continue,
                Err(_) => break,
            }
        }

        if let Err(err) = ret {
            // EAGAIN occurs if either the pipe buffer is full or the eventfd overflows (very unlikely).
            // Both are benign here because the event is already pending (coalesced).
            if ![nix::Error::EAGAIN, nix::Error::EWOULDBLOCK].contains(&err) {
                perror_nix("write", err);
            }
        }
    }

    /// Perform a poll to see if an event is received.
    /// If `wait` is set, wait until it is readable; this does not consume the event
    /// but guarantees that the next call to wait() will not block.
    /// Returns true if readable; false if not readable (or if the wait was interrupted
    /// by a signal).
    pub fn poll(&self, wait: bool /* = false */) -> bool {
        let timeout = if wait {
            Timeout::Forever
        } else {
            Timeout::ZERO
        };
        FdReadableSet::is_fd_readable(self.read_fd(), timeout)
    }

    /// Return the fd to write to.
    /// With eventfd the single fd serves both roles; with pipes this is the write end.
    fn write_fd(&self) -> RawFd {
        cfg_if! {
            if #[cfg(have_eventfd)] {
                self.fd.as_raw_fd()
            } else {
                self.write.as_raw_fd()
            }
        }
    }
}

/// Each item added to FdMonitor is assigned a unique ID, which is not recycled. Items may have
/// their callback triggered immediately by passing the ID. Zero is a sentinel.
// Newtype over the raw u64 id, so ids cannot be confused with other integers.
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct FdMonitorItemId(u64);

impl From<FdMonitorItemId> for u64 {
    fn from(value: FdMonitorItemId) -> Self {
        value.0
    }
}

impl From<u64> for FdMonitorItemId {
    fn from(value: u64) -> Self {
        FdMonitorItemId(value)
    }
}

/// The callback type used by [`FdMonitorItem`]. It is passed a mutable reference to the
/// `FdMonitorItem`'s [`FdMonitorItem::fd`]. If the callback takes the fd (leaving `None`),
/// the fd is closed and the callback will not be invoked again.
pub type Callback = Box<dyn Fn(&mut Option<OwnedFd>) + Send + Sync>;

/// An item containing an fd and callback, which can be monitored to watch when it becomes readable
/// and invoke the callback.
pub struct FdMonitorItem {
    /// The fd to monitor. `None` once the callback has taken (closed) it.
    fd: Option<OwnedFd>,
    /// A callback to be invoked when the fd is readable, or for another reason given by the wake reason.
    /// If the fd is invalid on return from the function, then the item is removed from the [`FdMonitor`] set.
    callback: Callback,
}

impl FdMonitorItem {
    /// Invoke this item's callback because the fd is readable.
    /// If the callback takes the fd (setting it to `None`), the fd is closed and
    /// this item will no longer be monitored.
    fn service(&mut self) {
        (self.callback)(&mut self.fd);
    }
}

/// A thread-safe class which can monitor a set of fds, invoking a callback when any becomes
/// readable (or has been HUP'd).
pub struct FdMonitor {
    /// Our self-signaller, used to wake up the background thread out of select().
    change_signaller: Arc<FdEventSignaller>,
    /// The data shared between the background thread and the `FdMonitor` instance.
    data: Arc<Mutex<SharedData>>,
    /// The last ID assigned or `0` if none. Monotonically increasing; never recycled.
    last_id: AtomicU64,
}

// We don't want to manually implement `Sync` for `FdMonitor` but we do want to make sure that it's
// always using interior mutability correctly and therefore automatically `Sync`.
// This is a compile-time-only check; the const block generates no runtime code.
const _: () = {
    // It is sufficient to declare the generic function pointers; calling them too would require
    // using `const fn` with Send/Sync constraints which wasn't stabilized until rustc 1.61.0
    fn assert_sync<T: Sync>() {}
    let _ = assert_sync::<FdMonitor>;
};

/// Data shared between the `FdMonitor` instance and its associated `BackgroundFdMonitor`.
/// All fields are guarded by the `Mutex` wrapping this struct.
struct SharedData {
    /// The map of items. This may be modified by the main thread with the mutex locked.
    items: HashMap<FdMonitorItemId, FdMonitorItem>,
    /// Whether the background thread is running.
    running: bool,
    /// Used to signal that the background thread should terminate.
    terminate: bool,
}

/// The background half of the fd monitor, running on its own thread.
struct BackgroundFdMonitor {
    /// Our self-signaller. When this is written to, it means there are new items pending, new items
    /// in the poke list, or terminate has been set.
    change_signaller: Arc<FdEventSignaller>,
    /// The data shared between the background thread and the `FdMonitor` instance.
    /// Note the locking here is very coarse and the lock is held while servicing items.
    /// This means that an item which reads a lot of data may prevent adding other items.
    /// When we do true multithreaded execution, we may want to make the locking more fine-grained (per-item).
    data: Arc<Mutex<SharedData>>,
}

impl FdMonitor {
    /// Add an item to the monitor, to be serviced when its fd becomes readable.
    /// Returns the [`FdMonitorItemId`] assigned to the item.
    pub fn add(&self, fd: OwnedFd, callback: Callback) -> FdMonitorItemId {
        // IDs start at 1 (0 is the sentinel) and are never recycled.
        let item_id = self.last_id.fetch_add(1, Ordering::Relaxed) + 1;
        let item_id = FdMonitorItemId(item_id);
        let item: FdMonitorItem = FdMonitorItem {
            fd: Some(fd),
            callback,
        };
        let start_thread = {
            // Lock around a local region
            let mut data = self.data.lock().expect("Mutex poisoned!");

            // Assign an id and add the item; the id must be fresh.
            let old_value = data.items.insert(item_id, item);
            assert!(old_value.is_none(), "Item ID {} already exists!", item_id.0);

            // Start the thread if it hasn't already been started
            let already_started = data.running;
            data.running = true;
            !already_started
        };

        if start_thread {
            flog!(fd_monitor, "Thread starting");
            let background_monitor = BackgroundFdMonitor {
                data: Arc::clone(&self.data),
                change_signaller: Arc::clone(&self.change_signaller),
            };
            crate::threads::spawn(move || {
                background_monitor.run();
            });
        }

        // Tickle our signaller so the background thread recomputes its wait set.
        self.change_signaller.post();

        item_id
    }

    /// Invoke `cb` with a borrow of the fd belonging to `item_id`, if that item still
    /// has an open fd. Panics if the item id is not present in the set.
    pub fn with_fd(&self, item_id: FdMonitorItemId, cb: impl FnOnce(BorrowedFd)) {
        let data = self.data.lock().expect("Mutex poisoned!");
        // Use the same panic message as `remove_item` for a missing id, rather than a
        // bare unwrap, so a bad id produces a consistent diagnostic.
        if let Some(fd) = &data.items.get(&item_id).expect("Item ID not found").fd {
            cb(fd.as_fd());
        }
    }

    /// Remove an item from the monitor and return its file descriptor.
    /// Note we may remove an item whose fd is currently being waited on in select(); this is
    /// considered benign because the underlying item will no longer be present and so its
    /// callback will not be invoked.
    pub fn remove_item(&self, item_id: FdMonitorItemId) -> Option<OwnedFd> {
        assert!(item_id.0 > 0, "Invalid item id!");
        let mut data = self.data.lock().expect("Mutex poisoned!");
        let removed = data.items.remove(&item_id).expect("Item ID not found");
        drop(data);
        // Allow the background thread to recompute the wait set.
        self.change_signaller.post();
        removed.fd
    }

    /// Create a monitor with no items. The background thread is started lazily by
    /// the first call to [`add`](Self::add).
    pub fn new() -> Self {
        Self {
            data: Arc::new(Mutex::new(SharedData {
                items: HashMap::new(),
                running: false,
                terminate: false,
            })),
            change_signaller: Arc::new(FdEventSignaller::new()),
            last_id: AtomicU64::new(0),
        }
    }
}

impl BackgroundFdMonitor {
    /// Starts monitoring the fd set and listening for new fds to add to the set. Takes ownership
    /// over its instance so that this method cannot be called again.
    /// Runs until terminate is set, or until we time out with no items to monitor.
    fn run(self) {
        assert_is_background_thread();

        let mut fds = FdReadableSet::new();
        let mut item_ids: Vec<FdMonitorItemId> = Vec::new();

        loop {
            // Our general flow is that a client thread adds an item for us to monitor,
            // and we use select() or poll() to wait on it. However, the client thread
            // may then reclaim the item. We are currently blocked in select():
            // how then do we stop waiting on it?
            //
            // The safest, slowest approach is:
            //  - The background thread waits on select() for the set of active file descriptors.
            //  - The client thread records a request to remove an item.
            //  - The client thread wakes up the background thread via change_signaller.
            //  - The background thread check for any pending removals, and removes and returns them.
            //  - The client thread accepts the removed item and continues on.
            // However this means a round-trip from the client thread to this background thread,
            // plus additional blocking system calls. This slows down the client thread.
            //
            // A second possibility is that:
            //  - The background thread waits on select() for the set of active file descriptors.
            //  - The client thread directly removes an item (protected by the mutex).
            //  - After select() returns the set of active file descriptors, we only invoke callbacks
            //    for items whose file descriptors are still in the set.
            // However this risks the ABA problem: if the client thread reclaims an item, closes its
            // fd, and then adds a new item which happens to get the same fd, we might falsely
            // trigger the callback of the new item even though its fd is not readable.
            //
            // So we use the following approach:
            // - The background thread creates a snapshotted list of active ItemIDs.
            // - The background thread waits in select() on the set of active file descriptors,
            //   without holding the lock.
            // - The client thread directly removes an item (protected by the mutex).
            // - After select() returns the set of active file descriptors, we only invoke callbacks
            //   for items whose file descriptors are marked active, and whose ItemID was snapshotted.
            //
            // This avoids the ABA problem because ItemIDs are never recycled. It does have a race where
            // we might select() on a file descriptor that has been closed or recycled. Thus we must be
            // prepared to handle EBADF. This race is otherwise considered benign.

            // Construct the set of fds to monitor.
            // Our change_signaller is special-cased.
            fds.clear();
            let change_signal_fd = self.change_signaller.read_fd();
            fds.add(change_signal_fd);

            // Grab the lock and snapshot the item_ids. Skip items with invalid fds
            // (those whose callback has already closed the fd).
            let mut data = self.data.lock().expect("Mutex poisoned!");
            item_ids.clear();
            item_ids.reserve(data.items.len());
            for (item_id, item) in &data.items {
                if let Some(fd) = &item.fd {
                    fds.add(fd.as_raw_fd());
                    item_ids.push(*item_id);
                }
            }

            // Sort it to avoid the non-determinism of the hash table.
            item_ids.sort_unstable();

            // If we have no items, then we wish to allow the thread to exit, but after a time, so
            // we aren't spinning up and tearing down the thread repeatedly. Set a timeout of 256
            // msec; if nothing becomes readable by then we will exit. We refer to this as the
            // wait-lap.
            let is_wait_lap = item_ids.is_empty();
            let timeout = if is_wait_lap {
                Some(Duration::from_millis(256))
            } else {
                None
            };

            // Call select().
            // We must release and then re-acquire the lock around select() to avoid deadlock.
            // Note that while we are waiting in select(), the client thread may add or remove items;
            // in particular it may even close file descriptors that we are waiting on. That is why
            // we handle EBADF. Note that even if the file descriptor is recycled, we don't invoke
            // a callback for it unless its ItemID is still present.
            //
            // Note that WSLv1 doesn't throw EBADF if the fd is closed mid-select.
            drop(data);
            let ret = fds.check_readable(timeout.map_or(Timeout::Forever, Timeout::Duration));
            // Cygwin reports ret < 0 && errno == 0 as success.
            let err = errno().0;
            if ret < 0
                && !matches!(err, libc::EINTR | libc::EBADF | libc::EAGAIN)
                && !(cfg!(cygwin) && err == 0)
            {
                // Surprising error
                perror("select");
            }

            // Re-acquire the lock.
            data = self.data.lock().expect("Mutex poisoned!");

            // For each item id that we snapshotted, if the corresponding item is still in our
            // set of active items and its fd was readable, then service it.
            for item_id in &item_ids {
                let Some(item) = data.items.get_mut(item_id) else {
                    // Item was removed while we were waiting.
                    // Note there is no risk of an ABA problem because ItemIDs are never recycled.
                    continue;
                };
                if item.fd.as_ref().is_some_and(|fd| fds.test(fd.as_raw_fd())) {
                    // The item survived the wait and its fd is readable: run the callback.
                    // Note the callback may close (take) the fd.
                    item.service();
                }
            }

            // Handle any changes if the change signaller was set. Alternatively, this may be the
            // wait lap, in which case we might want to commit to exiting.
            let change_signalled = fds.test(change_signal_fd);
            if change_signalled || is_wait_lap {
                // Clear the change signaller before processing incoming changes
                self.change_signaller.try_consume();

                if data.terminate || (is_wait_lap && data.items.is_empty() && !change_signalled) {
                    // Maybe terminate is set. Alternatively, maybe we had no items, waited a bit,
                    // and still have no items. It's important to do this while holding the lock,
                    // otherwise we race with new items being added.
                    assert!(
                        data.running,
                        "Thread should be running because we're that thread"
                    );
                    flog!(fd_monitor, "Thread exiting");
                    data.running = false;
                    break;
                }
            }
        }
    }
}

/// In ordinary usage, we never invoke the destructor. This is used in the tests to not leave
/// stale fds around; this is why it's very hacky!
impl Drop for FdMonitor {
    fn drop(&mut self) {
        // Ask the background thread to exit, then nudge it out of select().
        self.data.lock().expect("Mutex poisoned!").terminate = true;
        self.change_signaller.post();

        // Spin (with a short sleep) until the background thread has observed the
        // terminate flag and cleared `running`.
        loop {
            let still_running = self.data.lock().expect("Mutex poisoned!").running;
            if !still_running {
                break;
            }
            std::thread::sleep(Duration::from_millis(5));
        }
    }
}

#[cfg(test)]
mod tests {
    use crate::portable_atomic::AtomicU64;
    use std::fs::File;
    use std::io::Write as _;
    use std::os::fd::{AsRawFd as _, OwnedFd};
    use std::sync::atomic::{AtomicUsize, Ordering};
    use std::sync::{Arc, Barrier, Mutex};
    use std::thread;
    use std::time::Duration;

    use assert_matches::assert_matches;
    use errno::errno;

    use crate::fd_monitor::{FdEventSignaller, FdMonitor};
    use crate::fd_readable_set::{FdReadableSet, Timeout};
    use crate::fds::{make_autoclose_pipes, AutoClosePipes};
    use crate::tests::prelude::*;

    /// Helper to make an item which counts how many times its callback was invoked.
    ///
    /// This could be structured differently to avoid the `Mutex` on `writer`, but it's not worth it
    /// since this is just used for test purposes.
    struct ItemMaker {
        // Total number of bytes read by the callback across all invocations.
        pub length_read: AtomicUsize,
        // Number of times the callback has been invoked.
        pub total_calls: AtomicUsize,
        // The id assigned by the monitor (0 until registered).
        item_id: AtomicU64,
        // If set, the callback closes the fd after every invocation.
        pub always_close: bool,
        // The write end of the pipe; writing here makes the monitored fd readable.
        pub writer: Mutex<Option<File>>,
    }

    impl ItemMaker {
        /// Create an `ItemMaker` with the default configuration and register it with `monitor`.
        pub fn insert_new_into(monitor: &FdMonitor) -> Arc<Self> {
            Self::insert_new_into2(monitor, |_| {})
        }

        /// Create an `ItemMaker`, let `config` adjust it (e.g. set `always_close`),
        /// then register the read end of its pipe with `monitor`.
        pub fn insert_new_into2<F: Fn(&mut Self)>(monitor: &FdMonitor, config: F) -> Arc<Self> {
            let pipes = make_autoclose_pipes().expect("fds exhausted!");

            let mut result = ItemMaker {
                length_read: 0.into(),
                total_calls: 0.into(),
                item_id: 0.into(),
                always_close: false,
                writer: Mutex::new(Some(File::from(pipes.write))),
            };

            config(&mut result);

            let result = Arc::new(result);
            let callback = {
                let result = Arc::clone(&result);
                move |fd: &mut Option<OwnedFd>| result.callback(fd)
            };
            let fd = pipes.read;
            let item_id = monitor.add(fd, Box::new(callback));
            result.item_id.store(u64::from(item_id), Ordering::Relaxed);

            result
        }

        /// The monitor callback: read whatever is available, tally bytes and calls, and
        /// close the fd on EOF (a 0-byte read) or when `always_close` is set.
        fn callback(&self, fd: &mut Option<OwnedFd>) {
            let mut buf = [0u8; 1024];
            let res = nix::unistd::read(fd.as_ref().unwrap(), &mut buf);
            let amt = res.expect("read error!");
            self.length_read.fetch_add(amt, Ordering::Relaxed);
            let was_closed = amt == 0;

            self.total_calls.fetch_add(1, Ordering::Relaxed);
            if was_closed || self.always_close {
                // Dropping the OwnedFd closes it; the monitor then stops servicing this item.
                drop(fd.take());
            }
        }

        /// Write 42 bytes to our write end.
        fn write42(&self) {
            let buf = [0u8; 42];
            let mut writer = self.writer.lock().expect("Mutex poisoned!");
            writer
                .as_mut()
                .unwrap()
                .write_all(&buf)
                .expect("Error writing 42 bytes to pipe!");
        }
    }

    #[test]
    #[serial]
    fn fd_monitor_items() {
        let _cleanup = test_init();
        let monitor = FdMonitor::new();

        // Item which will never receive data or be called.
        let item_never = ItemMaker::insert_new_into(&monitor);

        // Item which should get exactly 42 bytes.
        let item42 = ItemMaker::insert_new_into(&monitor);

        // Item which should get 42 bytes then get notified it is closed.
        let item42_then_close = ItemMaker::insert_new_into(&monitor);

        // Item which should get a callback exactly once.
        let item_oneshot = ItemMaker::insert_new_into2(&monitor, |item| {
            item.always_close = true;
        });

        item42.write42();
        item42_then_close.write42();
        // Dropping the writer closes the pipe's write end, producing EOF on the read side.
        *item42_then_close.writer.lock().expect("Mutex poisoned!") = None;
        item_oneshot.write42();

        // May need to loop here to ensure our fd_monitor gets scheduled. See #7699.
        for _ in 0..100 {
            std::thread::sleep(Duration::from_millis(84));
            if item_oneshot.total_calls.load(Ordering::Relaxed) > 0 {
                break;
            }
        }

        // Dropping the monitor joins the background thread, so all callbacks are done.
        drop(monitor);

        assert_eq!(item_never.length_read.load(Ordering::Relaxed), 0);

        assert_eq!(item42.length_read.load(Ordering::Relaxed), 42);

        // Two calls: one for the 42 bytes, one for the EOF notification.
        assert_eq!(item42_then_close.length_read.load(Ordering::Relaxed), 42);
        assert_eq!(item42_then_close.total_calls.load(Ordering::Relaxed), 2);

        assert_eq!(item_oneshot.length_read.load(Ordering::Relaxed), 42);
        assert_eq!(item_oneshot.total_calls.load(Ordering::Relaxed), 1);
    }

    #[test]
    fn test_fd_event_signaller() {
        let sema = FdEventSignaller::new();
        // Initially no event is pending.
        assert!(!sema.try_consume());
        assert!(!sema.poll(false));

        // Check that exactly one pending event is observed, then consumed.
        let consume_one = |s: &FdEventSignaller| {
            // poll() does not consume, so the fd stays readable across calls.
            assert!(s.poll(false));
            assert!(s.poll(false));
            assert!(s.try_consume());
            // After consuming, nothing remains.
            assert!(!s.poll(false));
            assert!(!s.try_consume());
        };

        // A single post produces one consumable event.
        sema.post();
        consume_one(&sema);

        // Multiple posts are coalesced into a single event.
        sema.post();
        sema.post();
        sema.post();
        consume_one(&sema);
    }

    // A helper function which calls poll() or selects() on a file descriptor in the background,
    // and then invokes the `bad_action` function on the file descriptor while the poll/select is
    // waiting. The function returns Result<i32, i32>: either the number of readable file descriptors
    // or the error code from poll/select.
    #[cfg(test)]
    fn do_something_bad_during_select<F>(bad_action: F) -> Result<i32, i32>
    where
        F: FnOnce(OwnedFd) -> Option<OwnedFd>,
    {
        let AutoClosePipes {
            read: read_fd,
            write: write_fd,
        } = make_autoclose_pipes().expect("Failed to create pipe");
        let raw_read_fd = read_fd.as_raw_fd();

        // Try to ensure that the thread will be scheduled by waiting until it is.
        let barrier = Arc::new(Barrier::new(2));
        let barrier_clone = Arc::clone(&barrier);

        let select_thread = thread::spawn(move || -> Result<i32, i32> {
            let mut fd_set = FdReadableSet::new();
            fd_set.add(raw_read_fd);

            barrier_clone.wait();

            // Timeout after 500 msec.
            // macOS will eagerly return EBADF if the fd is closed; Linux will hit the timeout.
            let timeout = Timeout::Duration(Duration::from_millis(500));
            let ret = fd_set.check_readable(timeout);
            if ret < 0 {
                Err(errno().0)
            } else {
                Ok(ret)
            }
        });

        barrier.wait();
        thread::sleep(Duration::from_millis(100));
        let read_fd = bad_action(read_fd);

        let result = select_thread.join().expect("Select thread panicked");
        // Ensure these stay alive until after thread is joined.
        drop(read_fd);
        drop(write_fd);
        result
    }

    #[test]
    fn test_close_during_select_ebadf() {
        use crate::common::{is_windows_subsystem_for_linux as is_wsl, WSL};
        // Close the fd outright while select()/poll() is waiting on it.
        let close_it = |read_fd: OwnedFd| {
            drop(read_fd);
            None
        };
        let result = do_something_bad_during_select(close_it);

        // WSLv1 does not error out with EBADF if the fd is closed mid-select.
        // This is OK because we do not _depend_ on this behavior; the only
        // true requirement is that we don't panic in the handling code above.
        assert!(
            is_wsl(WSL::V1) || matches!(result, Err(libc::EBADF) | Ok(0 | 1)),
            "select/poll should have failed with EBADF or timed out or the fd should be ready"
        );
    }

    #[test]
    fn test_dup2_during_select_ebadf() {
        // Make a pipe whose read end we can dup2 over the fd being waited on.
        let AutoClosePipes {
            read: pipe_read,
            write: pipe_write,
        } = make_autoclose_pipes().expect("Failed to create pipe");

        let dup2_it = |read_fd: OwnedFd| {
            // dup2 the pipe's read end over this fd, replacing the file description
            // that select/poll is waiting on; this should cause it to fail or time out.
            assert!(read_fd.as_raw_fd() > 0, "fd should be valid and not stdin");
            unsafe { libc::dup2(pipe_read.as_raw_fd(), read_fd.as_raw_fd()) };
            Some(read_fd)
        };
        let result = do_something_bad_during_select(dup2_it);
        assert_matches!(
            result,
            Err(libc::EBADF) | Ok(0 | 1),
            "select/poll should have failed with EBADF or timed out or the fd should be ready"
        );
        // Ensure these stay alive until after thread is joined.
        drop(pipe_read);
        drop(pipe_write);
    }
}