#![forbid(unsafe_code)]

use core::future::Future;
use core::pin::Pin;
use core::task::{Context, Poll};
use std::any::type_name;
use std::cell::Cell;
use std::fmt::{Debug, Formatter};
use std::sync::mpsc::{RecvError, SendError, TryRecvError, TrySendError};
use std::sync::{Arc, Mutex};
use std::task::Waker;

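/// Shared bookkeeping for one channel: the wakers of any senders waiting for buffer
/// space and of a receiver waiting for data.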
pub struct Inner {
    sender_wakers: Vec<Waker>,
    receiver_waker: Option<Waker>,
}

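/// The sending half of a channel created by [`oneshot`].
/// Sending a value consumes the sender.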
pub struct OneSender<T: Send> {
    std_sender: Option<std::sync::mpsc::SyncSender<T>>,
    inner: Arc<Mutex<Inner>>,
}
impl<T: Send> OneSender<T> {
    /// Saves the value in the channel buffer and consumes the sender.
    ///
    /// Note that the receiver may drop before reading the value.
    ///
    /// # Errors
    /// When the receiver is already dropped, returns `SendError` and the value.
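    ///
    /// # Example
    /// A minimal usage sketch; the `use` of this crate is omitted, so the block is
    /// not compiled as a doctest:
    /// ```ignore
    /// let (sender, receiver) = oneshot();
    /// sender.send(42).unwrap(); // Consumes `sender`.
    /// assert_eq!(Ok(42), receiver.recv());
    /// ```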
    #[allow(clippy::missing_panics_doc)]
    pub fn send(mut self, value: T) -> Result<(), SendError<T>> {
        self.std_sender.take().unwrap().send(value)
        // This method consumes self.  When self drops, it wakes any receiver.
    }
}
impl<T: Send> Drop for OneSender<T> {
    fn drop(&mut self) {
        let mut inner_guard = self.inner.lock().unwrap();
        self.std_sender.take();
        let opt_waker = inner_guard.receiver_waker.take();
        drop(inner_guard);
        if let Some(waker) = opt_waker {
            waker.wake();
        }
    }
}
impl<T: Send> Debug for OneSender<T> {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        write!(f, "OneSender<{}>", type_name::<T>())
    }
}

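/// The future that [`SyncSender::async_send`] awaits while waiting for buffer space.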
pub struct SendFut<T: Send> {
    std_sender: std::sync::mpsc::SyncSender<T>,
    inner: Arc<Mutex<Inner>>,
    value: Cell<Option<T>>,
}
impl<T: Send> Future for SendFut<T> {
    type Output = Result<(), SendError<T>>;

    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        // The enclosing Rust-generated `async_send` future prevents re-polling.
        // It panics with '`async fn` resumed after completion'.  So this can never panic:
        let value = self.value.take().unwrap();
        let mut inner_guard = self.inner.lock().unwrap();
        match self.std_sender.try_send(value) {
            Ok(()) => Poll::Ready(Ok(())),
            Err(TrySendError::Disconnected(value)) => Poll::Ready(Err(SendError(value))),
            Err(TrySendError::Full(value)) => {
                self.value.set(Some(value));
                inner_guard.sender_wakers.push(cx.waker().clone());
                Poll::Pending
            }
        }
    }
}

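/// The sending half of a channel created by [`sync_channel`].
/// It can be cloned to send to the same channel from multiple places.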
#[derive(Clone)]
pub struct SyncSender<T: Send> {
    std_sender: Option<std::sync::mpsc::SyncSender<T>>,
    inner: Arc<Mutex<Inner>>,
}
impl<T: Send + Clone> SyncSender<T> {
    /// Sends a value on this synchronous channel.
    ///
    /// This method waits asynchronously until space in the internal buffer becomes available or a
    /// receiver is available to hand off the message to.
    ///
    /// Note that a successful send does not guarantee that the receiver will ever see the data.
    /// Items may be enqueued in the internal buffer for the receiver to receive at a later time.
    ///
    /// # Errors
    /// This function will never panic, but it may return Err if the Receiver has disconnected and
    /// is no longer able to receive information.
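    ///
    /// # Example
    /// A sketch of sending from inside an async task; the executor and the `use` of
    /// this crate are omitted, so the block is not compiled as a doctest:
    /// ```ignore
    /// let (sender, mut receiver) = sync_channel(1);
    /// sender.async_send("hello").await.unwrap();
    /// assert_eq!(Ok("hello"), receiver.async_recv().await);
    /// ```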
    #[allow(clippy::missing_panics_doc)]
    pub async fn async_send(&self, value: T) -> Result<(), SendError<T>> {
        self.wake_receiver_if_ok(
            SendFut {
                std_sender: self.std_sender.as_ref().unwrap().clone(),
                inner: self.inner.clone(),
                value: Cell::new(Some(value)),
            }
            .await,
        )
    }
}
impl<T: Send> SyncSender<T> {
    fn wake_receiver(&self) {
        let opt_waker = self.inner.lock().unwrap().receiver_waker.take();
        if let Some(waker) = opt_waker {
            waker.wake();
        }
    }

    fn wake_receiver_if_ok<E>(&self, result: Result<(), E>) -> Result<(), E> {
        if result.is_ok() {
            self.wake_receiver();
        }
        result
    }

    /// Sends a value on this synchronous channel.
    ///
    /// This function will block until space in the internal buffer becomes available or a receiver
    /// is available to hand off the message to.
    ///
    /// Note that a successful send does not guarantee that the receiver will ever see the data.
    /// Items may be enqueued in the internal buffer for the receiver to receive at a later time.
    ///
    /// # Errors
    /// This function will never panic, but it may return Err if the Receiver has disconnected and
    /// is no longer able to receive information.
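    ///
    /// # Example
    /// A blocking-send sketch; the `use` of this crate is omitted, so the block is
    /// not compiled as a doctest:
    /// ```ignore
    /// let (sender, receiver) = sync_channel(2);
    /// sender.send(1).unwrap();
    /// sender.send(2).unwrap();
    /// assert_eq!(Ok(1), receiver.recv());
    /// assert_eq!(Ok(2), receiver.recv());
    /// ```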
    #[allow(clippy::missing_panics_doc)]
    pub fn send(&self, value: T) -> Result<(), SendError<T>> {
        self.wake_receiver_if_ok(self.std_sender.as_ref().unwrap().send(value))
    }

    /// Attempts to send a value on this channel.  Returns immediately.
    ///
    /// # Errors
    /// Returns [`TrySendError::Full`] when the channel's buffer is full.
    ///
    /// Returns [`TrySendError::Disconnected`] when the channel's receiver has been dropped.
    ///
    /// [`TrySendError::Full`]: std::sync::mpsc::TrySendError::Full
    /// [`TrySendError::Disconnected`]: std::sync::mpsc::TrySendError::Disconnected
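    ///
    /// # Example
    /// A non-blocking-send sketch; the `use` of this crate is omitted, so the block
    /// is not compiled as a doctest:
    /// ```ignore
    /// let (sender, receiver) = sync_channel(1);
    /// sender.try_send(1).unwrap();
    /// // The single buffer slot is full, so the value comes straight back.
    /// assert!(matches!(sender.try_send(2), Err(TrySendError::Full(2))));
    /// ```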
    #[allow(clippy::missing_panics_doc)]
    pub fn try_send(&self, value: T) -> Result<(), std::sync::mpsc::TrySendError<T>> {
        self.wake_receiver_if_ok(self.std_sender.as_ref().unwrap().try_send(value))
    }
}
impl<T: Send> Drop for SyncSender<T> {
    fn drop(&mut self) {
        let mut inner_guard = self.inner.lock().unwrap();
        self.std_sender.take();
        if Arc::strong_count(&self.inner) < 3 {
            // Either the receiver is already dropped or we are the last sender.
            // Either way, it's safe to wake any receiver.
            let opt_waker = inner_guard.receiver_waker.take();
            drop(inner_guard);
            if let Some(waker) = opt_waker {
                waker.wake();
            }
        }
    }
}
impl<T: Send> Debug for SyncSender<T> {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        write!(f, "SyncSender<{}>", type_name::<T>())
    }
}

/// The receiving half of a channel.  This half can only be owned by one thread.
///
/// To receive messages sent to the channel,
/// call `async_recv` or one of the blocking receive methods.
pub struct Receiver<T>
where
    T: Send,
{
    std_receiver: Option<std::sync::mpsc::Receiver<T>>,
    inner: Arc<Mutex<Inner>>,
}
impl<T: Send> Receiver<T> {
    fn wake_senders(&self) {
        let wakers: Vec<Waker> = std::mem::take(&mut self.inner.lock().unwrap().sender_wakers);
        for waker in wakers {
            waker.wake();
        }
    }

    fn wake_senders_if_ok<E>(&self, result: Result<T, E>) -> Result<T, E> {
        if result.is_ok() {
            self.wake_senders();
        }
        result
    }

    /// Attempts to wait for a value on this receiver, returning an error if the corresponding
    /// channel has hung up.
    ///
    /// The future returned by this function completes when there is data available
    /// or it's not possible for more data to be sent (because all senders were dropped).
    /// Once a message is sent to the corresponding sender,
    /// this receiver will wake up and return that message.
    ///
    /// If the corresponding sender has disconnected, or it disconnects while this call is blocking,
    /// this call will wake up and return Err to indicate that no more messages can ever be
    /// received on this channel.  However, since channels are buffered, messages sent before the
    /// disconnect will still be properly received.
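    ///
    /// # Example
    /// A sketch of receiving inside an async task; the executor and the `use` of
    /// this crate are omitted, so the block is not compiled as a doctest:
    /// ```ignore
    /// let (sender, mut receiver) = sync_channel(1);
    /// sender.send("ping").unwrap();
    /// assert_eq!(Ok("ping"), receiver.async_recv().await);
    /// drop(sender);
    /// // Every sender is gone and the buffer is empty, so receiving now fails.
    /// assert!(receiver.async_recv().await.is_err());
    /// ```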
    #[allow(clippy::missing_errors_doc)]
    pub async fn async_recv(&mut self) -> Result<T, std::sync::mpsc::RecvError> {
        let result = (&mut *self).await;
        self.wake_senders_if_ok(result)
    }

    /// Attempts to return a pending value on this receiver without blocking.
    ///
    /// This method will never block the caller in order to wait for data to become available.
    /// Instead, this will always return immediately with a possible option of pending data on the
    /// channel.
    ///
    /// This is useful for a flavor of “optimistic check” before deciding to block on a receiver.
    ///
    /// Compared with `recv`, this function has two failure cases
    /// instead of one (one for disconnection, one for an empty buffer).
    #[allow(clippy::missing_errors_doc)]
    #[allow(clippy::missing_panics_doc)]
    pub fn try_recv(&self) -> Result<T, std::sync::mpsc::TryRecvError> {
        self.wake_senders_if_ok(self.std_receiver.as_ref().unwrap().try_recv())
    }

    /// Attempts to wait for a value on this receiver, returning an error if the corresponding
    /// channel has hung up.
    ///
    /// This function will always block the current thread if there is no data available
    /// and it’s possible for more data to be sent (at least one sender still exists).
    /// Once a message is sent to the corresponding sender,
    /// this receiver will wake up and return that message.
    ///
    /// If the corresponding sender has disconnected, or it disconnects while this call is blocking,
    /// this call will wake up and return Err to indicate that no more messages can ever be
    /// received on this channel.  However, since channels are buffered, messages sent before the
    /// disconnect will still be properly received.
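    ///
    /// # Example
    /// A blocking-receive sketch across threads; the `use` of this crate is omitted,
    /// so the block is not compiled as a doctest:
    /// ```ignore
    /// let (sender, receiver) = sync_channel(1);
    /// std::thread::spawn(move || sender.send(3).unwrap());
    /// assert_eq!(Ok(3), receiver.recv());
    /// ```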
    #[allow(clippy::missing_errors_doc)]
    #[allow(clippy::missing_panics_doc)]
    pub fn recv(&self) -> Result<T, std::sync::mpsc::RecvError> {
        self.wake_senders_if_ok(self.std_receiver.as_ref().unwrap().recv())
    }

    /// Attempts to wait for a value on this receiver, returning an error if the corresponding
    /// channel has hung up, or if it waits more than timeout.
    ///
    /// This function will always block the current thread if there is no data available and it’s
    /// possible for more data to be sent (at least one sender still exists).  Once a message is
    /// sent to the corresponding sender, this receiver will wake up and return that message.
    ///
    /// If the corresponding sender has disconnected, or it disconnects while this call is blocking,
    /// this call will wake up and return Err to indicate that no more messages can ever be received
    /// on this channel.  However, since channels are buffered, messages sent before the disconnect
    /// will still be properly received.
    ///
    /// # Known Issues
    /// There is currently a known issue in the inner `std::sync::mpsc::Receiver`
    /// that can cause `recv_timeout` to panic unexpectedly.  See the explanation at
    /// [`std::sync::mpsc::Receiver::recv_timeout`](https://doc.rust-lang.org/std/sync/mpsc/struct.Receiver.html#method.recv_timeout).
    #[allow(clippy::missing_errors_doc)]
    #[allow(clippy::missing_panics_doc)]
    pub fn recv_timeout(
        &self,
        timeout: core::time::Duration,
    ) -> Result<T, std::sync::mpsc::RecvTimeoutError> {
        self.wake_senders_if_ok(self.std_receiver.as_ref().unwrap().recv_timeout(timeout))
    }

    /// Attempts to wait for a value on this receiver, returning an error if the corresponding
    /// channel has hung up, or if deadline is reached.
    ///
    /// This function will always block the current thread if there is no data available and it’s
    /// possible for more data to be sent.  Once a message is sent to the corresponding sender,
    /// then this receiver will wake up and return that message.
    ///
    /// If the corresponding Sender has disconnected, or it disconnects while this call is blocking,
    /// this call will wake up and return Err to indicate that no more messages can ever be received
    /// on this channel.  However, since channels are buffered, messages sent before the disconnect
    /// will still be properly received.
    #[cfg(unstable)]
    #[allow(clippy::missing_errors_doc)]
    pub fn recv_deadline(
        &self,
        deadline: std::time::Instant,
    ) -> Result<T, std::sync::mpsc::RecvTimeoutError> {
        self.wake_senders_if_ok(self.std_receiver.as_ref().unwrap().recv_deadline(deadline))
    }

    /// Returns an iterator that will block waiting for messages, but never panic.
    /// It will return `None` when the channel has hung up.
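    ///
    /// # Example
    /// A sketch; the `use` of this crate is omitted, so the block is not compiled as
    /// a doctest:
    /// ```ignore
    /// let (sender, receiver) = sync_channel(3);
    /// sender.send(1).unwrap();
    /// sender.send(2).unwrap();
    /// drop(sender);
    /// assert_eq!(vec![1, 2], receiver.iter().collect::<Vec<i32>>());
    /// ```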
    pub fn iter(&self) -> Iter<'_, T> {
        Iter { rx: self }
    }

    /// Returns an iterator that will attempt to yield all pending values.
    /// It will return `None` if there are no more pending values or if the channel has hung up.
    /// The iterator will never panic or block the user by waiting for values.
    pub fn try_iter(&self) -> TryIter<'_, T> {
        TryIter { rx: self }
    }
}
impl<T: Send> Drop for Receiver<T> {
    fn drop(&mut self) {
        let mut inner_guard = self.inner.lock().unwrap();
        self.std_receiver.take();
        let receiver_waker = inner_guard.receiver_waker.take();
        let sender_wakers: Vec<Waker> = std::mem::take(&mut inner_guard.sender_wakers);
        drop(inner_guard);
        drop(receiver_waker);
        for waker in sender_wakers {
            waker.wake();
        }
    }
}
#[doc(hidden)]
impl<T: Send> Future for Receiver<T> {
    type Output = Result<T, std::sync::mpsc::RecvError>;

    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        let mut inner_guard = self.inner.lock().unwrap();
        match self.std_receiver.as_ref().unwrap().try_recv() {
            Ok(value) => Poll::Ready(Ok(value)),
            Err(TryRecvError::Disconnected) => Poll::Ready(Err(RecvError)),
            Err(TryRecvError::Empty) => {
                let waker = cx.waker().clone();
                if Arc::strong_count(&self.inner) < 2 {
                    // Last sender dropped.
                    Poll::Ready(Err(RecvError))
                } else {
                    let opt_waker = inner_guard.receiver_waker.replace(waker);
                    drop(inner_guard);
                    drop(opt_waker);
                    Poll::Pending
                }
            }
        }
    }
}
impl<T: Send> Debug for Receiver<T> {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        write!(f, "Receiver<{}>", type_name::<T>())
    }
}
impl<'a, T: Send> IntoIterator for &'a Receiver<T> {
    type Item = T;
    type IntoIter = Iter<'a, T>;

    fn into_iter(self) -> Iter<'a, T> {
        self.iter()
    }
}

/// An iterator over messages on a [`Receiver`], created by [`iter`].
///
/// This iterator will block whenever [`next`] is called,
/// waiting for a new message, and [`None`] will be returned
/// when the corresponding channel has hung up.
///
/// [`iter`]: Receiver::iter
/// [`next`]: Iterator::next
#[derive(Debug)]
pub struct Iter<'a, T: 'a + Send> {
    rx: &'a Receiver<T>,
}
impl<'a, T: Send> Iterator for Iter<'a, T> {
    type Item = T;

    fn next(&mut self) -> Option<T> {
        self.rx.recv().ok()
    }
}

/// An owning iterator over messages on a [`Receiver`],
/// created by [`into_iter`].
///
/// This iterator will block whenever [`next`]
/// is called, waiting for a new message, and [`None`] will be
/// returned if the corresponding channel has hung up.
///
/// [`into_iter`]: Receiver::into_iter
/// [`next`]: Iterator::next
#[derive(Debug)]
pub struct IntoIter<T: Send> {
    rx: Receiver<T>,
}
impl<T: Send> Iterator for IntoIter<T> {
    type Item = T;
    fn next(&mut self) -> Option<T> {
        self.rx.recv().ok()
    }
}
impl<T: Send> IntoIterator for Receiver<T> {
    type Item = T;
    type IntoIter = IntoIter<T>;

    fn into_iter(self) -> IntoIter<T> {
        IntoIter { rx: self }
    }
}

/// An iterator that attempts to yield all pending values for a [`Receiver`],
/// created by [`try_iter`].
///
/// [`None`] will be returned when there are no pending values remaining or
/// if the corresponding channel has hung up.
///
/// This iterator will never block the caller in order to wait for data to
/// become available. Instead, it will return [`None`].
///
/// [`try_iter`]: Receiver::try_iter
#[derive(Debug)]
pub struct TryIter<'a, T: 'a + Send> {
    rx: &'a Receiver<T>,
}
impl<'a, T: Send> Iterator for TryIter<'a, T> {
    type Item = T;

    fn next(&mut self) -> Option<T> {
        self.rx.try_recv().ok()
    }
}

/// Creates a channel that can be used to send a single value.
///
/// Use the returned `Receiver` to get the value.
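///
/// # Example
/// A minimal sketch; the `use` of this crate is omitted, so the block is not
/// compiled as a doctest:
/// ```ignore
/// let (sender, receiver) = oneshot();
/// std::thread::spawn(move || sender.send("done").unwrap());
/// assert_eq!(Ok("done"), receiver.recv());
/// ```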
#[must_use]
pub fn oneshot<T>() -> (OneSender<T>, Receiver<T>)
where
    T: Send,
{
    let (std_sender, std_receiver) = std::sync::mpsc::sync_channel(1);
    let inner = Arc::new(Mutex::new(Inner {
        sender_wakers: Vec::new(),
        receiver_waker: None,
    }));
    (
        OneSender {
            std_sender: Some(std_sender),
            inner: inner.clone(),
        },
        Receiver {
            std_receiver: Some(std_receiver),
            inner,
        },
    )
}

/// Creates a new synchronous, bounded channel.
/// All data sent on the [`SyncSender`] will become available on the [`Receiver`]
/// in the same order as it was sent.
/// The [`Receiver`] will block until a message becomes available.
///
/// This channel has an internal buffer on which messages will be queued.
/// `bound` specifies the buffer size. When the internal buffer becomes full,
/// future sends will wait for the buffer to open up.
///
/// The [`SyncSender`] can be cloned to [`send`] to the same channel multiple
/// times, but only one [`Receiver`] is supported.
///
/// Like asynchronous channels, if the [`Receiver`] is disconnected while trying
/// to [`send`] with the [`SyncSender`], the [`send`] method will return a
/// [`SendError`]. Similarly, if the [`SyncSender`] is disconnected while trying
/// to [`recv`], the [`recv`] method will return a [`RecvError`].
///
/// [`send`]: SyncSender::send
/// [`recv`]: Receiver::recv
///
/// # Panics
/// Panics if `bound` is zero.
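///
/// # Example
/// A minimal sketch; the `use` of this crate is omitted, so the block is not
/// compiled as a doctest:
/// ```ignore
/// let (sender, receiver) = sync_channel(2);
/// sender.send(10).unwrap();
/// drop(sender);
/// // Buffered messages survive disconnection; afterwards `recv` reports an error.
/// assert_eq!(Ok(10), receiver.recv());
/// assert!(receiver.recv().is_err());
/// ```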
#[must_use]
#[allow(clippy::module_name_repetitions)]
pub fn sync_channel<T>(bound: usize) -> (SyncSender<T>, Receiver<T>)
where
    T: Send,
{
    assert!(bound > 0, "bound must be greater than zero");
    let (std_sender, std_receiver) = std::sync::mpsc::sync_channel(bound);
    let inner = Arc::new(Mutex::new(Inner {
        sender_wakers: Vec::new(),
        receiver_waker: None,
    }));
    (
        SyncSender {
            std_sender: Some(std_sender),
            inner: inner.clone(),
        },
        Receiver {
            std_receiver: Some(std_receiver),
            inner,
        },
    )
}
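
// The tests below are an illustrative sketch added alongside this code; they only
// exercise the blocking and non-blocking paths, since driving the async methods
// would require an executor that this crate does not depend on.
#[cfg(test)]
mod tests {
    use super::*;
    use std::sync::mpsc::{TryRecvError, TrySendError};

    #[test]
    fn oneshot_delivers_a_single_value() {
        let (sender, receiver) = oneshot();
        sender.send(5).unwrap();
        assert_eq!(Ok(5), receiver.recv());
        // The only sender was consumed by `send`, so the channel is now disconnected.
        assert!(receiver.recv().is_err());
    }

    #[test]
    fn sync_channel_blocking_and_non_blocking_sends() {
        let (sender, receiver) = sync_channel(1);
        sender.send(1).unwrap();
        // The single buffer slot is occupied, so a non-blocking send fails.
        assert!(matches!(sender.try_send(2), Err(TrySendError::Full(2))));
        assert_eq!(Ok(1), receiver.recv());
        sender.try_send(2).unwrap();
        assert_eq!(Ok(2), receiver.try_recv());
        assert_eq!(Err(TryRecvError::Empty), receiver.try_recv());
    }

    #[test]
    fn receiver_iterates_until_all_senders_drop() {
        let (sender, receiver) = sync_channel(3);
        sender.send(1).unwrap();
        sender.send(2).unwrap();
        drop(sender);
        assert_eq!(vec![1, 2], receiver.into_iter().collect::<Vec<i32>>());
    }
}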