use std::any::Any;
use std::error::Error;
use std::fmt;
use std::sync::Arc;
use std::sync::atomic::AtomicUsize;
use std::sync::atomic::Ordering::SeqCst;

use {Future, Poll, Async};
use lock::Lock;
use stream::Stream;
use task::{self, Task};

/// Creates an in-memory channel implementation of the `Stream` trait.
///
/// This method creates a concrete implementation of the `Stream` trait which
/// can be used to send values across threads in a streaming fashion. This
/// channel is unique in that it implements back pressure to ensure that the
/// sender never outpaces the receiver. The `Sender::send` method allows only
/// one message in flight at a time; the next message can be sent only once
/// the first has been consumed.
///
/// The `Receiver` returned implements the `Stream` trait and has access to any
/// number of the associated combinators for transforming the result.
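///
/// # Examples
///
/// A minimal sketch of plumbing values through the channel (marked `ignore`:
/// these futures need a task context to be polled, and the exact combinators
/// shown are assumed from the rest of this crate):
///
/// ```ignore
/// use futures::Future;
/// use futures::stream::{channel, Stream};
///
/// let (tx, rx) = channel::<i32, ()>();
///
/// // `send` consumes the sender and yields it back once the receiver has
/// // taken the message, which is what enforces the back pressure.
/// let sends = tx.send(Ok(1)).and_then(|tx| tx.send(Ok(2)));
///
/// // The receiver is a plain `Stream` of the values sent above.
/// let doubled = rx.map(|x| x * 2);
/// ```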
pub fn channel<T, E>() -> (Sender<T, E>, Receiver<T, E>) {
    let inner = Arc::new(Inner {
        state: AtomicUsize::new(EMPTY),
        data: Lock::new(None),
        rx_task1: Lock::new(None),
        rx_task2: Lock::new(None),
        tx_task1: Lock::new(None),
        tx_task2: Lock::new(None),
    });
    let sender = Sender {
        inner: inner.clone(),
        flag: true,
    };
    let receiver = Receiver {
        inner: inner,
        flag: true,
    };
    (sender, receiver)
}

/// The transmission end of a channel which is used to send values.
///
/// This is created by the `channel` method in the `stream` module.
pub struct Sender<T, E> {
    inner: Arc<Inner<Result<T, E>>>,
    // described below on `Inner`
    flag: bool,
}

/// A future returned by the `Sender::send` method which will resolve to the
/// sender once it's available to send another message.
#[must_use = "futures do nothing unless polled"]
pub struct FutureSender<T, E> {
    state: Option<(Sender<T, E>, Result<T, E>)>,
}

/// The receiving end of a channel which implements the `Stream` trait.
///
/// This is a concrete implementation of a stream which can be used to represent
/// a stream of values being computed elsewhere. This is created by the
/// `channel` method in the `stream` module.
#[must_use = "streams do nothing unless polled"]
pub struct Receiver<T, E> {
    inner: Arc<Inner<Result<T, E>>>,
    // described below on `Inner`
    flag: bool,
}

/// Internal state shared by the `Sender` and `Receiver` types.
///
/// While this is similar to the oneshot internal state, it's subtly different
/// with extra `rx_task` and `tx_task` fields for blocking. See comments
/// below for what's what.
struct Inner<T> {
    /// Actual state of this channel, essentially what's going on inside `data`.
    ///
    /// This currently has three valid values (constants below this struct
    /// definition):
    ///
    /// * EMPTY - both the sender and receiver are alive, but there's no data to
    ///           be had in `data`. A receiver must block and a sender can
    ///           proceed.
    /// * DATA - both the sender and receiver are alive, and there's data to be
    ///          retrieved inside of `data`. A receiver can proceed by picking
    ///          out the data but a sender must block to send something else.
    /// * GONE - *either* the sender or receiver is gone (or both). No operation
    ///          should block any more and all data should be handled
    ///          appropriately. Note that if a receiver sees GONE then there may
    ///          still be data inside `data`, so it needs to be checked.
    ///
    /// This isn't really atomically updated in the sense of swap or
    /// compare_exchange, but rather with a few atomic stores here and there
    /// with a sprinkling of compare_exchange. See the code below for more
    /// details.
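    ///
    /// As implemented below, the transitions are: EMPTY -> DATA when a sender
    /// stores a message, DATA -> EMPTY when the receiver takes it out, and
    /// any state -> GONE when either half is dropped. GONE is terminal; the
    /// compare_exchange calls used for the first two transitions deliberately
    /// fail once GONE has been stored.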
    state: AtomicUsize,

    /// The actual data being transmitted across this channel.
    ///
    /// This is `Some` if state is `DATA` and `None` if state is `EMPTY`. If
    /// the state is `GONE` then the receiver needs to check this and the sender
    /// should ignore it.
    ///
    /// Note that this probably doesn't need a `Lock` around it and can likely
    /// just be an `UnsafeCell`.
    data: Lock<Option<T>>,

    /// Ok, here's where things get tricky. These four fields are for blocked
    /// tasks.
    ///
    /// "Four?!" you might be saying, "surely there can only be at most one task
    /// blocked on a channel" you might also be saying. Well, you're correct!
    /// Due to various subtleties and the desire to never have any task *block*
    /// another (in the sense of a mutex block) these are all required.
    ///
    /// The general gist of what's going on here is that a `Sender` will
    /// alternate storing its blocked task in `tx_task1` and `tx_task2`.
    /// Similarly a `Receiver` will alternate storing a blocked task in
    /// `rx_task1` and `rx_task2`.
    ///
    /// The race that this is trying to solve is this:
    ///
    /// * When the receiver receives a message, it will empty out the data, then
    ///   lock the tx task to wake it up (if one's available).
    /// * The sender, not blocked, then sees that the channel is empty, so it
    ///   sends some data.
    /// * The sender again, not blocked, tries to send some more data, but this
    ///   time it's blocked.
    ///
    /// Here we've got a concurrent situation where the receiver is holding the
    /// lock for the tx task, but the sender *also* wants to store a new tx
    /// task to get unblocked. This would involve the sender literally blocking
    /// waiting for the receiver to unlock, so instead we shard up the tx task
    /// locks into two. This means that in the situation above the two halves
    /// will never be racing on the same slot and always have access to block
    /// when they need it.
    ///
    /// Similar logic applies to the receiver (I think...), so there are two rx
    /// task slots here as well.
    ///
    /// TODO: does this really need two rx tasks? I've thought through tx task
    ///       to justify those two but not the other way around.
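    ///
    /// Concretely, both flags start out as `true` and flip once per message
    /// sent (sender side) or received (receiver side). A sender only blocks
    /// when it is one message ahead of the receiver, which is why
    /// `Sender::tx_task` below inverts its flag: that parks its task in
    /// exactly the slot the not-yet-flipped receiver will check on its next
    /// receive.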
    rx_task1: Lock<Option<Task>>,
    rx_task2: Lock<Option<Task>>,
    tx_task1: Lock<Option<Task>>,
    tx_task2: Lock<Option<Task>>,
}

const EMPTY: usize = 0;
const DATA: usize = 1;
const GONE: usize = 2;

/// Error type returned by `FutureSender` when the receiving end of a
/// `channel` is dropped.
pub struct SendError<T, E>(Result<T, E>);

impl<T, E> fmt::Debug for SendError<T, E> {
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        fmt.debug_tuple("SendError")
            .field(&"...")
            .finish()
    }
}

impl<T, E> fmt::Display for SendError<T, E> {
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        write!(fmt, "send failed because receiver is gone")
    }
}

impl<T, E> Error for SendError<T, E>
    where T: Any, E: Any
{
    fn description(&self) -> &str {
        "send failed because receiver is gone"
    }
}

impl<T, E> Stream for Receiver<T, E> {
    type Item = T;
    type Error = E;

    fn poll(&mut self) -> Poll<Option<T>, E> {
        // First things first, let's check out the state of the channel. A
        // local flag is kept which indicates whether we've got data available
        // to us.
        let mut data = false;
        match self.inner.state.load(SeqCst) {
            // If the sender is gone, then we need to take a look inside our
            // `data` field. Fall through below to figure that out.
            GONE => data = true,

            // If we've got data, then we've got data!
            DATA => data = true,

            // If the channel thinks it's empty, then we need to try to block.
            // Take our task and put it in the appropriate slot. If we can't
            // acquire the lock on the slot, then we know it's taken because a
            // sender put some data in the channel and it's trying to wake us
            // up. In that situation we know we've got data.
            EMPTY => {
                let task = task::park();
                match self.rx_task().try_lock() {
                    Some(mut slot) => *slot = Some(task),
                    None => data = true,
                }
            }

            n => panic!("bad state: {}", n),
        }

        // If we *didn't* get data above, then we stored our task to be woken up
        // at a later time. Recheck to cover the race where right before we
        // stored the task a sender put some data on the channel. If we still
        // see `EMPTY`, however, then we're guaranteed any future sender will
        // wake up our task.
        if !data && self.inner.state.load(SeqCst) == EMPTY {
            return Ok(Async::NotReady)
        }

        // We've gotten this far, so extract the data (which is guaranteed not
        // to be contended) and transform it into our return value.
        let ret = match self.inner.data.try_lock().unwrap().take() {
            Some(Ok(e)) => Ok(Some(e).into()),
            Some(Err(e)) => Err(e),
            None => Ok(None.into()),
        };

        // Inform the channel that our data slot is now empty. Note that we use
        // a compare_exchange here to ensure that if the sender goes away (e.g.
        // transitions to GONE) we don't paper over that state.
        drop(self.inner.state.compare_exchange(DATA, EMPTY, SeqCst, SeqCst));

        // Now that we've extracted the data and updated the state of the
        // channel, it's time for us to notify any blocked sender that it can
        // continue to move along if it needs. Take a peek at the tx_task we're
        // waking up and if it's there unpark it.
        //
        // TODO: Should this try_lock be an unwrap()? I... can't think of a case
        //       where the sender should be interfering with this.
        if let Some(mut slot) = self.tx_task().try_lock() {
            if let Some(task) = slot.take() {
                drop(slot);
                task.unpark();
            }
        }

        // And finally, having successfully received a message, we flip our
        // flag to switch the slots we're looking at for rx tasks and tx tasks.
        self.flag = !self.flag;
        return ret
    }
}

impl<T, E> Receiver<T, E> {
    /// Helper method to look at the right slot to store an rx task into, given
    /// how many messages we've received so far.
    fn rx_task(&self) -> &Lock<Option<Task>> {
        if self.flag {
            &self.inner.rx_task1
        } else {
            &self.inner.rx_task2
        }
    }

    /// Helper method to look at the right slot to find a blocked tx task in,
    /// given how many messages we've received so far.
    fn tx_task(&self) -> &Lock<Option<Task>> {
        if self.flag {
            &self.inner.tx_task1
        } else {
            &self.inner.tx_task2
        }
    }
}

impl<T, E> Drop for Receiver<T, E> {
    fn drop(&mut self) {
        // First up, inform our sender brethren that we're going away by
        // transitioning ourselves to the GONE state.
        if self.inner.state.swap(GONE, SeqCst) == DATA {
            drop(self.inner.data.try_lock().unwrap().take());
        }

        // Next, if we stored a handle to our own task to get woken up then
        // we're sure to drop that here. No need to keep that around and we
        // don't want to hold onto any stale references.
        if let Some(mut slot) = self.rx_task().try_lock() {
            if let Some(task) = slot.take() {
                drop(slot);
                drop(task);
            }
        }

        // And finally, if any sender was waiting for us to take some data we
        // ... well ... took the data! (By dropping it just above.) If they're
        // parked here then wake them up so they can see that we're gone.
        if let Some(mut slot) = self.tx_task().try_lock() {
            if let Some(task) = slot.take() {
                drop(slot);
                task.unpark();
            }
        }
    }
}

impl<T, E> Sender<T, E> {
    /// Sends a new value along this channel to the receiver.
    ///
    /// This method consumes the sender and returns a future which will resolve
    /// to the sender again when the value sent has been consumed.
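    ///
    /// A sketch of the failure case (marked `ignore`; the returned future
    /// must be polled within a task for the error to surface):
    ///
    /// ```ignore
    /// let (tx, rx) = channel::<u32, ()>();
    /// drop(rx);
    /// // Polling this future yields `Err(SendError(..))` because the
    /// // receiving end is gone.
    /// let res = tx.send(Ok(1));
    /// ```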
    pub fn send(self, t: Result<T, E>) -> FutureSender<T, E> {
        // FIXME(#164) this should send immediately
        FutureSender {
            state: Some((self, t)),
        }
    }

    /// Same as `Receiver::rx_task` above.
    fn rx_task(&self) -> &Lock<Option<Task>> {
        if self.flag {
            &self.inner.rx_task1
        } else {
            &self.inner.rx_task2
        }
    }

    /// *Almost* the same as `Receiver::tx_task` above, were it not for the
    /// lone `!` in front of `self.flag`.
    ///
    /// Here we invert which slot we look at to ensure that the receiver and
    /// the sender always deal with the same task-blocking location.
    ///
    /// A sender only ever blocks after it has sent one more message than the
    /// receiver has consumed, so at that point its flag has flipped one more
    /// time than the receiver's. Inverting our flag realigns the two halves:
    /// we park our task in exactly the slot the receiver will check when it
    /// consumes the outstanding message.
    fn tx_task(&self) -> &Lock<Option<Task>> {
        if !self.flag {
            &self.inner.tx_task1
        } else {
            &self.inner.tx_task2
        }
    }
}

impl<T, E> Drop for Sender<T, E> {
    fn drop(&mut self) {
        // Like Receiver::drop we let our other half know we're gone, and then
        // we try to wake them up if they're waiting for us. Note that we don't
        // frob tx_task here as that's done in FutureSender down below.
        self.inner.state.store(GONE, SeqCst);

        if let Some(mut slot) = self.rx_task().try_lock() {
            if let Some(task) = slot.take() {
                drop(slot);
                task.unpark();
            }
        }
    }
}

impl<T, E> Future for FutureSender<T, E> {
    type Item = Sender<T, E>;
    type Error = SendError<T, E>;

    fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
        // This is very similar to `Receiver::poll` above, it's basically just
        // the opposite in a few places.
        let (mut sender, data) = self.state.take().expect("cannot poll \
                                                           FutureSender twice");
        let mut empty = false;
        match sender.inner.state.load(SeqCst) {
            GONE => return Err(SendError(data)),
            EMPTY => empty = true,
            DATA => {
                let task = task::park();
                match sender.tx_task().try_lock() {
                    Some(mut slot) => *slot = Some(task),
                    None => empty = true,
                }
            }
            n => panic!("bad state: {}", n),
        }
        if !empty {
            match sender.inner.state.load(SeqCst) {
                // If there's still data on the channel we've successfully
                // blocked, so return.
                DATA => {
                    self.state = Some((sender, data));
                    return Ok(Async::NotReady)
                }

                // If the receiver is gone, report that immediately. The
                // receiver may also be looking at `inner.data` at this point,
                // so we have to stay away from it.
                GONE => return Err(SendError(data)),

                // Oh oops! Looks like we blocked ourselves but during that time
                // the data was taken, let's fall through and send our data.
                EMPTY => {}
                n => panic!("bad state: {}", n),
            }
        }
        *sender.inner.data.try_lock().unwrap() = Some(data);
        drop(sender.inner.state.compare_exchange(EMPTY, DATA, SeqCst, SeqCst));
        if let Some(mut slot) = sender.rx_task().try_lock() {
            if let Some(task) = slot.take() {
                drop(slot);
                task.unpark();
            }
        }
        sender.flag = !sender.flag;
        Ok(sender.into())
    }
}

impl<T, E> Drop for FutureSender<T, E> {
    fn drop(&mut self) {
        let sender = match self.state.take() {
            Some((sender, _)) => sender,
            None => return,
        };

        // If we've registered a task to be woken when the channel had room
        // again, there's no need to hold onto it any more, so extract it and
        // drop it.
        let slot = sender.tx_task().try_lock();
        if let Some(mut slot) = slot {
            if let Some(task) = slot.take() {
                drop((slot, task));
            }
        }
    }
}