//! A general purpose crate for working with timeouts and delays with futures.
//!
//! This crate is intended to provide general purpose timeouts and interval
//! streams for working with `futures`. The implementation may not be optimized
//! for your particular use case, though, so be sure to read up on the details
//! if you're concerned about that!
//!
//! Basic usage of this crate is relatively simple:
//!
//! ```no_run
//! # #![feature(async_await)]
//! # #[runtime::main]
//! # async fn main() {
//! use std::time::Duration;
//! use futures_timer::Delay;
//! use futures::prelude::*;
//!
//! Delay::new(Duration::from_secs(3)).await;
//! println!("waited for 3 secs");
//! # }
//! ```
//!
//! In addition to a one-shot future you can also create a stream of delayed
//! notifications with the `Interval` type:
//!
//! ```no_run
//! # fn main() {
//! use std::time::Duration;
//! use futures_timer::Interval;
//! use futures::prelude::*;
//!
//! let dur = Duration::from_secs(4);
//! let stream = Interval::new(dur)
//!     .map(|()| println!("prints every four seconds"));
//! // spawn or use the stream
//! # }
//! ```
//!
//! And you're off to the races! Check out the API documentation for more
//! details about the various methods on `Delay` and `Interval`.
//!
//! # Implementation details
//!
//! The `Delay` and `Interval` types are powered by an associated `Timer`. By
//! default constructors like `Delay::new` and `Interval::new` use a global
//! instance of `Timer` to power their usage. This global `Timer` is spawned
//! onto a helper thread which continuously runs in the background sending out
//! timer notifications.
//!
//! If needed, however, a `Timer` can be constructed manually and the
//! `Delay::new_handle`-style methods can be used to create delays/intervals
//! associated with a specific instance of `Timer`. Each `Timer` has a
//! `TimerHandle` type which is used to associate new objects to it.
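//! For example, something like the following ties a delay to a manually
//! created `Timer` (this is a sketch which assumes a
//! `Delay::new_handle(at, handle)` signature; driving the `Timer` is then the
//! caller's responsibility):
//!
//! ```no_run
//! use std::time::{Duration, Instant};
//! use futures_timer::{Delay, Timer};
//!
//! let timer = Timer::new();
//! let handle = timer.handle();
//!
//! // This delay only fires once `timer` is polled and advanced by its owner.
//! let delay = Delay::new_handle(Instant::now() + Duration::from_secs(1), handle);
//! # drop(delay);
//! ```
//!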
//!
//! Note that there's also a `TimerHandle::set_as_global_fallback` method which
//! will globally configure the fallback timer handle as well if you'd like to
//! run your own timer.
//!
//! Finally, the implementation of `Timer` itself is currently backed by a
//! binary heap. Timer insertion is O(log n), where n is the number of active
//! timers, and so is firing a timer (which involves removing it from the heap).

#![deny(missing_docs)]
#![warn(missing_debug_implementations)]

use std::cmp::Ordering;
use std::mem;
use std::pin::Pin;
use std::sync::atomic::AtomicUsize;
use std::sync::atomic::Ordering::SeqCst;
use std::sync::{Arc, Mutex, Weak};
use std::task::{Context, Poll};
use std::time::Instant;
use std::fmt;

use futures::prelude::*;
use futures::task::AtomicWaker;

use arc_list::{ArcList, Node};
use heap::{Heap, Slot};

mod arc_list;
mod global;
mod heap;

pub mod ext;
pub use ext::{TryFutureExt, TryStreamExt};

/// A "timer heap" used to power separately owned instances of `Delay` and
/// `Interval`.
///
/// This timer is implemented as a priority-queue-based heap. Each `Timer`
/// contains a few primary methods with which to drive it:
///
/// * `next_event` indicates the instant until which the ambient system should
///   sleep before it invokes further processing on a `Timer`
/// * `advance_to` is what actually fires timers on the `Timer`, and should be
///   called essentially every iteration of the event loop, or when the time
///   specified by `next_wake` has elapsed.
/// * The `Future` implementation for `Timer` is used to process incoming timer
///   updates and requests. This is used to schedule new timeouts, update
///   existing ones, or delete existing timeouts. The `Future` implementation
///   will never resolve, but it'll schedule notifications of when to wake up
///   and process more messages.
///
/// Note that if you're using this crate you probably don't need to use a
/// `Timer` yourself, as a global one is already available and run on a helper
/// thread. If that isn't desirable, though, then the
/// `TimerHandle::set_as_global_fallback` method can be used instead!
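///
/// A minimal sketch of driving a `Timer` by hand (polling of the `Timer`
/// future itself is elided here; in practice it must also be polled so that
/// newly created delays get registered):
///
/// ```no_run
/// use std::{thread, time::Instant};
/// use futures_timer::Timer;
///
/// let mut timer = Timer::new();
/// let _handle = timer.handle(); // hand this to `Delay::new_handle` and friends
///
/// loop {
///     // Fire everything that is due, then sleep until the next deadline.
///     timer.advance();
///     match timer.next_event() {
///         Some(when) => {
///             let now = Instant::now();
///             if when > now {
///                 thread::sleep(when - now);
///             }
///         }
///         None => break,
///     }
/// }
/// ```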
pub struct Timer {
    inner: Arc<Inner>,
    timer_heap: Heap<HeapTimer>,
}

/// A handle to a `Timer` which is used to create instances of a `Delay`.
#[derive(Clone)]
pub struct TimerHandle {
    inner: Weak<Inner>,
}

mod delay;
mod interval;
pub use self::delay::Delay;
pub use self::interval::Interval;

struct Inner {
    /// List of updates the `Timer` needs to process
    list: ArcList<ScheduledTimer>,

    /// The waker for the blocked `Timer` task, used to notify it of pushes
    /// onto the `list` above.
    waker: AtomicWaker,
}

/// Shared state between the `Timer` and a `Delay`.
struct ScheduledTimer {
    waker: AtomicWaker,

    // The lowest bit here is whether the timer has fired or not, the second
    // lowest bit is whether the timer has been invalidated, and all the other
    // bits are the "generation" of the timer which is reset during the `reset`
    // function. Only timers for a matching generation are fired.
    state: AtomicUsize,

    inner: Weak<Inner>,
    at: Mutex<Option<Instant>>,

    // TODO: this is only accessed by the timer thread, should have a more
    // lightweight protection than a `Mutex`
    slot: Mutex<Option<Slot>>,
}

/// Entries in the timer heap, sorted by the instant at which they fire and
/// also carrying some payload data.
struct HeapTimer {
    at: Instant,
    gen: usize,
    node: Arc<Node<ScheduledTimer>>,
}

impl Timer {
    /// Creates a new timer heap ready to create new timers.
    pub fn new() -> Timer {
        Timer {
            inner: Arc::new(Inner {
                list: ArcList::new(),
                waker: AtomicWaker::new(),
            }),
            timer_heap: Heap::new(),
        }
    }

    /// Returns a handle to this timer heap, used to create new timeouts.
    pub fn handle(&self) -> TimerHandle {
        TimerHandle {
            inner: Arc::downgrade(&self.inner),
        }
    }

    /// Returns the time at which this timer next needs to be invoked with
    /// `advance_to`.
    ///
    /// Event loops or threads typically want to sleep until the specified
    /// instant.
    pub fn next_event(&self) -> Option<Instant> {
        self.timer_heap.peek().map(|t| t.at)
    }

    /// Processes any timers which are supposed to fire at or before the current
    /// instant.
    ///
    /// This method is equivalent to `self.advance_to(Instant::now())`.
    pub fn advance(&mut self) {
        self.advance_to(Instant::now())
    }

    /// Processes any timers which are supposed to fire at or before the specified `now`.
    ///
    /// This method should be called on `Timer` periodically to advance the
    /// internal state and process any pending timers which need to fire.
    pub fn advance_to(&mut self, now: Instant) {
        loop {
            match self.timer_heap.peek() {
                Some(head) if head.at <= now => {}
                Some(_) => break,
                None => break,
            };

            // Flag the timer as fired and then notify its task, if any, that's
            // blocked.
            let heap_timer = self.timer_heap.pop().unwrap();
            *heap_timer.node.slot.lock().unwrap() = None;
            let bits = heap_timer.gen << 2;
            match heap_timer
                .node
                .state
                .compare_exchange(bits, bits | 0b01, SeqCst, SeqCst)
            {
                Ok(_) => heap_timer.node.waker.wake(),
                Err(_b) => {}
            }
        }
    }

    /// Either updates the timer for `node` to fire at `at`, or adds a new
    /// heap entry for `node` set to fire at `at`.
    fn update_or_add(&mut self, at: Instant, node: Arc<Node<ScheduledTimer>>) {
        // TODO: avoid remove + push and instead just do one sift of the heap?
        // In theory we could update it in place and then do the percolation
        // as necessary
        let gen = node.state.load(SeqCst) >> 2;
        let mut slot = node.slot.lock().unwrap();
        if let Some(heap_slot) = slot.take() {
            self.timer_heap.remove(heap_slot);
        }
        *slot = Some(self.timer_heap.push(HeapTimer {
            at,
            gen,
            node: node.clone(),
        }));
    }

    fn remove(&mut self, node: Arc<Node<ScheduledTimer>>) {
        // If this node is still around and it's still got a registered timer,
        // then we jettison it from the timer heap.
        let mut slot = node.slot.lock().unwrap();
        let heap_slot = match slot.take() {
            Some(slot) => slot,
            None => return,
        };
        self.timer_heap.remove(heap_slot);
    }

    fn invalidate(&mut self, node: Arc<Node<ScheduledTimer>>) {
        node.state.fetch_or(0b10, SeqCst);
        node.waker.wake();
    }
}

impl Future for Timer {
    type Output = ();

    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        Pin::new(&mut self.inner).waker.register(cx.waker());
        let mut list = self.inner.list.take();
        while let Some(node) = list.pop() {
            let at = *node.at.lock().unwrap();
            match at {
                Some(at) => self.update_or_add(at, node),
                None => self.remove(node),
            }
        }
        Poll::Pending
    }
}

impl Drop for Timer {
    fn drop(&mut self) {
        // Seal off our list to prevent any more updates from getting pushed on.
        // Any timer which sees an error from the push will immediately become
        // inert.
        let mut list = self.inner.list.take_and_seal();

        // Now that we'll never receive another timer, drain the list of all
        // updates and also drain our heap of all active timers, invalidating
        // everything.
        while let Some(t) = list.pop() {
            self.invalidate(t);
        }
        while let Some(t) = self.timer_heap.pop() {
            self.invalidate(t.node);
        }
    }
}

impl fmt::Debug for Timer {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
        f.debug_struct("Timer").field("heap", &"...").finish()
    }
}

impl PartialEq for HeapTimer {
    fn eq(&self, other: &HeapTimer) -> bool {
        self.at == other.at
    }
}

impl Eq for HeapTimer {}

impl PartialOrd for HeapTimer {
    fn partial_cmp(&self, other: &HeapTimer) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}

impl Ord for HeapTimer {
    fn cmp(&self, other: &HeapTimer) -> Ordering {
        self.at.cmp(&other.at)
    }
}

static HANDLE_FALLBACK: AtomicUsize = AtomicUsize::new(0);

/// Error returned from `TimerHandle::set_as_global_fallback`.
#[derive(Clone, Debug)]
pub struct SetDefaultError(());

impl TimerHandle {
    /// Configures this timer handle to be the one returned by
    /// `TimerHandle::default`.
    ///
    /// By default a global thread is initialized on the first call to
    /// `TimerHandle::default`. This first call can happen transitively through
    /// `Delay::new`. If, however, that hasn't happened yet then the global
    /// default timer handle can be configured through this method.
    ///
    /// This method can be used to prevent the global helper thread from
    /// spawning. If this method is successful then the global helper thread
    /// will never get spun up.
    ///
    /// On success this timer handle will have installed itself globally to be
    /// used as the return value for `TimerHandle::default` unless otherwise
    /// specified.
    ///
    /// # Errors
    ///
    /// If another thread has already called `set_as_global_fallback` or this
    /// thread otherwise loses a race to call this method then it will fail
    /// returning an error. Once a call to `set_as_global_fallback` is
    /// successful then no future calls may succeed.
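    ///
    /// # Examples
    ///
    /// A minimal sketch; after this call the caller is responsible for driving
    /// `timer` itself (e.g. polling and advancing it on a dedicated thread):
    ///
    /// ```no_run
    /// use futures_timer::Timer;
    ///
    /// let timer = Timer::new();
    /// timer
    ///     .handle()
    ///     .set_as_global_fallback()
    ///     .expect("global fallback already configured");
    /// // `Delay::new` and friends will now use `timer` instead of spawning
    /// // the global helper thread.
    /// ```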
    pub fn set_as_global_fallback(self) -> Result<(), SetDefaultError> {
        unsafe {
            let val = self.into_usize();
            match HANDLE_FALLBACK.compare_exchange(0, val, SeqCst, SeqCst) {
                Ok(_) => Ok(()),
                Err(_) => {
                    drop(TimerHandle::from_usize(val));
                    Err(SetDefaultError(()))
                }
            }
        }
    }

    fn into_usize(self) -> usize {
        unsafe { mem::transmute::<Weak<Inner>, usize>(self.inner) }
    }

    unsafe fn from_usize(val: usize) -> TimerHandle {
        let inner = mem::transmute::<usize, Weak<Inner>>(val);
        TimerHandle { inner }
    }
}

impl Default for TimerHandle {
    fn default() -> TimerHandle {
        let mut fallback = HANDLE_FALLBACK.load(SeqCst);

        // If the fallback hasn't been previously initialized then let's spin
        // up a helper thread and try to initialize with that. If we can't
        // actually create a helper thread then we'll just return a "defunct"
        // handle which will return errors when timer objects are associated
        // with it.
        if fallback == 0 {
            let helper = match global::HelperThread::new() {
                Ok(helper) => helper,
                Err(_) => return TimerHandle { inner: Weak::new() },
            };

            // If we successfully set ourselves as the actual fallback then we
            // want to `forget` the helper thread to ensure that it persists
            // globally. If we fail to set ourselves as the fallback that means
            // that someone was racing with this call to
            // `TimerHandle::default`.  They ended up winning so we'll destroy
            // our helper thread (which shuts down the thread) and reload the
            // fallback.
            if helper.handle().set_as_global_fallback().is_ok() {
                let ret = helper.handle();
                helper.forget();
                return ret;
            }
            fallback = HANDLE_FALLBACK.load(SeqCst);
        }

        // At this point our fallback handle global was configured so we use
        // its value to reify a handle, clone it, and then forget our reified
        // handle as we don't actually have an owning reference to it.
        assert!(fallback != 0);
        unsafe {
            let handle = TimerHandle::from_usize(fallback);
            let ret = handle.clone();
            drop(handle.into_usize());
            return ret;
        }
    }
}

impl fmt::Debug for TimerHandle {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
        f.debug_struct("TimerHandle").field("inner", &"...").finish()
    }
}