tokio_stream_util/futures_unordered/mod.rs

//! An unbounded set of futures.
//!
//! This module is only available when the `std` or `alloc` feature of this
//! library is activated, and it is activated by default.

use crate::FusedStream;
use alloc::sync::{Arc, Weak};
use core::cell::UnsafeCell;
use core::fmt::{self, Debug};
use core::future::Future;
use core::iter::FromIterator;
use core::marker::PhantomData;
use core::mem;
use core::pin::Pin;
use core::ptr;
use core::sync::atomic::Ordering::{AcqRel, Acquire, Relaxed, Release, SeqCst};
use core::sync::atomic::{AtomicBool, AtomicPtr};
use core::task::{Context, Poll};
use futures_util::task::AtomicWaker;
use tokio_stream::Stream;

mod abort;

mod iter;
pub use iter::{IntoIter, Iter, IterMut, IterPinMut, IterPinRef};

mod task;
use task::Task;

mod ready_to_run_queue;
use ready_to_run_queue::{Dequeue, ReadyToRunQueue};

/// A set of futures which may complete in any order.
///
/// See [`FuturesOrdered`](crate::FuturesOrdered) for a version of this
/// type that preserves a FIFO order.
///
/// This structure is optimized to manage a large number of futures.
/// Futures managed by [`FuturesUnordered`] will only be polled when they
/// generate wake-up notifications. This reduces the amount of work needed to
/// poll large numbers of futures.
///
/// [`FuturesUnordered`] can be filled by [`collect`](Iterator::collect)ing an
/// iterator of futures into a [`FuturesUnordered`], or by
/// [`push`](FuturesUnordered::push)ing futures onto an existing
/// [`FuturesUnordered`]. When new futures are added,
/// [`poll_next`](Stream::poll_next) must be called in order to begin receiving
/// wake-ups for new futures.
///
/// Note that you can create a ready-made [`FuturesUnordered`] via the
/// [`collect`](Iterator::collect) method, or you can start with an empty set
/// with the [`FuturesUnordered::new`] constructor.
///
/// This type is only available when the `std` or `alloc` feature of this
/// library is activated, and it is activated by default.
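///
/// A minimal usage sketch (kept as an `ignore`d block because the exact
/// import path of this crate is assumed here): an iterator of futures is
/// collected into the set and outputs are consumed in completion order via
/// `StreamExt::next` from `tokio_stream`.
///
/// ```ignore
/// use tokio_stream::StreamExt;
/// use tokio_stream_util::FuturesUnordered; // assumed import path
///
/// async fn demo() {
///     // Build a set from an iterator of futures.
///     let mut set: FuturesUnordered<_> =
///         (0..3u32).map(|i| async move { i * 2 }).collect();
///
///     // Outputs arrive in completion order, not insertion order.
///     while let Some(output) = set.next().await {
///         println!("completed: {output}");
///     }
/// }
/// ```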
#[must_use = "streams do nothing unless polled"]
pub struct FuturesUnordered<Fut> {
    ready_to_run_queue: Arc<ReadyToRunQueue<Fut>>,
    head_all: AtomicPtr<Task<Fut>>,
    is_terminated: AtomicBool,
}

unsafe impl<Fut: Send> Send for FuturesUnordered<Fut> {}
unsafe impl<Fut: Send + Sync> Sync for FuturesUnordered<Fut> {}
impl<Fut> Unpin for FuturesUnordered<Fut> {}

// FuturesUnordered is implemented using two linked lists. One which links all
// futures managed by a `FuturesUnordered` and one that tracks futures that have
// been scheduled for polling. The first linked list allows for thread safe
// insertion of nodes at the head as well as forward iteration, but is otherwise
// not thread safe and is only accessed by the thread that owns the
// `FuturesUnordered` value for any other operations. The second linked list is
// an implementation of the intrusive MPSC queue algorithm described by
// 1024cores.net.
//
// When a future is submitted to the set, a task is allocated and inserted in
// both linked lists. The next call to `poll_next` will (eventually) see this
// task and call `poll` on the future.
//
// Before a managed future is polled, the current context's waker is replaced
// with one that is aware of the specific future being run. This ensures that
// wake-up notifications generated by that specific future are visible to
// `FuturesUnordered`. When a wake-up notification is received, the task is
// inserted into the ready to run queue, so that its future can be polled later.
//
// Each task is wrapped in an `Arc` and thereby atomically reference counted.
// Also, each task contains an `AtomicBool` which acts as a flag that indicates
// whether the task is currently inserted in the atomic queue. When a wake-up
// notification is received, the task will only be inserted into the ready to
// run queue if it isn't inserted already.
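//
// As a rough sketch of the 1024cores-style intrusive MPSC queue (illustrative
// only; the actual implementation lives in `ready_to_run_queue.rs` and the
// names below are simplified):
//
//     // enqueue (any number of producer threads):
//     node.next.store(ptr::null_mut(), Relaxed);
//     let prev = head.swap(node, AcqRel);   // publish the new head
//     (*prev).next.store(node, Release);    // link the old head to the new one
//
//     // dequeue (single consumer): walk `next` starting from the tail. If a
//     // producer has swapped `head` but not yet stored `next`, the consumer
//     // observes the queue as inconsistent and retries later.
//
// The window between the swap and the `next` store is what produces the
// `Dequeue::Inconsistent` case handled in `poll_next` below.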

impl<Fut> Default for FuturesUnordered<Fut> {
    fn default() -> Self {
        Self::new()
    }
}

impl<Fut> FuturesUnordered<Fut> {
    /// Constructs a new, empty [`FuturesUnordered`].
    ///
    /// The returned [`FuturesUnordered`] does not contain any futures.
    /// In this state, [`FuturesUnordered::poll_next`](Stream::poll_next) will
    /// return [`Poll::Ready(None)`](Poll::Ready).
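    ///
    /// A minimal sketch (not compiled as a doc test since the import path of
    /// `FuturesUnordered` is assumed to be in scope):
    ///
    /// ```ignore
    /// use tokio_stream::StreamExt;
    ///
    /// let mut set = FuturesUnordered::<std::future::Ready<u8>>::new();
    /// // An empty set is immediately exhausted when polled as a stream.
    /// assert!(set.next().await.is_none());
    /// ```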
    pub fn new() -> Self {
        let stub = Arc::new(Task {
            future: UnsafeCell::new(None),
            next_all: AtomicPtr::new(ptr::null_mut()),
            prev_all: UnsafeCell::new(ptr::null()),
            len_all: UnsafeCell::new(0),
            next_ready_to_run: AtomicPtr::new(ptr::null_mut()),
            queued: AtomicBool::new(true),
            ready_to_run_queue: Weak::new(),
            woken: AtomicBool::new(false),
        });
        let stub_ptr = Arc::as_ptr(&stub);
        let ready_to_run_queue = Arc::new(ReadyToRunQueue {
            waker: AtomicWaker::new(),
            head: AtomicPtr::new(stub_ptr as *mut _),
            tail: UnsafeCell::new(stub_ptr),
            stub,
        });

        Self {
            head_all: AtomicPtr::new(ptr::null_mut()),
            ready_to_run_queue,
            is_terminated: AtomicBool::new(false),
        }
    }

    /// Returns the number of futures contained in the set.
    ///
    /// This represents the total number of in-flight futures.
    pub fn len(&self) -> usize {
        let (_, len) = self.atomic_load_head_and_len_all();
        len
    }

    /// Returns `true` if the set contains no futures.
    pub fn is_empty(&self) -> bool {
        // Relaxed ordering can be used here since we don't need to read from
        // the head pointer, only check whether it is null.
        self.head_all.load(Relaxed).is_null()
    }

    /// Push a future into the set.
    ///
    /// This method adds the given future to the set. This method will not
    /// call [`poll`](core::future::Future::poll) on the submitted future. The caller must
    /// ensure that [`FuturesUnordered::poll_next`](Stream::poll_next) is called
    /// in order to receive wake-up notifications for the given future.
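    ///
    /// A small sketch of pushing through a shared reference (kept as an
    /// `ignore`d block because the import of `FuturesUnordered` is assumed to
    /// be in scope): `push` takes `&self`, so futures can be added without
    /// exclusive access, but nothing runs until the set is polled.
    ///
    /// ```ignore
    /// use tokio_stream::StreamExt;
    ///
    /// async fn demo() {
    ///     let set = FuturesUnordered::new();
    ///     set.push(std::future::ready(1u32));
    ///     set.push(std::future::ready(2u32));
    ///
    ///     // Polling the stream is what drives the pushed futures.
    ///     let mut set = set;
    ///     while let Some(n) = set.next().await {
    ///         println!("done: {n}");
    ///     }
    /// }
    /// ```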
    pub fn push(&self, future: Fut) {
        let task = Arc::new(Task {
            future: UnsafeCell::new(Some(future)),
            next_all: AtomicPtr::new(self.pending_next_all()),
            prev_all: UnsafeCell::new(ptr::null_mut()),
            len_all: UnsafeCell::new(0),
            next_ready_to_run: AtomicPtr::new(ptr::null_mut()),
            queued: AtomicBool::new(true),
            ready_to_run_queue: Arc::downgrade(&self.ready_to_run_queue),
            woken: AtomicBool::new(false),
        });

        // Reset the `is_terminated` flag if we've previously marked ourselves
        // as terminated.
        self.is_terminated.store(false, Relaxed);

        // Right now our task has a strong reference count of 1. We transfer
        // ownership of this reference count to our internal linked list
        // and we'll reclaim ownership through the `unlink` method below.
        let ptr = self.link(task);

        // We need to get the future "into the system" to start tracking it,
        // i.e. to route its wake-up notifications to us so we know when it is
        // ready to be polled. To do that we unconditionally enqueue it for
        // polling here.
        self.ready_to_run_queue.enqueue(ptr);
    }

    /// Returns an iterator that allows inspecting each future in the set.
    pub fn iter(&self) -> Iter<'_, Fut>
    where
        Fut: Unpin,
    {
        Iter(Pin::new(self).iter_pin_ref())
    }

    /// Returns an iterator that allows inspecting each future in the set.
    pub fn iter_pin_ref(self: Pin<&Self>) -> IterPinRef<'_, Fut> {
        let (task, len) = self.atomic_load_head_and_len_all();
        let pending_next_all = self.pending_next_all();

        IterPinRef {
            task,
            len,
            pending_next_all,
            _marker: PhantomData,
        }
    }

    /// Returns an iterator that allows modifying each future in the set.
    pub fn iter_mut(&mut self) -> IterMut<'_, Fut>
    where
        Fut: Unpin,
    {
        IterMut(Pin::new(self).iter_pin_mut())
    }

    /// Returns an iterator that allows modifying each future in the set.
    pub fn iter_pin_mut(mut self: Pin<&mut Self>) -> IterPinMut<'_, Fut> {
        // `head_all` can be accessed directly and we don't need to spin on
        // `Task::next_all` since we have exclusive access to the set.
        let task = *self.head_all.get_mut();
        let len = if task.is_null() {
            0
        } else {
            unsafe { *(*task).len_all.get() }
        };

        IterPinMut {
            task,
            len,
            _marker: PhantomData,
        }
    }

    /// Returns the current head node and number of futures in the list of all
    /// futures within a context where access is shared with other threads
    /// (mostly for use with the `len` and `iter_pin_ref` methods).
    fn atomic_load_head_and_len_all(&self) -> (*const Task<Fut>, usize) {
        let task = self.head_all.load(Acquire);
        let len = if task.is_null() {
            0
        } else {
            unsafe {
                (*task).spin_next_all(self.pending_next_all(), Acquire);
                *(*task).len_all.get()
            }
        };

        (task, len)
    }

    /// Releases the task. It destroys the future inside and either drops
    /// the `Arc<Task>` or transfers ownership to the ready to run queue.
    /// The task this method is called on must have been unlinked before.
    fn release_task(&mut self, task: Arc<Task<Fut>>) {
        // `release_task` must only be called on unlinked tasks.
        debug_assert_eq!(task.next_all.load(Relaxed), self.pending_next_all());
        unsafe {
            debug_assert!((*task.prev_all.get()).is_null());
        }

        // The future is done, so set the queued flag back to `true`. This will
        // prevent `wake` from doing any work in the future.
        let prev = task.queued.swap(true, SeqCst);

        // If the queued flag was previously set, then it means that this task
        // is still in our internal ready to run queue. We then transfer
        // ownership of our reference count to the ready to run queue, and it'll
        // come along and free it later, noticing that the future is `None`.
        //
        // If, however, the queued flag was *not* set then we're safe to
        // release our reference count on the task. The queued flag was set
        // above so all future `enqueue` operations will not actually
        // enqueue the task, so our task will never see the ready to run queue
        // again. The task itself will be deallocated once all reference counts
        // have been dropped elsewhere by the various wakers that contain it.
        //
        // Use ManuallyDrop to transfer the reference count ownership before
        // dropping the future so unwinding won't release the reference count.
        let md_slot;
        let task = if prev {
            md_slot = mem::ManuallyDrop::new(task);
            &*md_slot
        } else {
            &task
        };

        // Drop the future, even if it hasn't finished yet. This is safe
        // because we're dropping the future on the thread that owns
        // `FuturesUnordered`, which correctly tracks `Fut`'s lifetimes and
        // such.
        unsafe {
            // Set to `None` rather than `take()`ing to prevent moving the
            // future.
            *task.future.get() = None;
        }
    }

    /// Insert a new task into the internal linked list.
    fn link(&self, task: Arc<Task<Fut>>) -> *const Task<Fut> {
        // `next_all` should already be reset to the pending state before this
        // function is called.
        debug_assert_eq!(task.next_all.load(Relaxed), self.pending_next_all());
        let ptr = Arc::into_raw(task);

        // Atomically swap out the old head node to get the node that should be
        // assigned to `next_all`.
        let next = self.head_all.swap(ptr as *mut _, AcqRel);

        unsafe {
            // Store the new list length in the new node.
            let new_len = if next.is_null() {
                1
            } else {
                // Make sure `next_all` has been written to signal that it is
                // safe to read `len_all`.
                (*next).spin_next_all(self.pending_next_all(), Acquire);
                *(*next).len_all.get() + 1
            };
            *(*ptr).len_all.get() = new_len;

            // Write the old head as the next node pointer, signaling to other
            // threads that `len_all` and `next_all` are ready to read.
            (*ptr).next_all.store(next, Release);

            // `prev_all` updates don't need to be synchronized, as the field is
            // only ever used after exclusive access has been acquired.
            if !next.is_null() {
                *(*next).prev_all.get() = ptr;
            }
        }

        ptr
    }

    /// Remove the task from the linked list tracking all tasks currently
    /// managed by `FuturesUnordered`.
    /// This method is unsafe because it must be guaranteed that `task` is a
    /// valid pointer.
    unsafe fn unlink(&mut self, task: *const Task<Fut>) -> Arc<Task<Fut>> {
        unsafe {
            // Compute the new list length now in case we're removing the head node
            // and won't be able to retrieve the correct length later.
            let head = *self.head_all.get_mut();
            debug_assert!(!head.is_null());
            let new_len = *(*head).len_all.get() - 1;

            let task = Arc::from_raw(task);
            let next = task.next_all.load(Relaxed);
            let prev = *task.prev_all.get();
            task.next_all.store(self.pending_next_all(), Relaxed);
            *task.prev_all.get() = ptr::null_mut();

            if !next.is_null() {
                *(*next).prev_all.get() = prev;
            }

            if !prev.is_null() {
                (*prev).next_all.store(next, Relaxed);
            } else {
                *self.head_all.get_mut() = next;
            }

            // Store the new list length in the head node.
            let head = *self.head_all.get_mut();
            if !head.is_null() {
                *(*head).len_all.get() = new_len;
            }

            task
        }
    }

    /// Returns the reserved value for `Task::next_all` to indicate a pending
    /// assignment from the thread that inserted the task.
    ///
    /// `FuturesUnordered::link` needs to update `Task` pointers in an order
    /// that ensures any iterators created on other threads can correctly
    /// traverse the entire `Task` list using the chain of `next_all` pointers.
    /// This could be solved with a compare-exchange loop that stores the
    /// current `head_all` in `next_all` and swaps out `head_all` with the new
    /// `Task` pointer if the head hasn't already changed. Under heavy thread
    /// contention, this compare-exchange loop could become costly.
    ///
    /// An alternative is to initialize `next_all` to a reserved pending state
    /// first, perform an atomic swap on `head_all`, and finally update
    /// `next_all` with the old head node. Iterators will then either see the
    /// pending state value or the correct next node pointer, and can reload
    /// `next_all` as needed until the correct value is loaded. The number of
    /// retries needed (if any) would be small and will always be finite, so
    /// this should generally perform better than the compare-exchange loop.
    ///
    /// A valid `Task` pointer in the `head_all` list is guaranteed to never be
    /// this value, so it is safe to use as a reserved value until the correct
    /// value can be written.
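    ///
    /// A rough sketch of the two approaches (illustrative only; the actual
    /// insertion logic lives in `FuturesUnordered::link`, and the names here
    /// are simplified):
    ///
    /// ```ignore
    /// // Compare-exchange loop (not used): may retry under contention.
    /// loop {
    ///     let head = head_all.load(Acquire);
    ///     new_task.next_all.store(head, Relaxed);
    ///     if head_all
    ///         .compare_exchange(head, new_ptr, AcqRel, Acquire)
    ///         .is_ok()
    ///     {
    ///         break;
    ///     }
    /// }
    ///
    /// // Reserved-value approach (used here): one unconditional swap.
    /// new_task.next_all.store(pending_next_all, Relaxed); // reserved sentinel
    /// let old_head = head_all.swap(new_ptr, AcqRel);      // publish new head
    /// new_task.next_all.store(old_head, Release);         // fix up afterwards
    /// ```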
    fn pending_next_all(&self) -> *mut Task<Fut> {
        // The `ReadyToRunQueue` stub is never inserted into the `head_all`
        // list, and its pointer value will remain valid for the lifetime of
        // this `FuturesUnordered`, so we can make use of its value here.
        Arc::as_ptr(&self.ready_to_run_queue.stub) as *mut _
    }
}

impl<Fut: Future> Stream for FuturesUnordered<Fut> {
    type Item = Fut::Output;

    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        let len = self.len();

        // Keep track of how many child futures we have polled,
        // in case we want to forcibly yield.
        let mut polled = 0;
        let mut yielded = 0;

        // Register the current task's waker with the queue so wake-ups from
        // child futures reach this `poll_next` call.
        self.ready_to_run_queue.waker.register(cx.waker());

        loop {
            // Safety: &mut self guarantees the mutual exclusion `dequeue`
            // expects.
            let task = match unsafe { self.ready_to_run_queue.dequeue() } {
                Dequeue::Empty => {
                    if self.is_empty() {
                        // We can only consider ourselves terminated once we
                        // have yielded a `None`.
                        *self.is_terminated.get_mut() = true;
                        return Poll::Ready(None);
                    } else {
                        return Poll::Pending;
                    }
                }
                Dequeue::Inconsistent => {
                    // At this point, it may be worth yielding the thread &
                    // spinning a few times... but for now, just yield using the
                    // task system.
                    cx.waker().wake_by_ref();
                    return Poll::Pending;
                }
                Dequeue::Data(task) => task,
            };

            debug_assert!(task != self.ready_to_run_queue.stub());

            // Safety:
            // - `task` is a valid pointer.
            // - We are the only thread that accesses the `UnsafeCell` that
            //   contains the future.
            let future = match unsafe { &mut *(*task).future.get() } {
                Some(future) => future,

                // If the future has already gone away then we're just
                // cleaning out this task. See the comment in
                // `release_task` for more information, but we're basically
                // just taking ownership of our reference count here.
                None => {
                    // This case only happens when `release_task` was called
                    // for this task before and couldn't drop the task
                    // because it was already enqueued in the ready to run
                    // queue.

                    // Safety: `task` is a valid pointer
                    let task = unsafe { Arc::from_raw(task) };

                    // Double check that the call to `release_task` really
                    // happened. Calling it required the task to be unlinked.
                    debug_assert_eq!(task.next_all.load(Relaxed), self.pending_next_all());
                    unsafe {
                        debug_assert!((*task.prev_all.get()).is_null());
                    }
                    continue;
                }
            };

            // Safety: `task` is a valid pointer
            let task = unsafe { self.unlink(task) };

            // Unset queued flag: This must be done before polling to ensure
            // that the future's task gets rescheduled if it sends a wake-up
            // notification **during** the call to `poll`.
            let prev = task.queued.swap(false, SeqCst);
            assert!(prev);

            // We're going to need to be very careful if the `poll`
            // method below panics. We need to (a) not leak memory and
            // (b) ensure that we still don't have any use-after-frees. To
            // manage this we do a few things:
            //
            // * A "bomb" is created which if dropped abnormally will call
            //   `release_task`. That way we'll be sure the memory management
            //   of the `task` is managed correctly. In particular
            //   `release_task` will drop the future. This ensures that it is
            //   dropped on this thread and not accidentally on a different
            //   thread (bad).
            // * The task was unlinked from our internal list above,
            //   preemptively assuming `poll` may panic, in which case we want
            //   to discard it regardless.
            struct Bomb<'a, Fut> {
                queue: &'a mut FuturesUnordered<Fut>,
                task: Option<Arc<Task<Fut>>>,
            }

            impl<Fut> Drop for Bomb<'_, Fut> {
                fn drop(&mut self) {
                    if let Some(task) = self.task.take() {
                        self.queue.release_task(task);
                    }
                }
            }

            let mut bomb = Bomb {
                task: Some(task),
                queue: &mut *self,
            };

            // Poll the underlying future with the appropriate waker
            // implementation. This is where a large bit of the unsafety
            // starts to stem from internally. The waker is basically just
            // our `Arc<Task<Fut>>` and can schedule the future for polling by
            // enqueuing itself in the ready to run queue.
            //
            // Critically though `Task<Fut>` won't actually access `Fut`, the
            // future, while it's floating around inside of wakers.
            // These structs will basically just use `Fut` to size
            // the internal allocation, appropriately accessing fields and
            // deallocating the task if need be.
            let res = {
                let task = bomb.task.as_ref().unwrap();
                // We are only interested in whether the future is awoken before it
                // finishes polling, so reset the flag here.
                task.woken.store(false, Relaxed);
                // SAFETY: see the comments of Bomb and this block.
                let waker = unsafe { Task::waker_ref(task) };
                let mut cx = Context::from_waker(&waker);

                // Safety: We won't move the future ever again
                let future = unsafe { Pin::new_unchecked(future) };

                future.poll(&mut cx)
            };
            polled += 1;

            match res {
                Poll::Pending => {
                    let task = bomb.task.take().unwrap();
                    // If the future was awoken during polling, we assume
                    // the future wanted to explicitly yield.
                    yielded += task.woken.load(Relaxed) as usize;
                    bomb.queue.link(task);

                    // If a future yields, we respect it and yield here.
                    // If all futures have been polled, we also yield here to
                    // avoid starving other tasks waiting on the executor.
                    // (Polling the same future twice per iteration can cause
                    // this problem; see
                    // https://github.com/rust-lang/futures-rs/pull/2333.)
                    if yielded >= 2 || polled == len {
                        cx.waker().wake_by_ref();
                        return Poll::Pending;
                    }
                    continue;
                }
                Poll::Ready(output) => return Poll::Ready(Some(output)),
            }
        }
    }

    fn size_hint(&self) -> (usize, Option<usize>) {
        let len = self.len();
        (len, Some(len))
    }
}

impl<Fut> Debug for FuturesUnordered<Fut> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "FuturesUnordered {{ ... }}")
    }
}

impl<Fut> FuturesUnordered<Fut> {
    /// Clears the set, removing all futures.
    pub fn clear(&mut self) {
        *self = Self::new();
    }
}

impl<Fut> Drop for FuturesUnordered<Fut> {
    fn drop(&mut self) {
        // Before the strong reference to the queue is dropped we need all
        // futures to be dropped. See note at the bottom of this method.
        //
        // If there is a panic before this completes, we leak the queue.
        struct LeakQueueOnDrop<'a, Fut>(&'a mut FuturesUnordered<Fut>);
        impl<Fut> Drop for LeakQueueOnDrop<'_, Fut> {
            fn drop(&mut self) {
                mem::forget(Arc::clone(&self.0.ready_to_run_queue));
            }
        }
        let guard = LeakQueueOnDrop(self);
        // When a `FuturesUnordered` is dropped we want to drop all futures
        // associated with it. At the same time though there may be tons of
        // wakers flying around which contain `Task<Fut>` references
        // inside them. We'll let those naturally get deallocated.
        while !guard.0.head_all.get_mut().is_null() {
            let head = *guard.0.head_all.get_mut();
            let task = unsafe { guard.0.unlink(head) };
            guard.0.release_task(task);
        }
        mem::forget(guard); // safe to release strong reference to queue

        // Note that at this point we could still have a bunch of tasks in the
        // ready to run queue. None of those tasks, however, have futures
        // associated with them so they're safe to destroy on any thread. At
        // this point the `FuturesUnordered` struct, the owner of the one strong
        // reference to the ready to run queue, will drop that strong reference.
        // At that point whichever thread releases the strong refcount last (be
        // it this thread or some other thread as part of an `upgrade`) will
        // clear out the ready to run queue and free all remaining tasks.
        //
        // While that freeing operation isn't guaranteed to happen here, it's
        // guaranteed to happen "promptly" as no more "blocking work" will
        // happen while there's a strong refcount held.
    }
}

impl<'a, Fut: Unpin> IntoIterator for &'a FuturesUnordered<Fut> {
    type Item = &'a Fut;
    type IntoIter = Iter<'a, Fut>;

    fn into_iter(self) -> Self::IntoIter {
        self.iter()
    }
}

impl<'a, Fut: Unpin> IntoIterator for &'a mut FuturesUnordered<Fut> {
    type Item = &'a mut Fut;
    type IntoIter = IterMut<'a, Fut>;

    fn into_iter(self) -> Self::IntoIter {
        self.iter_mut()
    }
}

impl<Fut: Unpin> IntoIterator for FuturesUnordered<Fut> {
    type Item = Fut;
    type IntoIter = IntoIter<Fut>;

    fn into_iter(mut self) -> Self::IntoIter {
        // `head_all` can be accessed directly and we don't need to spin on
        // `Task::next_all` since we have exclusive access to the set.
        let task = *self.head_all.get_mut();
        let len = if task.is_null() {
            0
        } else {
            unsafe { *(*task).len_all.get() }
        };

        IntoIter { len, inner: self }
    }
}

impl<Fut> FromIterator<Fut> for FuturesUnordered<Fut> {
    fn from_iter<I>(iter: I) -> Self
    where
        I: IntoIterator<Item = Fut>,
    {
        let acc = Self::new();
        iter.into_iter().fold(acc, |acc, item| {
            acc.push(item);
            acc
        })
    }
}

impl<Fut: Future> FusedStream for FuturesUnordered<Fut> {
    fn is_terminated(&self) -> bool {
        self.is_terminated.load(Relaxed)
    }
}

impl<Fut> Extend<Fut> for FuturesUnordered<Fut> {
    fn extend<I>(&mut self, iter: I)
    where
        I: IntoIterator<Item = Fut>,
    {
        for item in iter {
            self.push(item);
        }
    }
}