// par_iter/iter/mod.rs

1//! Traits for writing parallel programs using an iterator-style interface
2//!
3//! You will rarely need to interact with this module directly unless you have
4//! need to name one of the iterator types.
5//!
6//! Parallel iterators make it easy to write iterator-like chains that
7//! execute in parallel: typically all you have to do is convert the
8//! first `.iter()` (or `iter_mut()`, `into_iter()`, etc) method into
9//! `par_iter()` (or `par_iter_mut()`, `into_par_iter()`, etc). For
10//! example, to compute the sum of the squares of a sequence of
11//! integers, one might write:
12//!
13//! ```rust
14//! use par_iter::prelude::*;
15//! fn sum_of_squares(input: &[i32]) -> i32 {
16//!     input.par_iter()
17//!          .map(|i| i * i)
18//!          .sum()
19//! }
20//! ```
21//!
22//! Or, to increment all the integers in a slice, you could write:
23//!
24//! ```rust
25//! use par_iter::prelude::*;
26//! fn increment_all(input: &mut [i32]) {
27//!     input.par_iter_mut()
28//!          .for_each(|p| *p += 1);
29//! }
30//! ```
31//!
32//! To use parallel iterators, first import the traits by adding
33//! something like `use par_iter::prelude::*` to your module. You can
34//! then call `par_iter`, `par_iter_mut`, or `into_par_iter` to get a
35//! parallel iterator. Like a [regular iterator][], parallel
36//! iterators work by first constructing a computation and then
37//! executing it.
38//!
39//! In addition to `par_iter()` and friends, some types offer other
40//! ways to create (or consume) parallel iterators:
41//!
42//! - Slices (`&[T]`, `&mut [T]`) offer methods like `par_split` and
43//!   `par_windows`, as well as various parallel sorting operations. See [the
44//!   `ParallelSlice` trait] for the full list.
45//! - Strings (`&str`) offer methods like `par_split` and `par_lines`. See [the
46//!   `ParallelString` trait] for the full list.
47//! - Various collections offer [`par_extend`], which grows a collection given a
48//!   parallel iterator. (If you don't have a collection to extend, you can use
49//!   [`collect()`] to create a new one from scratch.)
50//!
51//! [the `ParallelSlice` trait]: ../slice/trait.ParallelSlice.html
52//! [the `ParallelString` trait]: ../str/trait.ParallelString.html
53//! [`par_extend`]: trait.ParallelExtend.html
54//! [`collect()`]: trait.ParallelIterator.html#method.collect
55//!
56//! To see the full range of methods available on parallel iterators,
57//! check out the [`ParallelIterator`] and [`IndexedParallelIterator`]
58//! traits.
59//!
60//! If you'd like to build a custom parallel iterator, or to write your own
61//! combinator, then check out the [split] function and the [plumbing] module.
62//!
63//! [regular iterator]: https://doc.rust-lang.org/std/iter/trait.Iterator.html
64//! [`ParallelIterator`]: trait.ParallelIterator.html
65//! [`IndexedParallelIterator`]: trait.IndexedParallelIterator.html
66//! [split]: fn.split.html
67//! [plumbing]: plumbing/index.html
68//!
69//! Note: Several of the `ParallelIterator` methods rely on a `Try` trait which
70//! has been deliberately obscured from the public API.  This trait is intended
71//! to mirror the unstable `std::ops::Try` with implementations for `Option` and
72//! `Result`, where `Some`/`Ok` values will let those iterators continue, but
73//! `None`/`Err` values will exit early.
74//!
75//! A note about object safety: It is currently _not_ possible to wrap
76//! a `ParallelIterator` (or any trait that depends on it) using a
77//! `Box<dyn ParallelIterator>` or other kind of dynamic allocation,
78//! because `ParallelIterator` is **not object-safe**.
79//! (This keeps the implementation simpler and allows extra optimizations.)
80
81use std::{
82    cmp::Ordering,
83    collections::LinkedList,
84    iter::{Product, Sum},
85    ops::{Fn, RangeBounds},
86};
87
88pub use either::Either;
89
90use self::{plumbing::*, private::Try};
91
92pub mod plumbing;
93
94#[cfg(test)]
95mod test;
96
97// There is a method to the madness here:
98//
99// - These modules are private but expose certain types to the end-user (e.g.,
100//   `enumerate::Enumerate`) -- specifically, the types that appear in the
101//   public API surface of the `ParallelIterator` traits.
102// - In **this** module, those public types are always used unprefixed, which
103//   forces us to add a `pub use` and helps identify if we missed anything.
104// - In contrast, items that appear **only** in the body of a method, e.g.
105//   `find::find()`, are always used **prefixed**, so that they can be readily
106//   distinguished.
107
108mod blocks;
109mod chain;
110mod chunks;
111mod cloned;
112mod collect;
113mod copied;
114mod empty;
115mod enumerate;
116mod extend;
117mod filter;
118mod filter_map;
119mod find;
120mod find_first_last;
121mod flat_map;
122mod flat_map_iter;
123mod flatten;
124mod flatten_iter;
125mod fold;
126mod fold_chunks;
127mod fold_chunks_with;
128mod for_each;
129mod from_par_iter;
130mod inspect;
131mod interleave;
132mod interleave_shortest;
133mod intersperse;
134mod len;
135mod map;
136mod map_with;
137mod multizip;
138mod noop;
139mod once;
140mod panic_fuse;
141// mod par_bridge;
142mod positions;
143mod product;
144mod reduce;
145mod repeat;
146mod rev;
147mod skip;
148mod skip_any;
149mod skip_any_while;
150mod splitter;
151mod step_by;
152mod sum;
153mod take;
154mod take_any;
155mod take_any_while;
156mod try_fold;
157mod try_reduce;
158mod try_reduce_with;
159mod unzip;
160mod update;
161mod walk_tree;
162mod while_some;
163mod zip;
164mod zip_eq;
165
166pub use self::{
167    blocks::{ExponentialBlocks, UniformBlocks},
168    chain::Chain,
169    chunks::Chunks,
170    cloned::Cloned,
171    copied::Copied,
172    empty::{empty, Empty},
173    enumerate::Enumerate,
174    filter::Filter,
175    filter_map::FilterMap,
176    flat_map::FlatMap,
177    flat_map_iter::FlatMapIter,
178    flatten::Flatten,
179    flatten_iter::FlattenIter,
180    fold::{Fold, FoldWith},
181    fold_chunks::FoldChunks,
182    fold_chunks_with::FoldChunksWith,
183    inspect::Inspect,
184    interleave::Interleave,
185    interleave_shortest::InterleaveShortest,
186    intersperse::Intersperse,
187    len::{MaxLen, MinLen},
188    map::Map,
189    map_with::{MapInit, MapWith},
190    multizip::MultiZip,
191    once::{once, Once},
192    panic_fuse::PanicFuse,
193    // par_bridge::{IterBridge, ParallelBridge},
194    positions::Positions,
195    repeat::{repeat, repeatn, Repeat, RepeatN},
196    rev::Rev,
197    skip::Skip,
198    skip_any::SkipAny,
199    skip_any_while::SkipAnyWhile,
200    splitter::{split, Split},
201    step_by::StepBy,
202    take::Take,
203    take_any::TakeAny,
204    take_any_while::TakeAnyWhile,
205    try_fold::{TryFold, TryFoldWith},
206    update::Update,
207    walk_tree::{
208        walk_tree, walk_tree_postfix, walk_tree_prefix, WalkTree, WalkTreePostfix, WalkTreePrefix,
209    },
210    while_some::WhileSome,
211    zip::Zip,
212    zip_eq::ZipEq,
213};
214
/// `IntoParallelIterator` implements the conversion to a [`ParallelIterator`].
///
/// By implementing `IntoParallelIterator` for a type, you define how it will
/// be transformed into an iterator. This is a parallel version of the standard
/// library's [`std::iter::IntoIterator`] trait.
///
/// [`ParallelIterator`]: trait.ParallelIterator.html
/// [`std::iter::IntoIterator`]: https://doc.rust-lang.org/std/iter/trait.IntoIterator.html
pub trait IntoParallelIterator {
    /// The parallel iterator type that will be created.
    type Iter: ParallelIterator<Item = Self::Item>;

    /// The type of item that the parallel iterator will produce.
    type Item: Send;

    /// Converts `self` into a parallel iterator.
    ///
    /// # Examples
    ///
    /// ```
    /// use par_iter::prelude::*;
    ///
    /// println!("counting in parallel:");
    /// (0..100).into_par_iter()
    ///     .for_each(|i| println!("{}", i));
    /// ```
    ///
    /// This conversion is often implicit for arguments to methods like [`zip`].
    ///
    /// ```
    /// use par_iter::prelude::*;
    ///
    /// let v: Vec<_> = (0..5).into_par_iter().zip(5..10).collect();
    /// assert_eq!(v, [(0, 5), (1, 6), (2, 7), (3, 8), (4, 9)]);
    /// ```
    ///
    /// [`zip`]: trait.IndexedParallelIterator.html#method.zip
    fn into_par_iter(self) -> Self::Iter;
}
254
/// `IntoParallelRefIterator` implements the conversion to a
/// [`ParallelIterator`], providing shared references to the data.
///
/// This is a parallel version of the `iter()` method
/// defined by various collections.
///
/// This trait is automatically implemented
/// `for I where &I: IntoParallelIterator`. In most cases, users
/// will want to implement [`IntoParallelIterator`] rather than implement
/// this trait directly.
///
/// [`ParallelIterator`]: trait.ParallelIterator.html
/// [`IntoParallelIterator`]: trait.IntoParallelIterator.html
pub trait IntoParallelRefIterator<'data> {
    /// The type of the parallel iterator that will be returned.
    type Iter: ParallelIterator<Item = Self::Item>;

    /// The type of item that the parallel iterator will produce.
    /// This will typically be an `&'data T` reference type.
    type Item: Send + 'data;

    /// Converts `self` into a parallel iterator.
    ///
    /// Only a shared borrow of `self` is taken, tied to the `'data`
    /// lifetime, so `self` remains usable afterwards.
    ///
    /// # Examples
    ///
    /// ```
    /// use par_iter::prelude::*;
    ///
    /// let v: Vec<_> = (0..100).collect();
    /// assert_eq!(v.par_iter().sum::<i32>(), 100 * 99 / 2);
    ///
    /// // `v.par_iter()` is shorthand for `(&v).into_par_iter()`,
    /// // producing the exact same references.
    /// assert!(v.par_iter().zip(&v)
    ///          .all(|(a, b)| std::ptr::eq(a, b)));
    /// ```
    fn par_iter(&'data self) -> Self::Iter;
}
293
294impl<'data, I: 'data + ?Sized> IntoParallelRefIterator<'data> for I
295where
296    &'data I: IntoParallelIterator,
297{
298    type Item = <&'data I as IntoParallelIterator>::Item;
299    type Iter = <&'data I as IntoParallelIterator>::Iter;
300
301    fn par_iter(&'data self) -> Self::Iter {
302        self.into_par_iter()
303    }
304}
305
/// `IntoParallelRefMutIterator` implements the conversion to a
/// [`ParallelIterator`], providing mutable references to the data.
///
/// This is a parallel version of the `iter_mut()` method
/// defined by various collections.
///
/// This trait is automatically implemented
/// `for I where &mut I: IntoParallelIterator`. In most cases, users
/// will want to implement [`IntoParallelIterator`] rather than implement
/// this trait directly.
///
/// [`ParallelIterator`]: trait.ParallelIterator.html
/// [`IntoParallelIterator`]: trait.IntoParallelIterator.html
pub trait IntoParallelRefMutIterator<'data> {
    /// The type of iterator that will be created.
    type Iter: ParallelIterator<Item = Self::Item>;

    /// The type of item that will be produced; this is typically an
    /// `&'data mut T` reference.
    type Item: Send + 'data;

    /// Creates the parallel iterator from `self`.
    ///
    /// Takes a mutable borrow of `self` for the `'data` lifetime, so the
    /// yielded items may mutate the underlying data.
    ///
    /// # Examples
    ///
    /// ```
    /// use par_iter::prelude::*;
    ///
    /// let mut v = vec![0usize; 5];
    /// v.par_iter_mut().enumerate().for_each(|(i, x)| *x = i);
    /// assert_eq!(v, [0, 1, 2, 3, 4]);
    /// ```
    fn par_iter_mut(&'data mut self) -> Self::Iter;
}
340
341impl<'data, I: 'data + ?Sized> IntoParallelRefMutIterator<'data> for I
342where
343    &'data mut I: IntoParallelIterator,
344{
345    type Item = <&'data mut I as IntoParallelIterator>::Item;
346    type Iter = <&'data mut I as IntoParallelIterator>::Iter;
347
348    fn par_iter_mut(&'data mut self) -> Self::Iter {
349        self.into_par_iter()
350    }
351}
352
353/// Parallel version of the standard iterator trait.
354///
355/// The combinators on this trait are available on **all** parallel
356/// iterators.  Additional methods can be found on the
357/// [`IndexedParallelIterator`] trait: those methods are only
358/// available for parallel iterators where the number of items is
359/// known in advance (so, e.g., after invoking `filter`, those methods
360/// become unavailable).
361///
362/// For examples of using parallel iterators, see [the docs on the
363/// `iter` module][iter].
364///
365/// [iter]: index.html
366/// [`IndexedParallelIterator`]: trait.IndexedParallelIterator.html
367pub trait ParallelIterator: Sized + Send {
368    /// The type of item that this parallel iterator produces.
369    /// For example, if you use the [`for_each`] method, this is the type of
370    /// item that your closure will be invoked with.
371    ///
372    /// [`for_each`]: #method.for_each
373    type Item: Send;
374
    /// Executes `OP` on each item produced by the iterator, in parallel.
    ///
    /// # Examples
    ///
    /// ```
    /// use par_iter::prelude=*;
    ///
    /// (0..100).into_par_iter().for_each(|x| println!("{:?}", x));
    /// ```
    fn for_each<OP>(self, op: OP)
    where
        OP: Fn(Self::Item) + Sync + Send,
    {
        // Pass `op` by shared reference; the private `for_each` module drives
        // the iterator (used prefixed, per this module's naming convention).
        for_each::for_each(self, &op)
    }
390
    /// Executes `OP` on the given `init` value with each item produced by
    /// the iterator, in parallel.
    ///
    /// The `init` value will be cloned only as needed to be paired with
    /// the group of items in each rayon job.  It does not require the type
    /// to be `Sync`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::mpsc::channel;
    /// use par_iter::prelude::*;
    ///
    /// let (sender, receiver) = channel();
    ///
    /// (0..5).into_par_iter().for_each_with(sender, |s, x| s.send(x).unwrap());
    ///
    /// let mut res: Vec<_> = receiver.iter().collect();
    ///
    /// res.sort();
    ///
    /// assert_eq!(&res[..], &[0, 1, 2, 3, 4])
    /// ```
    fn for_each_with<OP, T>(self, init: T, op: OP)
    where
        OP: Fn(&mut T, Self::Item) + Sync + Send,
        T: Send + Clone,
    {
        // `map_with` pairs a clone of `init` with each job; collecting into
        // `()` just drives the iterator and discards the unit results.
        self.map_with(init, op).collect()
    }
421
    /// Executes `OP` on a value returned by `init` with each item produced by
    /// the iterator, in parallel.
    ///
    /// The `init` function will be called only as needed for a value to be
    /// paired with the group of items in each rayon job.  There is no
    /// constraint on that returned type at all!
    ///
    /// # Examples
    ///
    /// ```
    /// use rand::Rng;
    /// use par_iter::prelude::*;
    ///
    /// let mut v = vec![0u8; 1_000_000];
    ///
    /// v.par_chunks_mut(1000)
    ///     .for_each_init(
    ///         || rand::thread_rng(),
    ///         |rng, chunk| rng.fill(chunk),
    ///     );
    ///
    /// // There's a remote chance that this will fail...
    /// for i in 0u8..=255 {
    ///     assert!(v.contains(&i));
    /// }
    /// ```
    fn for_each_init<OP, INIT, T>(self, init: INIT, op: OP)
    where
        OP: Fn(&mut T, Self::Item) + Sync + Send,
        INIT: Fn() -> T + Sync + Send,
    {
        // `map_init` builds one `T` per job; collecting into `()` drives the
        // iterator and discards the unit results.
        self.map_init(init, op).collect()
    }
455
456    /// Executes a fallible `OP` on each item produced by the iterator, in
457    /// parallel.
458    ///
459    /// If the `OP` returns `Result::Err` or `Option::None`, we will attempt to
460    /// stop processing the rest of the items in the iterator as soon as
461    /// possible, and we will return that terminating value.  Otherwise, we will
462    /// return an empty `Result::Ok(())` or `Option::Some(())`.  If there are
463    /// multiple errors in parallel, it is not specified which will be returned.
464    ///
465    /// # Examples
466    ///
467    /// ```
468    /// use par_iter::prelude::*;
469    /// use std::io::{self, Write};
470    ///
471    /// // This will stop iteration early if there's any write error, like
472    /// // having piped output get closed on the other end.
473    /// (0..100).into_par_iter()
474    ///     .try_for_each(|x| writeln!(io::stdout(), "{:?}", x))
475    ///     .expect("expected no write errors");
476    /// ```
477    fn try_for_each<OP, R>(self, op: OP) -> R
478    where
479        OP: Fn(Self::Item) -> R + Sync + Send,
480        R: Try<Output = ()> + Send,
481    {
482        fn ok<R: Try<Output = ()>>(_: (), _: ()) -> R {
483            R::from_output(())
484        }
485
486        self.map(op).try_reduce(<()>::default, ok)
487    }
488
489    /// Executes a fallible `OP` on the given `init` value with each item
490    /// produced by the iterator, in parallel.
491    ///
492    /// This combines the `init` semantics of [`for_each_with()`] and the
493    /// failure semantics of [`try_for_each()`].
494    ///
495    /// [`for_each_with()`]: #method.for_each_with
496    /// [`try_for_each()`]: #method.try_for_each
497    ///
498    /// # Examples
499    ///
500    /// ```
501    /// use std::sync::mpsc::channel;
502    /// use par_iter::prelude::*;
503    ///
504    /// let (sender, receiver) = channel();
505    ///
506    /// (0..5).into_par_iter()
507    ///     .try_for_each_with(sender, |s, x| s.send(x))
508    ///     .expect("expected no send errors");
509    ///
510    /// let mut res: Vec<_> = receiver.iter().collect();
511    ///
512    /// res.sort();
513    ///
514    /// assert_eq!(&res[..], &[0, 1, 2, 3, 4])
515    /// ```
516    fn try_for_each_with<OP, T, R>(self, init: T, op: OP) -> R
517    where
518        OP: Fn(&mut T, Self::Item) -> R + Sync + Send,
519        T: Send + Clone,
520        R: Try<Output = ()> + Send,
521    {
522        fn ok<R: Try<Output = ()>>(_: (), _: ()) -> R {
523            R::from_output(())
524        }
525
526        self.map_with(init, op).try_reduce(<()>::default, ok)
527    }
528
529    /// Executes a fallible `OP` on a value returned by `init` with each item
530    /// produced by the iterator, in parallel.
531    ///
532    /// This combines the `init` semantics of [`for_each_init()`] and the
533    /// failure semantics of [`try_for_each()`].
534    ///
535    /// [`for_each_init()`]: #method.for_each_init
536    /// [`try_for_each()`]: #method.try_for_each
537    ///
538    /// # Examples
539    ///
540    /// ```
541    /// use rand::Rng;
542    /// use par_iter::prelude::*;
543    ///
544    /// let mut v = vec![0u8; 1_000_000];
545    ///
546    /// v.par_chunks_mut(1000)
547    ///     .try_for_each_init(
548    ///         || rand::thread_rng(),
549    ///         |rng, chunk| rng.try_fill(chunk),
550    ///     )
551    ///     .expect("expected no rand errors");
552    ///
553    /// // There's a remote chance that this will fail...
554    /// for i in 0u8..=255 {
555    ///     assert!(v.contains(&i));
556    /// }
557    /// ```
558    fn try_for_each_init<OP, INIT, T, R>(self, init: INIT, op: OP) -> R
559    where
560        OP: Fn(&mut T, Self::Item) -> R + Sync + Send,
561        INIT: Fn() -> T + Sync + Send,
562        R: Try<Output = ()> + Send,
563    {
564        fn ok<R: Try<Output = ()>>(_: (), _: ()) -> R {
565            R::from_output(())
566        }
567
568        self.map_init(init, op).try_reduce(<()>::default, ok)
569    }
570
571    /// Counts the number of items in this parallel iterator.
572    ///
573    /// # Examples
574    ///
575    /// ```
576    /// use par_iter::prelude::*;
577    ///
578    /// let count = (0..100).into_par_iter().count();
579    ///
580    /// assert_eq!(count, 100);
581    /// ```
582    fn count(self) -> usize {
583        fn one<T>(_: T) -> usize {
584            1
585        }
586
587        self.map(one).sum()
588    }
589
    /// Applies `map_op` to each item of this iterator, producing a new
    /// iterator with the results.
    ///
    /// # Examples
    ///
    /// ```
    /// use par_iter::prelude::*;
    ///
    /// let mut par_iter = (0..5).into_par_iter().map(|x| x * 2);
    ///
    /// let doubles: Vec<_> = par_iter.collect();
    ///
    /// assert_eq!(&doubles[..], &[0, 2, 4, 6, 8]);
    /// ```
    fn map<F, R>(self, map_op: F) -> Map<Self, F>
    where
        F: Fn(Self::Item) -> R + Sync + Send,
        R: Send,
    {
        // Wrap `self` and `map_op` in the `Map` adaptor; the computation is
        // only constructed here and runs when the iterator is consumed.
        Map::new(self, map_op)
    }
611
    /// Applies `map_op` to the given `init` value with each item of this
    /// iterator, producing a new iterator with the results.
    ///
    /// The `init` value will be cloned only as needed to be paired with
    /// the group of items in each rayon job.  It does not require the type
    /// to be `Sync`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::mpsc::channel;
    /// use par_iter::prelude::*;
    ///
    /// let (sender, receiver) = channel();
    ///
    /// let a: Vec<_> = (0..5)
    ///                 .into_par_iter()            // iterating over i32
    ///                 .map_with(sender, |s, x| {
    ///                     s.send(x).unwrap();     // sending i32 values through the channel
    ///                     x                       // returning i32
    ///                 })
    ///                 .collect();                 // collecting the returned values into a vector
    ///
    /// let mut b: Vec<_> = receiver.iter()         // iterating over the values in the channel
    ///                             .collect();     // and collecting them
    /// b.sort();
    ///
    /// assert_eq!(a, b);
    /// ```
    fn map_with<F, T, R>(self, init: T, map_op: F) -> MapWith<Self, T, F>
    where
        F: Fn(&mut T, Self::Item) -> R + Sync + Send,
        T: Send + Clone,
        R: Send,
    {
        // Wrap in the `MapWith` adaptor, which clones `init` per job as
        // described above.
        MapWith::new(self, init, map_op)
    }
649
    /// Applies `map_op` to a value returned by `init` with each item of this
    /// iterator, producing a new iterator with the results.
    ///
    /// The `init` function will be called only as needed for a value to be
    /// paired with the group of items in each rayon job.  There is no
    /// constraint on that returned type at all!
    ///
    /// # Examples
    ///
    /// ```
    /// use rand::Rng;
    /// use par_iter::prelude::*;
    ///
    /// let a: Vec<_> = (1i32..1_000_000)
    ///     .into_par_iter()
    ///     .map_init(
    ///         || rand::thread_rng(),  // get the thread-local RNG
    ///         |rng, x| if rng.gen() { // randomly negate items
    ///             -x
    ///         } else {
    ///             x
    ///         },
    ///     ).collect();
    ///
    /// // There's a remote chance that this will fail...
    /// assert!(a.iter().any(|&x| x < 0));
    /// assert!(a.iter().any(|&x| x > 0));
    /// ```
    fn map_init<F, INIT, T, R>(self, init: INIT, map_op: F) -> MapInit<Self, INIT, F>
    where
        F: Fn(&mut T, Self::Item) -> R + Sync + Send,
        INIT: Fn() -> T + Sync + Send,
        R: Send,
    {
        // Wrap in the `MapInit` adaptor, which calls `init` per job as
        // described above.
        MapInit::new(self, init, map_op)
    }
686
    /// Creates an iterator which clones all of its elements.  This may be
    /// useful when you have an iterator over `&T`, but you need `T`, and
    /// that type implements `Clone`. See also [`copied()`].
    ///
    /// [`copied()`]: #method.copied
    ///
    /// # Examples
    ///
    /// ```
    /// use par_iter::prelude::*;
    ///
    /// let a = [1, 2, 3];
    ///
    /// let v_cloned: Vec<_> = a.par_iter().cloned().collect();
    ///
    /// // cloned is the same as .map(|&x| x), for integers
    /// let v_map: Vec<_> = a.par_iter().map(|&x| x).collect();
    ///
    /// assert_eq!(v_cloned, vec![1, 2, 3]);
    /// assert_eq!(v_map, vec![1, 2, 3]);
    /// ```
    fn cloned<'a, T>(self) -> Cloned<Self>
    where
        T: 'a + Clone + Send,
        Self: ParallelIterator<Item = &'a T>,
    {
        // Wrap in the `Cloned` adaptor; only valid when the items are `&T`.
        Cloned::new(self)
    }
715
    /// Creates an iterator which copies all of its elements.  This may be
    /// useful when you have an iterator over `&T`, but you need `T`, and
    /// that type implements `Copy`. See also [`cloned()`].
    ///
    /// [`cloned()`]: #method.cloned
    ///
    /// # Examples
    ///
    /// ```
    /// use par_iter::prelude::*;
    ///
    /// let a = [1, 2, 3];
    ///
    /// let v_copied: Vec<_> = a.par_iter().copied().collect();
    ///
    /// // copied is the same as .map(|&x| x), for integers
    /// let v_map: Vec<_> = a.par_iter().map(|&x| x).collect();
    ///
    /// assert_eq!(v_copied, vec![1, 2, 3]);
    /// assert_eq!(v_map, vec![1, 2, 3]);
    /// ```
    fn copied<'a, T>(self) -> Copied<Self>
    where
        T: 'a + Copy + Send,
        Self: ParallelIterator<Item = &'a T>,
    {
        // Wrap in the `Copied` adaptor; only valid when the items are `&T`.
        Copied::new(self)
    }
744
    /// Applies `inspect_op` to a reference to each item of this iterator,
    /// producing a new iterator passing through the original items.  This is
    /// often useful for debugging to see what's happening in iterator stages.
    ///
    /// # Examples
    ///
    /// ```
    /// use par_iter::prelude::*;
    ///
    /// let a = [1, 4, 2, 3];
    ///
    /// // this iterator sequence is complex.
    /// let sum = a.par_iter()
    ///             .cloned()
    ///             .filter(|&x| x % 2 == 0)
    ///             .reduce(|| 0, |sum, i| sum + i);
    ///
    /// println!("{}", sum);
    ///
    /// // let's add some inspect() calls to investigate what's happening
    /// let sum = a.par_iter()
    ///             .cloned()
    ///             .inspect(|x| println!("about to filter: {}", x))
    ///             .filter(|&x| x % 2 == 0)
    ///             .inspect(|x| println!("made it through filter: {}", x))
    ///             .reduce(|| 0, |sum, i| sum + i);
    ///
    /// println!("{}", sum);
    /// ```
    fn inspect<OP>(self, inspect_op: OP) -> Inspect<Self, OP>
    where
        OP: Fn(&Self::Item) + Sync + Send,
    {
        // Wrap in the `Inspect` adaptor; items pass through unchanged since
        // `inspect_op` only receives a shared reference.
        Inspect::new(self, inspect_op)
    }
780
    /// Mutates each item of this iterator before yielding it.
    ///
    /// # Examples
    ///
    /// ```
    /// use par_iter::prelude::*;
    ///
    /// let par_iter = (0..5).into_par_iter().update(|x| {*x *= 2;});
    ///
    /// let doubles: Vec<_> = par_iter.collect();
    ///
    /// assert_eq!(&doubles[..], &[0, 2, 4, 6, 8]);
    /// ```
    fn update<F>(self, update_op: F) -> Update<Self, F>
    where
        F: Fn(&mut Self::Item) + Sync + Send,
    {
        // Wrap in the `Update` adaptor; `update_op` gets `&mut` access to
        // each item before it is yielded.
        Update::new(self, update_op)
    }
800
    /// Applies `filter_op` to each item of this iterator, producing a new
    /// iterator with only the items that gave `true` results.
    ///
    /// # Examples
    ///
    /// ```
    /// use par_iter::prelude::*;
    ///
    /// let mut par_iter = (0..10).into_par_iter().filter(|x| x % 2 == 0);
    ///
    /// let even_numbers: Vec<_> = par_iter.collect();
    ///
    /// assert_eq!(&even_numbers[..], &[0, 2, 4, 6, 8]);
    /// ```
    fn filter<P>(self, filter_op: P) -> Filter<Self, P>
    where
        P: Fn(&Self::Item) -> bool + Sync + Send,
    {
        // Wrap in the `Filter` adaptor.
        Filter::new(self, filter_op)
    }
821
    /// Applies `filter_op` to each item of this iterator to get an `Option`,
    /// producing a new iterator with only the items from `Some` results.
    ///
    /// # Examples
    ///
    /// ```
    /// use par_iter::prelude::*;
    ///
    /// let mut par_iter = (0..10).into_par_iter()
    ///                         .filter_map(|x| {
    ///                             if x % 2 == 0 { Some(x * 3) }
    ///                             else { None }
    ///                         });
    ///
    /// let even_numbers: Vec<_> = par_iter.collect();
    ///
    /// assert_eq!(&even_numbers[..], &[0, 6, 12, 18, 24]);
    /// ```
    fn filter_map<P, R>(self, filter_op: P) -> FilterMap<Self, P>
    where
        P: Fn(Self::Item) -> Option<R> + Sync + Send,
        R: Send,
    {
        // Wrap in the `FilterMap` adaptor; `None` results are dropped.
        FilterMap::new(self, filter_op)
    }
847
    /// Applies `map_op` to each item of this iterator to get nested parallel
    /// iterators, producing a new parallel iterator that flattens these
    /// back into one.
    ///
    /// See also [`flat_map_iter`](#method.flat_map_iter).
    ///
    /// # Examples
    ///
    /// ```
    /// use par_iter::prelude::*;
    ///
    /// let a = [[1, 2], [3, 4], [5, 6], [7, 8]];
    ///
    /// let par_iter = a.par_iter().cloned().flat_map(|a| a.to_vec());
    ///
    /// let vec: Vec<_> = par_iter.collect();
    ///
    /// assert_eq!(&vec[..], &[1, 2, 3, 4, 5, 6, 7, 8]);
    /// ```
    fn flat_map<F, PI>(self, map_op: F) -> FlatMap<Self, F>
    where
        F: Fn(Self::Item) -> PI + Sync + Send,
        PI: IntoParallelIterator,
    {
        // Wrap in the `FlatMap` adaptor; each `PI` value produced by `map_op`
        // is itself parallel-iterable.
        FlatMap::new(self, map_op)
    }
874
    /// Applies `map_op` to each item of this iterator to get nested serial
    /// iterators, producing a new parallel iterator that flattens these
    /// back into one.
    ///
    /// # `flat_map_iter` versus `flat_map`
    ///
    /// These two methods are similar but behave slightly differently. With
    /// [`flat_map`], each of the nested iterators must be a parallel
    /// iterator, and they will be further split up with nested parallelism.
    /// With `flat_map_iter`, each nested iterator is a sequential
    /// `Iterator`, and we only parallelize _between_ them, while the items
    /// produced by each nested iterator are processed sequentially.
    ///
    /// When choosing between these methods, consider whether nested parallelism
    /// suits the potential iterators at hand. If there's little computation
    /// involved, or its length is much less than the outer parallel
    /// iterator, then it may perform better to avoid the overhead of
    /// parallelism, just flattening sequentially with `flat_map_iter`.
    /// If there is a lot of computation, potentially outweighing the outer
    /// parallel iterator, then the nested parallelism of `flat_map` may be
    /// worthwhile.
    ///
    /// [`flat_map`]: #method.flat_map
    ///
    /// # Examples
    ///
    /// ```
    /// use par_iter::prelude::*;
    /// use std::cell::RefCell;
    ///
    /// let a = [[1, 2], [3, 4], [5, 6], [7, 8]];
    ///
    /// let par_iter = a.par_iter().flat_map_iter(|a| {
    ///     // The serial iterator doesn't have to be thread-safe, just its items.
    ///     let cell_iter = RefCell::new(a.iter().cloned());
    ///     std::iter::from_fn(move || cell_iter.borrow_mut().next())
    /// });
    ///
    /// let vec: Vec<_> = par_iter.collect();
    ///
    /// assert_eq!(&vec[..], &[1, 2, 3, 4, 5, 6, 7, 8]);
    /// ```
    fn flat_map_iter<F, SI>(self, map_op: F) -> FlatMapIter<Self, F>
    where
        F: Fn(Self::Item) -> SI + Sync + Send,
        SI: IntoIterator,
        SI::Item: Send,
    {
        // Wrap in the `FlatMapIter` adaptor; the nested `SI` iterators are
        // sequential (see the comparison in the doc comment above).
        FlatMapIter::new(self, map_op)
    }
925
926    /// An adaptor that flattens parallel-iterable `Item`s into one large
927    /// iterator.
928    ///
929    /// See also [`flatten_iter`](#method.flatten_iter).
930    ///
931    /// # Examples
932    ///
933    /// ```
934    /// use par_iter::prelude::*;
935    ///
936    /// let x: Vec<Vec<_>> = vec![vec![1, 2], vec![3, 4]];
937    /// let y: Vec<_> = x.into_par_iter().flatten().collect();
938    ///
939    /// assert_eq!(y, vec![1, 2, 3, 4]);
940    /// ```
    fn flatten(self) -> Flatten<Self>
    where
        Self::Item: IntoParallelIterator,
    {
        // Each item is itself parallel-iterable, so the `Flatten` adaptor
        // can split the nested iterators with nested parallelism.
        Flatten::new(self)
    }
947
948    /// An adaptor that flattens serial-iterable `Item`s into one large
949    /// iterator.
950    ///
951    /// See also [`flatten`](#method.flatten) and the analogous comparison of
952    /// [`flat_map_iter` versus `flat_map`](#flat_map_iter-versus-flat_map).
953    ///
954    /// # Examples
955    ///
956    /// ```
957    /// use par_iter::prelude::*;
958    ///
959    /// let x: Vec<Vec<_>> = vec![vec![1, 2], vec![3, 4]];
960    /// let iters: Vec<_> = x.into_iter().map(Vec::into_iter).collect();
961    /// let y: Vec<_> = iters.into_par_iter().flatten_iter().collect();
962    ///
963    /// assert_eq!(y, vec![1, 2, 3, 4]);
964    /// ```
    fn flatten_iter(self) -> FlattenIter<Self>
    where
        Self::Item: IntoIterator,
        <Self::Item as IntoIterator>::Item: Send,
    {
        // As with `flat_map_iter`, the nested iterators run sequentially,
        // so only their items need to be `Send`.
        FlattenIter::new(self)
    }
972
973    /// Reduces the items in the iterator into one item using `op`.
974    /// The argument `identity` should be a closure that can produce
    /// an "identity" value which may be inserted into the sequence as
976    /// needed to create opportunities for parallel execution. So, for
977    /// example, if you are doing a summation, then `identity()` ought
978    /// to produce something that represents the zero for your type
979    /// (but consider just calling `sum()` in that case).
980    ///
981    /// # Examples
982    ///
983    /// ```
984    /// // Iterate over a sequence of pairs `(x0, y0), ..., (xN, yN)`
985    /// // and use reduce to compute one pair `(x0 + ... + xN, y0 + ... + yN)`
986    /// // where the first/second elements are summed separately.
987    /// use par_iter::prelude::*;
988    /// let sums = [(0, 1), (5, 6), (16, 2), (8, 9)]
989    ///            .par_iter()        // iterating over &(i32, i32)
990    ///            .cloned()          // iterating over (i32, i32)
991    ///            .reduce(|| (0, 0), // the "identity" is 0 in both columns
992    ///                    |a, b| (a.0 + b.0, a.1 + b.1));
993    /// assert_eq!(sums, (0 + 5 + 16 + 8, 1 + 6 + 2 + 9));
994    /// ```
995    ///
996    /// **Note:** unlike a sequential `fold` operation, the order in
997    /// which `op` will be applied to reduce the result is not fully
998    /// specified. So `op` should be [associative] or else the results
999    /// will be non-deterministic. And of course `identity()` should
1000    /// produce a true identity.
1001    ///
1002    /// [associative]: https://en.wikipedia.org/wiki/Associative_property
    fn reduce<OP, ID>(self, identity: ID, op: OP) -> Self::Item
    where
        OP: Fn(Self::Item, Self::Item) -> Self::Item + Sync + Send,
        ID: Fn() -> Self::Item + Sync + Send,
    {
        // `identity()` may be invoked once per parallel split to seed the
        // reduction, which is why it must be a closure rather than a value.
        reduce::reduce(self, identity, op)
    }
1010
1011    /// Reduces the items in the iterator into one item using `op`.
1012    /// If the iterator is empty, `None` is returned; otherwise,
1013    /// `Some` is returned.
1014    ///
1015    /// This version of `reduce` is simple but somewhat less
1016    /// efficient. If possible, it is better to call `reduce()`, which
1017    /// requires an identity element.
1018    ///
1019    /// # Examples
1020    ///
1021    /// ```
1022    /// use par_iter::prelude::*;
1023    /// let sums = [(0, 1), (5, 6), (16, 2), (8, 9)]
1024    ///            .par_iter()        // iterating over &(i32, i32)
1025    ///            .cloned()          // iterating over (i32, i32)
1026    ///            .reduce_with(|a, b| (a.0 + b.0, a.1 + b.1))
1027    ///            .unwrap();
1028    /// assert_eq!(sums, (0 + 5 + 16 + 8, 1 + 6 + 2 + 9));
1029    /// ```
1030    ///
1031    /// **Note:** unlike a sequential `fold` operation, the order in
1032    /// which `op` will be applied to reduce the result is not fully
1033    /// specified. So `op` should be [associative] or else the results
1034    /// will be non-deterministic.
1035    ///
1036    /// [associative]: https://en.wikipedia.org/wiki/Associative_property
1037    fn reduce_with<OP>(self, op: OP) -> Option<Self::Item>
1038    where
1039        OP: Fn(Self::Item, Self::Item) -> Self::Item + Sync + Send,
1040    {
1041        fn opt_fold<T>(op: impl Fn(T, T) -> T) -> impl Fn(Option<T>, T) -> Option<T> {
1042            move |opt_a, b| match opt_a {
1043                Some(a) => Some(op(a, b)),
1044                None => Some(b),
1045            }
1046        }
1047
1048        fn opt_reduce<T>(op: impl Fn(T, T) -> T) -> impl Fn(Option<T>, Option<T>) -> Option<T> {
1049            move |opt_a, opt_b| match (opt_a, opt_b) {
1050                (Some(a), Some(b)) => Some(op(a, b)),
1051                (Some(v), None) | (None, Some(v)) => Some(v),
1052                (None, None) => None,
1053            }
1054        }
1055
1056        self.fold(<_>::default, opt_fold(&op))
1057            .reduce(<_>::default, opt_reduce(&op))
1058    }
1059
1060    /// Reduces the items in the iterator into one item using a fallible `op`.
1061    /// The `identity` argument is used the same way as in [`reduce()`].
1062    ///
1063    /// [`reduce()`]: #method.reduce
1064    ///
1065    /// If a `Result::Err` or `Option::None` item is found, or if `op` reduces
1066    /// to one, we will attempt to stop processing the rest of the items in the
1067    /// iterator as soon as possible, and we will return that terminating value.
1068    /// Otherwise, we will return the final reduced `Result::Ok(T)` or
1069    /// `Option::Some(T)`.  If there are multiple errors in parallel, it is not
1070    /// specified which will be returned.
1071    ///
1072    /// # Examples
1073    ///
1074    /// ```
1075    /// use par_iter::prelude::*;
1076    ///
1077    /// // Compute the sum of squares, being careful about overflow.
1078    /// fn sum_squares<I: IntoParallelIterator<Item = i32>>(iter: I) -> Option<i32> {
1079    ///     iter.into_par_iter()
1080    ///         .map(|i| i.checked_mul(i))            // square each item,
1081    ///         .try_reduce(|| 0, i32::checked_add)   // and add them up!
1082    /// }
1083    /// assert_eq!(sum_squares(0..5), Some(0 + 1 + 4 + 9 + 16));
1084    ///
1085    /// // The sum might overflow
1086    /// assert_eq!(sum_squares(0..10_000), None);
1087    ///
1088    /// // Or the squares might overflow before it even reaches `try_reduce`
1089    /// assert_eq!(sum_squares(1_000_000..1_000_001), None);
1090    /// ```
    fn try_reduce<T, OP, ID>(self, identity: ID, op: OP) -> Self::Item
    where
        OP: Fn(T, T) -> Self::Item + Sync + Send,
        ID: Fn() -> T + Sync + Send,
        Self::Item: Try<Output = T>,
    {
        // `op` combines already-unwrapped `T` values but may itself produce
        // a failure (`Err`/`None`), which stops the reduction early.
        try_reduce::try_reduce(self, identity, op)
    }
1099
1100    /// Reduces the items in the iterator into one item using a fallible `op`.
1101    ///
1102    /// Like [`reduce_with()`], if the iterator is empty, `None` is returned;
1103    /// otherwise, `Some` is returned.  Beyond that, it behaves like
1104    /// [`try_reduce()`] for handling `Err`/`None`.
1105    ///
1106    /// [`reduce_with()`]: #method.reduce_with
1107    /// [`try_reduce()`]: #method.try_reduce
1108    ///
1109    /// For instance, with `Option` items, the return value may be:
1110    /// - `None`, the iterator was empty
1111    /// - `Some(None)`, we stopped after encountering `None`.
1112    /// - `Some(Some(x))`, the entire iterator reduced to `x`.
1113    ///
1114    /// With `Result` items, the nesting is more obvious:
1115    /// - `None`, the iterator was empty
1116    /// - `Some(Err(e))`, we stopped after encountering an error `e`.
1117    /// - `Some(Ok(x))`, the entire iterator reduced to `x`.
1118    ///
1119    /// # Examples
1120    ///
1121    /// ```
1122    /// use par_iter::prelude::*;
1123    ///
1124    /// let files = ["/dev/null", "/does/not/exist"];
1125    ///
1126    /// // Find the biggest file
1127    /// files.into_par_iter()
1128    ///     .map(|path| std::fs::metadata(path).map(|m| (path, m.len())))
1129    ///     .try_reduce_with(|a, b| {
1130    ///         Ok(if a.1 >= b.1 { a } else { b })
1131    ///     })
1132    ///     .expect("Some value, since the iterator is not empty")
1133    ///     .expect_err("not found");
1134    /// ```
    fn try_reduce_with<T, OP>(self, op: OP) -> Option<Self::Item>
    where
        OP: Fn(T, T) -> Self::Item + Sync + Send,
        Self::Item: Try<Output = T>,
    {
        // The outer `Option` distinguishes an empty iterator (`None`) from
        // a reduction that ran and possibly failed (`Some(Err)`/`Some(None)`).
        try_reduce_with::try_reduce_with(self, op)
    }
1142
1143    /// Parallel fold is similar to sequential fold except that the
1144    /// sequence of items may be subdivided before it is
1145    /// folded. Consider a list of numbers like `22 3 77 89 46`. If
    /// you used sequential fold to add them (`fold(0, |a,b| a+b)`),
1147    /// you would wind up first adding 0 + 22, then 22 + 3, then 25 +
1148    /// 77, and so forth. The **parallel fold** works similarly except
1149    /// that it first breaks up your list into sublists, and hence
1150    /// instead of yielding up a single sum at the end, it yields up
1151    /// multiple sums. The number of results is nondeterministic, as
1152    /// is the point where the breaks occur.
1153    ///
1154    /// So if we did the same parallel fold (`fold(0, |a,b| a+b)`) on
1155    /// our example list, we might wind up with a sequence of two numbers,
1156    /// like so:
1157    ///
1158    /// ```notrust
1159    /// 22 3 77 89 46
1160    ///       |     |
1161    ///     102   135
1162    /// ```
1163    ///
1164    /// Or perhaps these three numbers:
1165    ///
1166    /// ```notrust
1167    /// 22 3 77 89 46
1168    ///       |  |  |
1169    ///     102 89 46
1170    /// ```
1171    ///
1172    /// In general, Rayon will attempt to find good breaking points
1173    /// that keep all of your cores busy.
1174    ///
1175    /// ### Fold versus reduce
1176    ///
1177    /// The `fold()` and `reduce()` methods each take an identity element
1178    /// and a combining function, but they operate rather differently.
1179    ///
1180    /// `reduce()` requires that the identity function has the same
1181    /// type as the things you are iterating over, and it fully
1182    /// reduces the list of items into a single item. So, for example,
1183    /// imagine we are iterating over a list of bytes `bytes: [128_u8,
1184    /// 64_u8, 64_u8]`. If we used `bytes.reduce(|| 0_u8, |a: u8, b:
1185    /// u8| a + b)`, we would get an overflow. This is because `0`,
1186    /// `a`, and `b` here are all bytes, just like the numbers in the
1187    /// list (I wrote the types explicitly above, but those are the
1188    /// only types you can use). To avoid the overflow, we would need
1189    /// to do something like `bytes.map(|b| b as u32).reduce(|| 0, |a,
1190    /// b| a + b)`, in which case our result would be `256`.
1191    ///
1192    /// In contrast, with `fold()`, the identity function does not
1193    /// have to have the same type as the things you are iterating
1194    /// over, and you potentially get back many results. So, if we
1195    /// continue with the `bytes` example from the previous paragraph,
1196    /// we could do `bytes.fold(|| 0_u32, |a, b| a + (b as u32))` to
1197    /// convert our bytes into `u32`. And of course we might not get
1198    /// back a single sum.
1199    ///
1200    /// There is a more subtle distinction as well, though it's
1201    /// actually implied by the above points. When you use `reduce()`,
1202    /// your reduction function is sometimes called with values that
1203    /// were never part of your original parallel iterator (for
1204    /// example, both the left and right might be a partial sum). With
1205    /// `fold()`, in contrast, the left value in the fold function is
1206    /// always the accumulator, and the right value is always from
1207    /// your original sequence.
1208    ///
1209    /// ### Fold vs Map/Reduce
1210    ///
1211    /// Fold makes sense if you have some operation where it is
1212    /// cheaper to create groups of elements at a time. For example,
1213    /// imagine collecting characters into a string. If you were going
1214    /// to use map/reduce, you might try this:
1215    ///
1216    /// ```
1217    /// use par_iter::prelude::*;
1218    ///
1219    /// let s =
1220    ///     ['a', 'b', 'c', 'd', 'e']
1221    ///     .par_iter()
1222    ///     .map(|c: &char| format!("{}", c))
1223    ///     .reduce(|| String::new(),
1224    ///             |mut a: String, b: String| { a.push_str(&b); a });
1225    ///
1226    /// assert_eq!(s, "abcde");
1227    /// ```
1228    ///
1229    /// Because reduce produces the same type of element as its input,
1230    /// you have to first map each character into a string, and then
1231    /// you can reduce them. This means we create one string per
1232    /// element in our iterator -- not so great. Using `fold`, we can
1233    /// do this instead:
1234    ///
1235    /// ```
1236    /// use par_iter::prelude::*;
1237    ///
1238    /// let s =
1239    ///     ['a', 'b', 'c', 'd', 'e']
1240    ///     .par_iter()
1241    ///     .fold(|| String::new(),
1242    ///             |mut s: String, c: &char| { s.push(*c); s })
1243    ///     .reduce(|| String::new(),
1244    ///             |mut a: String, b: String| { a.push_str(&b); a });
1245    ///
1246    /// assert_eq!(s, "abcde");
1247    /// ```
1248    ///
1249    /// Now `fold` will process groups of our characters at a time,
1250    /// and we only make one string per group. We should wind up with
1251    /// some small-ish number of strings roughly proportional to the
1252    /// number of CPUs you have (it will ultimately depend on how busy
1253    /// your processors are). Note that we still need to do a reduce
1254    /// afterwards to combine those groups of strings into a single
1255    /// string.
1256    ///
1257    /// You could use a similar trick to save partial results (e.g., a
1258    /// cache) or something similar.
1259    ///
1260    /// ### Combining fold with other operations
1261    ///
1262    /// You can combine `fold` with `reduce` if you want to produce a
1263    /// single value. This is then roughly equivalent to a map/reduce
1264    /// combination in effect:
1265    ///
1266    /// ```
1267    /// use par_iter::prelude::*;
1268    ///
1269    /// let bytes = 0..22_u8;
1270    /// let sum = bytes.into_par_iter()
1271    ///                .fold(|| 0_u32, |a: u32, b: u8| a + (b as u32))
1272    ///                .sum::<u32>();
1273    ///
1274    /// assert_eq!(sum, (0..22).sum()); // compare to sequential
1275    /// ```
    fn fold<T, ID, F>(self, identity: ID, fold_op: F) -> Fold<Self, ID, F>
    where
        F: Fn(T, Self::Item) -> T + Sync + Send,
        ID: Fn() -> T + Sync + Send,
        T: Send,
    {
        // Returns an adaptor yielding one accumulator per sublist; the
        // number of sublists and the split points are nondeterministic.
        Fold::new(self, identity, fold_op)
    }
1284
1285    /// Applies `fold_op` to the given `init` value with each item of this
1286    /// iterator, finally producing the value for further use.
1287    ///
1288    /// This works essentially like `fold(|| init.clone(), fold_op)`, except
1289    /// it doesn't require the `init` type to be `Sync`, nor any other form
1290    /// of added synchronization.
1291    ///
1292    /// # Examples
1293    ///
1294    /// ```
1295    /// use par_iter::prelude::*;
1296    ///
1297    /// let bytes = 0..22_u8;
1298    /// let sum = bytes.into_par_iter()
1299    ///                .fold_with(0_u32, |a: u32, b: u8| a + (b as u32))
1300    ///                .sum::<u32>();
1301    ///
1302    /// assert_eq!(sum, (0..22).sum()); // compare to sequential
1303    /// ```
    fn fold_with<F, T>(self, init: T, fold_op: F) -> FoldWith<Self, T, F>
    where
        F: Fn(T, Self::Item) -> T + Sync + Send,
        T: Send + Clone,
    {
        // `init` is cloned for each sublist rather than produced by a
        // closure, so it needs `Clone` but not `Sync`.
        FoldWith::new(self, init, fold_op)
    }
1311
1312    /// Performs a fallible parallel fold.
1313    ///
1314    /// This is a variation of [`fold()`] for operations which can fail with
1315    /// `Option::None` or `Result::Err`.  The first such failure stops
1316    /// processing the local set of items, without affecting other folds in the
1317    /// iterator's subdivisions.
1318    ///
1319    /// Often, `try_fold()` will be followed by [`try_reduce()`]
1320    /// for a final reduction and global short-circuiting effect.
1321    ///
1322    /// [`fold()`]: #method.fold
1323    /// [`try_reduce()`]: #method.try_reduce
1324    ///
1325    /// # Examples
1326    ///
1327    /// ```
1328    /// use par_iter::prelude::*;
1329    ///
1330    /// let bytes = 0..22_u8;
1331    /// let sum = bytes.into_par_iter()
1332    ///                .try_fold(|| 0_u32, |a: u32, b: u8| a.checked_add(b as u32))
1333    ///                .try_reduce(|| 0, u32::checked_add);
1334    ///
1335    /// assert_eq!(sum, Some((0..22).sum())); // compare to sequential
1336    /// ```
    fn try_fold<T, R, ID, F>(self, identity: ID, fold_op: F) -> TryFold<Self, R, ID, F>
    where
        F: Fn(T, Self::Item) -> R + Sync + Send,
        ID: Fn() -> T + Sync + Send,
        R: Try<Output = T> + Send,
    {
        // A failing `fold_op` stops only its local sublist; pair with
        // `try_reduce` for a global short-circuit.
        TryFold::new(self, identity, fold_op)
    }
1345
1346    /// Performs a fallible parallel fold with a cloneable `init` value.
1347    ///
1348    /// This combines the `init` semantics of [`fold_with()`] and the failure
1349    /// semantics of [`try_fold()`].
1350    ///
1351    /// [`fold_with()`]: #method.fold_with
1352    /// [`try_fold()`]: #method.try_fold
1353    ///
1354    /// ```
1355    /// use par_iter::prelude::*;
1356    ///
1357    /// let bytes = 0..22_u8;
1358    /// let sum = bytes.into_par_iter()
1359    ///                .try_fold_with(0_u32, |a: u32, b: u8| a.checked_add(b as u32))
1360    ///                .try_reduce(|| 0, u32::checked_add);
1361    ///
1362    /// assert_eq!(sum, Some((0..22).sum())); // compare to sequential
1363    /// ```
    fn try_fold_with<F, T, R>(self, init: T, fold_op: F) -> TryFoldWith<Self, R, F>
    where
        F: Fn(T, Self::Item) -> R + Sync + Send,
        R: Try<Output = T> + Send,
        T: Clone + Send,
    {
        // Cloned-`init` seeding as in `fold_with`, with `try_fold`'s
        // failure semantics.
        TryFoldWith::new(self, init, fold_op)
    }
1372
1373    /// Sums up the items in the iterator.
1374    ///
    /// Note that the order in which items will be reduced is not specified,
1376    /// so if the `+` operator is not truly [associative] \(as is the
1377    /// case for floating point numbers), then the results are not
1378    /// fully deterministic.
1379    ///
1380    /// [associative]: https://en.wikipedia.org/wiki/Associative_property
1381    ///
1382    /// Basically equivalent to `self.reduce(|| 0, |a, b| a + b)`,
1383    /// except that the type of `0` and the `+` operation may vary
1384    /// depending on the type of value being produced.
1385    ///
1386    /// # Examples
1387    ///
1388    /// ```
1389    /// use par_iter::prelude::*;
1390    ///
1391    /// let a = [1, 5, 7];
1392    ///
1393    /// let sum: i32 = a.par_iter().sum();
1394    ///
1395    /// assert_eq!(sum, 13);
1396    /// ```
    fn sum<S>(self) -> S
    where
        S: Send + Sum<Self::Item> + Sum<S>,
    {
        // The two bounds allow summing items within a split (`Sum<Self::Item>`)
        // and then combining the partial sums across splits (`Sum<S>`).
        sum::sum(self)
    }
1403
1404    /// Multiplies all the items in the iterator.
1405    ///
    /// Note that the order in which items will be reduced is not specified,
1407    /// so if the `*` operator is not truly [associative] \(as is the
1408    /// case for floating point numbers), then the results are not
1409    /// fully deterministic.
1410    ///
1411    /// [associative]: https://en.wikipedia.org/wiki/Associative_property
1412    ///
1413    /// Basically equivalent to `self.reduce(|| 1, |a, b| a * b)`,
1414    /// except that the type of `1` and the `*` operation may vary
1415    /// depending on the type of value being produced.
1416    ///
1417    /// # Examples
1418    ///
1419    /// ```
1420    /// use par_iter::prelude::*;
1421    ///
1422    /// fn factorial(n: u32) -> u32 {
1423    ///    (1..n+1).into_par_iter().product()
1424    /// }
1425    ///
1426    /// assert_eq!(factorial(0), 1);
1427    /// assert_eq!(factorial(1), 1);
1428    /// assert_eq!(factorial(5), 120);
1429    /// ```
    fn product<P>(self) -> P
    where
        P: Send + Product<Self::Item> + Product<P>,
    {
        // Mirrors `sum`: `Product<Self::Item>` multiplies within a split,
        // `Product<P>` combines the partial products across splits.
        product::product(self)
    }
1436
1437    /// Computes the minimum of all the items in the iterator. If the
1438    /// iterator is empty, `None` is returned; otherwise, `Some(min)`
1439    /// is returned.
1440    ///
1441    /// Note that the order in which the items will be reduced is not
1442    /// specified, so if the `Ord` impl is not truly associative, then
1443    /// the results are not deterministic.
1444    ///
1445    /// Basically equivalent to `self.reduce_with(|a, b| Ord::min(a, b))`.
1446    ///
1447    /// # Examples
1448    ///
1449    /// ```
1450    /// use par_iter::prelude::*;
1451    ///
1452    /// let a = [45, 74, 32];
1453    ///
1454    /// assert_eq!(a.par_iter().min(), Some(&32));
1455    ///
1456    /// let b: [i32; 0] = [];
1457    ///
1458    /// assert_eq!(b.par_iter().min(), None);
1459    /// ```
    fn min(self) -> Option<Self::Item>
    where
        Self::Item: Ord,
    {
        // `Ord::min` used directly as the reduction operator; `None` for an
        // empty iterator comes from `reduce_with`.
        self.reduce_with(Ord::min)
    }
1466
1467    /// Computes the minimum of all the items in the iterator with respect to
1468    /// the given comparison function. If the iterator is empty, `None` is
1469    /// returned; otherwise, `Some(min)` is returned.
1470    ///
1471    /// Note that the order in which the items will be reduced is not
1472    /// specified, so if the comparison function is not associative, then
1473    /// the results are not deterministic.
1474    ///
1475    /// # Examples
1476    ///
1477    /// ```
1478    /// use par_iter::prelude::*;
1479    ///
1480    /// let a = [-3_i32, 77, 53, 240, -1];
1481    ///
1482    /// assert_eq!(a.par_iter().min_by(|x, y| x.cmp(y)), Some(&-3));
1483    /// ```
1484    fn min_by<F>(self, f: F) -> Option<Self::Item>
1485    where
1486        F: Sync + Send + Fn(&Self::Item, &Self::Item) -> Ordering,
1487    {
1488        fn min<T>(f: impl Fn(&T, &T) -> Ordering) -> impl Fn(T, T) -> T {
1489            move |a, b| match f(&a, &b) {
1490                Ordering::Greater => b,
1491                _ => a,
1492            }
1493        }
1494
1495        self.reduce_with(min(f))
1496    }
1497
1498    /// Computes the item that yields the minimum value for the given
1499    /// function. If the iterator is empty, `None` is returned;
1500    /// otherwise, `Some(item)` is returned.
1501    ///
1502    /// Note that the order in which the items will be reduced is not
1503    /// specified, so if the `Ord` impl is not truly associative, then
1504    /// the results are not deterministic.
1505    ///
1506    /// # Examples
1507    ///
1508    /// ```
1509    /// use par_iter::prelude::*;
1510    ///
1511    /// let a = [-3_i32, 34, 2, 5, -10, -3, -23];
1512    ///
1513    /// assert_eq!(a.par_iter().min_by_key(|x| x.abs()), Some(&2));
1514    /// ```
1515    fn min_by_key<K, F>(self, f: F) -> Option<Self::Item>
1516    where
1517        K: Ord + Send,
1518        F: Sync + Send + Fn(&Self::Item) -> K,
1519    {
1520        fn key<T, K>(f: impl Fn(&T) -> K) -> impl Fn(T) -> (K, T) {
1521            move |x| (f(&x), x)
1522        }
1523
1524        fn min_key<T, K: Ord>(a: (K, T), b: (K, T)) -> (K, T) {
1525            match (a.0).cmp(&b.0) {
1526                Ordering::Greater => b,
1527                _ => a,
1528            }
1529        }
1530
1531        let (_, x) = self.map(key(f)).reduce_with(min_key)?;
1532        Some(x)
1533    }
1534
1535    /// Computes the maximum of all the items in the iterator. If the
1536    /// iterator is empty, `None` is returned; otherwise, `Some(max)`
1537    /// is returned.
1538    ///
1539    /// Note that the order in which the items will be reduced is not
1540    /// specified, so if the `Ord` impl is not truly associative, then
1541    /// the results are not deterministic.
1542    ///
1543    /// Basically equivalent to `self.reduce_with(|a, b| Ord::max(a, b))`.
1544    ///
1545    /// # Examples
1546    ///
1547    /// ```
1548    /// use par_iter::prelude::*;
1549    ///
1550    /// let a = [45, 74, 32];
1551    ///
1552    /// assert_eq!(a.par_iter().max(), Some(&74));
1553    ///
1554    /// let b: [i32; 0] = [];
1555    ///
1556    /// assert_eq!(b.par_iter().max(), None);
1557    /// ```
    fn max(self) -> Option<Self::Item>
    where
        Self::Item: Ord,
    {
        // `Ord::max` used directly as the reduction operator; `None` for an
        // empty iterator comes from `reduce_with`.
        self.reduce_with(Ord::max)
    }
1564
1565    /// Computes the maximum of all the items in the iterator with respect to
1566    /// the given comparison function. If the iterator is empty, `None` is
1567    /// returned; otherwise, `Some(max)` is returned.
1568    ///
1569    /// Note that the order in which the items will be reduced is not
1570    /// specified, so if the comparison function is not associative, then
1571    /// the results are not deterministic.
1572    ///
1573    /// # Examples
1574    ///
1575    /// ```
1576    /// use par_iter::prelude::*;
1577    ///
1578    /// let a = [-3_i32, 77, 53, 240, -1];
1579    ///
1580    /// assert_eq!(a.par_iter().max_by(|x, y| x.abs().cmp(&y.abs())), Some(&240));
1581    /// ```
1582    fn max_by<F>(self, f: F) -> Option<Self::Item>
1583    where
1584        F: Sync + Send + Fn(&Self::Item, &Self::Item) -> Ordering,
1585    {
1586        fn max<T>(f: impl Fn(&T, &T) -> Ordering) -> impl Fn(T, T) -> T {
1587            move |a, b| match f(&a, &b) {
1588                Ordering::Greater => a,
1589                _ => b,
1590            }
1591        }
1592
1593        self.reduce_with(max(f))
1594    }
1595
1596    /// Computes the item that yields the maximum value for the given
1597    /// function. If the iterator is empty, `None` is returned;
1598    /// otherwise, `Some(item)` is returned.
1599    ///
1600    /// Note that the order in which the items will be reduced is not
1601    /// specified, so if the `Ord` impl is not truly associative, then
1602    /// the results are not deterministic.
1603    ///
1604    /// # Examples
1605    ///
1606    /// ```
1607    /// use par_iter::prelude::*;
1608    ///
1609    /// let a = [-3_i32, 34, 2, 5, -10, -3, -23];
1610    ///
1611    /// assert_eq!(a.par_iter().max_by_key(|x| x.abs()), Some(&34));
1612    /// ```
1613    fn max_by_key<K, F>(self, f: F) -> Option<Self::Item>
1614    where
1615        K: Ord + Send,
1616        F: Sync + Send + Fn(&Self::Item) -> K,
1617    {
1618        fn key<T, K>(f: impl Fn(&T) -> K) -> impl Fn(T) -> (K, T) {
1619            move |x| (f(&x), x)
1620        }
1621
1622        fn max_key<T, K: Ord>(a: (K, T), b: (K, T)) -> (K, T) {
1623            match (a.0).cmp(&b.0) {
1624                Ordering::Greater => a,
1625                _ => b,
1626            }
1627        }
1628
1629        let (_, x) = self.map(key(f)).reduce_with(max_key)?;
1630        Some(x)
1631    }
1632
1633    /// Takes two iterators and creates a new iterator over both.
1634    ///
1635    /// # Examples
1636    ///
1637    /// ```
1638    /// use par_iter::prelude::*;
1639    ///
1640    /// let a = [0, 1, 2];
1641    /// let b = [9, 8, 7];
1642    ///
1643    /// let par_iter = a.par_iter().chain(b.par_iter());
1644    ///
1645    /// let chained: Vec<_> = par_iter.cloned().collect();
1646    ///
1647    /// assert_eq!(&chained[..], &[0, 1, 2, 9, 8, 7]);
1648    /// ```
    fn chain<C>(self, chain: C) -> Chain<Self, C::Iter>
    where
        C: IntoParallelIterator<Item = Self::Item>,
    {
        // Accepts anything convertible into a parallel iterator over the
        // same item type; the conversion happens here.
        Chain::new(self, chain.into_par_iter())
    }
1655
1656    /// Searches for **some** item in the parallel iterator that
1657    /// matches the given predicate and returns it. This operation
1658    /// is similar to [`find` on sequential iterators][find] but
1659    /// the item returned may not be the **first** one in the parallel
1660    /// sequence which matches, since we search the entire sequence in parallel.
1661    ///
1662    /// Once a match is found, we will attempt to stop processing
1663    /// the rest of the items in the iterator as soon as possible
1664    /// (just as `find` stops iterating once a match is found).
1665    ///
1666    /// [find]: https://doc.rust-lang.org/std/iter/trait.Iterator.html#method.find
1667    ///
1668    /// # Examples
1669    ///
1670    /// ```
1671    /// use par_iter::prelude::*;
1672    ///
1673    /// let a = [1, 2, 3, 3];
1674    ///
1675    /// assert_eq!(a.par_iter().find_any(|&&x| x == 3), Some(&3));
1676    ///
1677    /// assert_eq!(a.par_iter().find_any(|&&x| x == 100), None);
1678    /// ```
    fn find_any<P>(self, predicate: P) -> Option<Self::Item>
    where
        P: Fn(&Self::Item) -> bool + Sync + Send,
    {
        // May return any match, not the positionally first one; remaining
        // work is cancelled as soon as a match is found.
        find::find(self, predicate)
    }
1685
1686    /// Searches for the sequentially **first** item in the parallel iterator
1687    /// that matches the given predicate and returns it.
1688    ///
1689    /// Once a match is found, all attempts to the right of the match
1690    /// will be stopped, while attempts to the left must continue in case
1691    /// an earlier match is found.
1692    ///
1693    /// For added performance, you might consider using `find_first` in
1694    /// conjunction with
1695    /// [`by_exponential_blocks()`][IndexedParallelIterator::by_exponential_blocks].
1696    ///
1697    /// Note that not all parallel iterators have a useful order, much like
1698    /// sequential `HashMap` iteration, so "first" may be nebulous.  If you
    /// just want the first match discovered anywhere in the iterator,
1700    /// `find_any` is a better choice.
1701    ///
1702    /// # Examples
1703    ///
1704    /// ```
1705    /// use par_iter::prelude::*;
1706    ///
1707    /// let a = [1, 2, 3, 3];
1708    ///
1709    /// assert_eq!(a.par_iter().find_first(|&&x| x == 3), Some(&3));
1710    ///
1711    /// assert_eq!(a.par_iter().find_first(|&&x| x == 100), None);
1712    /// ```
    fn find_first<P>(self, predicate: P) -> Option<Self::Item>
    where
        P: Fn(&Self::Item) -> bool + Sync + Send,
    {
        // Searches in parallel but only keeps the positionally first match;
        // work to the right of a found match is stopped.
        find_first_last::find_first(self, predicate)
    }
1719
1720    /// Searches for the sequentially **last** item in the parallel iterator
1721    /// that matches the given predicate and returns it.
1722    ///
1723    /// Once a match is found, all attempts to the left of the match
1724    /// will be stopped, while attempts to the right must continue in case
1725    /// a later match is found.
1726    ///
1727    /// Note that not all parallel iterators have a useful order, much like
1728    /// sequential `HashMap` iteration, so "last" may be nebulous.  When the
1729    /// order doesn't actually matter to you, `find_any` is a better choice.
1730    ///
1731    /// # Examples
1732    ///
1733    /// ```
1734    /// use par_iter::prelude::*;
1735    ///
1736    /// let a = [1, 2, 3, 3];
1737    ///
1738    /// assert_eq!(a.par_iter().find_last(|&&x| x == 3), Some(&3));
1739    ///
1740    /// assert_eq!(a.par_iter().find_last(|&&x| x == 100), None);
1741    /// ```
    fn find_last<P>(self, predicate: P) -> Option<Self::Item>
    where
        P: Fn(&Self::Item) -> bool + Sync + Send,
    {
        // Mirror of `find_first`: keeps the positionally last match, and
        // work to the left of a found match is stopped.
        find_first_last::find_last(self, predicate)
    }
1748
1749    /// Applies the given predicate to the items in the parallel iterator
1750    /// and returns **any** non-None result of the map operation.
1751    ///
1752    /// Once a non-None value is produced from the map operation, we will
1753    /// attempt to stop processing the rest of the items in the iterator
1754    /// as soon as possible.
1755    ///
1756    /// Note that this method only returns **some** item in the parallel
1757    /// iterator that is not None from the map predicate. The item returned
1758    /// may not be the **first** non-None value produced in the parallel
1759    /// sequence, since the entire sequence is mapped over in parallel.
1760    ///
1761    /// # Examples
1762    ///
1763    /// ```
1764    /// use par_iter::prelude::*;
1765    ///
1766    /// let c = ["lol", "NaN", "5", "5"];
1767    ///
1768    /// let found_number = c.par_iter().find_map_any(|s| s.parse().ok());
1769    ///
1770    /// assert_eq!(found_number, Some(5));
1771    /// ```
1772    fn find_map_any<P, R>(self, predicate: P) -> Option<R>
1773    where
1774        P: Fn(Self::Item) -> Option<R> + Sync + Send,
1775        R: Send,
1776    {
1777        fn yes<T>(_: &T) -> bool {
1778            true
1779        }
1780        self.filter_map(predicate).find_any(yes)
1781    }
1782
1783    /// Applies the given predicate to the items in the parallel iterator and
1784    /// returns the sequentially **first** non-None result of the map operation.
1785    ///
1786    /// Once a non-None value is produced from the map operation, all attempts
1787    /// to the right of the match will be stopped, while attempts to the left
1788    /// must continue in case an earlier match is found.
1789    ///
1790    /// Note that not all parallel iterators have a useful order, much like
1791    /// sequential `HashMap` iteration, so "first" may be nebulous. If you
1792    /// just want the first non-None value discovered anywhere in the iterator,
1793    /// `find_map_any` is a better choice.
1794    ///
1795    /// # Examples
1796    ///
1797    /// ```
1798    /// use par_iter::prelude::*;
1799    ///
1800    /// let c = ["lol", "NaN", "2", "5"];
1801    ///
1802    /// let first_number = c.par_iter().find_map_first(|s| s.parse().ok());
1803    ///
1804    /// assert_eq!(first_number, Some(2));
1805    /// ```
1806    fn find_map_first<P, R>(self, predicate: P) -> Option<R>
1807    where
1808        P: Fn(Self::Item) -> Option<R> + Sync + Send,
1809        R: Send,
1810    {
1811        fn yes<T>(_: &T) -> bool {
1812            true
1813        }
1814        self.filter_map(predicate).find_first(yes)
1815    }
1816
    /// Applies the given predicate to the items in the parallel iterator and
    /// returns the sequentially **last** non-None result of the map operation.
    ///
    /// Once a non-None value is produced from the map operation, all attempts
    /// to the left of the match will be stopped, while attempts to the right
    /// must continue in case a later match is found.
    ///
    /// Note that not all parallel iterators have a useful order, much like
    /// sequential `HashMap` iteration, so "last" may be nebulous. When the
    /// order doesn't actually matter to you, `find_map_any` is a better
    /// choice.
    ///
    /// # Examples
    ///
    /// ```
    /// use par_iter::prelude::*;
    ///
    /// let c = ["lol", "NaN", "2", "5"];
    ///
    /// let last_number = c.par_iter().find_map_last(|s| s.parse().ok());
    ///
    /// assert_eq!(last_number, Some(5));
    /// ```
    fn find_map_last<P, R>(self, predicate: P) -> Option<R>
    where
        P: Fn(Self::Item) -> Option<R> + Sync + Send,
        R: Send,
    {
        // `yes` accepts every filtered value, so `find_last` yields the
        // sequentially last `Some` produced by `predicate`.
        fn yes<T>(_: &T) -> bool {
            true
        }
        self.filter_map(predicate).find_last(yes)
    }
1850
1851    #[doc(hidden)]
1852    #[deprecated(note = "parallel `find` does not search in order -- use `find_any`, \\
1853                         `find_first`, or `find_last`")]
1854    fn find<P>(self, predicate: P) -> Option<Self::Item>
1855    where
1856        P: Fn(&Self::Item) -> bool + Sync + Send,
1857    {
1858        self.find_any(predicate)
1859    }
1860
1861    /// Searches for **some** item in the parallel iterator that
1862    /// matches the given predicate, and if so returns true.  Once
1863    /// a match is found, we'll attempt to stop process the rest
1864    /// of the items.  Proving that there's no match, returning false,
1865    /// does require visiting every item.
1866    ///
1867    /// # Examples
1868    ///
1869    /// ```
1870    /// use par_iter::prelude::*;
1871    ///
1872    /// let a = [0, 12, 3, 4, 0, 23, 0];
1873    ///
1874    /// let is_valid = a.par_iter().any(|&x| x > 10);
1875    ///
1876    /// assert!(is_valid);
1877    /// ```
1878    fn any<P>(self, predicate: P) -> bool
1879    where
1880        P: Fn(Self::Item) -> bool + Sync + Send,
1881    {
1882        self.map(predicate).find_any(bool::clone).is_some()
1883    }
1884
1885    /// Tests that every item in the parallel iterator matches the given
1886    /// predicate, and if so returns true.  If a counter-example is found,
1887    /// we'll attempt to stop processing more items, then return false.
1888    ///
1889    /// # Examples
1890    ///
1891    /// ```
1892    /// use par_iter::prelude::*;
1893    ///
1894    /// let a = [0, 12, 3, 4, 0, 23, 0];
1895    ///
1896    /// let is_valid = a.par_iter().all(|&x| x > 10);
1897    ///
1898    /// assert!(!is_valid);
1899    /// ```
1900    fn all<P>(self, predicate: P) -> bool
1901    where
1902        P: Fn(Self::Item) -> bool + Sync + Send,
1903    {
1904        #[inline]
1905        fn is_false(x: &bool) -> bool {
1906            !x
1907        }
1908
1909        self.map(predicate).find_any(is_false).is_none()
1910    }
1911
    /// Creates an iterator over the `Some` items of this iterator, halting
    /// as soon as any `None` is found.
    ///
    /// # Examples
    ///
    /// ```
    /// use par_iter::prelude::*;
    /// use std::sync::atomic::{AtomicUsize, Ordering};
    ///
    /// let counter = AtomicUsize::new(0);
    /// let value = (0_i32..2048)
    ///     .into_par_iter()
    ///     .map(|x| {
    ///              counter.fetch_add(1, Ordering::SeqCst);
    ///              if x < 1024 { Some(x) } else { None }
    ///          })
    ///     .while_some()
    ///     .max();
    ///
    /// assert!(value < Some(1024));
    /// assert!(counter.load(Ordering::SeqCst) < 2048); // should not have visited every single one
    /// ```
    fn while_some<T>(self) -> WhileSome<Self>
    where
        Self: ParallelIterator<Item = Option<T>>,
        T: Send,
    {
        // Adaptor that unwraps `Some` payloads and stops (best-effort, as the
        // example's `< 2048` bound shows) once a `None` is encountered.
        WhileSome::new(self)
    }
1941
    /// Wraps an iterator with a fuse in case of panics, to halt all threads
    /// as soon as possible.
    ///
    /// Panics within parallel iterators are always propagated to the caller,
    /// but they don't always halt the rest of the iterator right away, due to
    /// the internal semantics of [`join`]. This adaptor makes a greater effort
    /// to stop processing other items sooner, with the cost of additional
    /// synchronization overhead, which may also inhibit some optimizations.
    ///
    /// [`join`]: ../fn.join.html#panics
    ///
    /// # Examples
    ///
    /// If this code didn't use `panic_fuse()`, it would continue processing
    /// many more items in other threads (with long sleep delays) before the
    /// panic is finally propagated.
    ///
    /// ```ignore
    /// use par_iter::prelude::*;
    /// use std::{thread, time};
    ///
    /// (0..1_000_000)
    ///     .into_par_iter()
    ///     .panic_fuse()
    ///     .for_each(|i| {
    ///         // simulate some work
    ///         thread::sleep(time::Duration::from_secs(1));
    ///         assert!(i > 0); // oops!
    ///     });
    /// ```
    fn panic_fuse(self) -> PanicFuse<Self> {
        // Trades extra synchronization for halting sooner when a panic occurs.
        PanicFuse::new(self)
    }
1975
    /// Creates a fresh collection containing all the elements produced
    /// by this parallel iterator.
    ///
    /// You may prefer [`collect_into_vec()`] implemented on
    /// [`IndexedParallelIterator`], if your underlying iterator also implements
    /// it. [`collect_into_vec()`] allocates efficiently with precise knowledge
    /// of how many elements the iterator contains, and even allows you to reuse
    /// an existing vector's backing store rather than allocating a fresh
    /// vector.
    ///
    /// See also [`collect_vec_list()`][Self::collect_vec_list] for collecting
    /// into a `LinkedList<Vec<T>>`.
    ///
    /// [`IndexedParallelIterator`]: trait.IndexedParallelIterator.html
    /// [`collect_into_vec()`]:
    ///     trait.IndexedParallelIterator.html#method.collect_into_vec
    ///
    /// # Examples
    ///
    /// ```
    /// use par_iter::prelude::*;
    ///
    /// let sync_vec: Vec<_> = (0..100).collect();
    ///
    /// let async_vec: Vec<_> = (0..100).into_par_iter().collect();
    ///
    /// assert_eq!(sync_vec, async_vec);
    /// ```
    ///
    /// You can collect a pair of collections like [`unzip`](#method.unzip)
    /// for paired items:
    ///
    /// ```
    /// use par_iter::prelude::*;
    ///
    /// let a = [(0, 1), (1, 2), (2, 3), (3, 4)];
    /// let (first, second): (Vec<_>, Vec<_>) = a.into_par_iter().collect();
    ///
    /// assert_eq!(first, [0, 1, 2, 3]);
    /// assert_eq!(second, [1, 2, 3, 4]);
    /// ```
    ///
    /// Or like [`partition_map`](#method.partition_map) for `Either` items:
    ///
    /// ```
    /// use par_iter::prelude::*;
    /// use par_iter::iter::Either;
    ///
    /// let (left, right): (Vec<_>, Vec<_>) = (0..8).into_par_iter().map(|x| {
    ///     if x % 2 == 0 {
    ///         Either::Left(x * 4)
    ///     } else {
    ///         Either::Right(x * 3)
    ///     }
    /// }).collect();
    ///
    /// assert_eq!(left, [0, 8, 16, 24]);
    /// assert_eq!(right, [3, 9, 15, 21]);
    /// ```
    ///
    /// You can even collect an arbitrarily-nested combination of pairs and
    /// `Either`:
    ///
    /// ```
    /// use par_iter::prelude::*;
    /// use par_iter::iter::Either;
    ///
    /// let (first, (left, right)): (Vec<_>, (Vec<_>, Vec<_>))
    ///     = (0..8).into_par_iter().map(|x| {
    ///         if x % 2 == 0 {
    ///             (x, Either::Left(x * 4))
    ///         } else {
    ///             (-x, Either::Right(x * 3))
    ///         }
    ///     }).collect();
    ///
    /// assert_eq!(first, [0, -1, 2, -3, 4, -5, 6, -7]);
    /// assert_eq!(left, [0, 8, 16, 24]);
    /// assert_eq!(right, [3, 9, 15, 21]);
    /// ```
    ///
    /// All of that can _also_ be combined with short-circuiting collection of
    /// `Result` or `Option` types:
    ///
    /// ```
    /// use par_iter::prelude::*;
    /// use par_iter::iter::Either;
    ///
    /// let result: Result<(Vec<_>, (Vec<_>, Vec<_>)), _>
    ///     = (0..8).into_par_iter().map(|x| {
    ///         if x > 5 {
    ///             Err(x)
    ///         } else if x % 2 == 0 {
    ///             Ok((x, Either::Left(x * 4)))
    ///         } else {
    ///             Ok((-x, Either::Right(x * 3)))
    ///         }
    ///     }).collect();
    ///
    /// let error = result.unwrap_err();
    /// assert!(error == 6 || error == 7);
    /// ```
    fn collect<C>(self) -> C
    where
        C: FromParallelIterator<Self::Item>,
    {
        // The target collection type drives the strategy through its
        // `FromParallelIterator` implementation.
        C::from_par_iter(self)
    }
2084
    /// Unzips the items of a parallel iterator into a pair of arbitrary
    /// `ParallelExtend` containers.
    ///
    /// You may prefer to use `unzip_into_vecs()`, which allocates more
    /// efficiently with precise knowledge of how many elements the
    /// iterator contains, and even allows you to reuse existing
    /// vectors' backing stores rather than allocating fresh vectors.
    ///
    /// # Examples
    ///
    /// ```
    /// use par_iter::prelude::*;
    ///
    /// let a = [(0, 1), (1, 2), (2, 3), (3, 4)];
    ///
    /// let (left, right): (Vec<_>, Vec<_>) = a.par_iter().cloned().unzip();
    ///
    /// assert_eq!(left, [0, 1, 2, 3]);
    /// assert_eq!(right, [1, 2, 3, 4]);
    /// ```
    ///
    /// Nested pairs can be unzipped too.
    ///
    /// ```
    /// use par_iter::prelude::*;
    ///
    /// let (values, (squares, cubes)): (Vec<_>, (Vec<_>, Vec<_>)) = (0..4).into_par_iter()
    ///     .map(|i| (i, (i * i, i * i * i)))
    ///     .unzip();
    ///
    /// assert_eq!(values, [0, 1, 2, 3]);
    /// assert_eq!(squares, [0, 1, 4, 9]);
    /// assert_eq!(cubes, [0, 1, 8, 27]);
    /// ```
    fn unzip<A, B, FromA, FromB>(self) -> (FromA, FromB)
    where
        Self: ParallelIterator<Item = (A, B)>,
        FromA: Default + Send + ParallelExtend<A>,
        FromB: Default + Send + ParallelExtend<B>,
        A: Send,
        B: Send,
    {
        // Shared machinery with `partition`/`partition_map` below.
        unzip::unzip(self)
    }
2129
    /// Partitions the items of a parallel iterator into a pair of arbitrary
    /// `ParallelExtend` containers.  Items for which the `predicate` returns
    /// true go into the first container, and the rest go into the second.
    ///
    /// Note: unlike the standard `Iterator::partition`, this allows distinct
    /// collection types for the left and right items.  This is more flexible,
    /// but may require new type annotations when converting sequential code
    /// that used type inference assuming the two were the same.
    ///
    /// # Examples
    ///
    /// ```
    /// use par_iter::prelude::*;
    ///
    /// let (left, right): (Vec<_>, Vec<_>) = (0..8).into_par_iter().partition(|x| x % 2 == 0);
    ///
    /// assert_eq!(left, [0, 2, 4, 6]);
    /// assert_eq!(right, [1, 3, 5, 7]);
    /// ```
    fn partition<A, B, P>(self, predicate: P) -> (A, B)
    where
        A: Default + Send + ParallelExtend<Self::Item>,
        B: Default + Send + ParallelExtend<Self::Item>,
        P: Fn(&Self::Item) -> bool + Sync + Send,
    {
        // Routes `true` items into `A` and the rest into `B`, reusing the
        // unzip machinery.
        unzip::partition(self, predicate)
    }
2157
    /// Partitions and maps the items of a parallel iterator into a pair of
    /// arbitrary `ParallelExtend` containers.  `Either::Left` items go into
    /// the first container, and `Either::Right` items go into the second.
    ///
    /// # Examples
    ///
    /// ```
    /// use par_iter::prelude::*;
    /// use par_iter::iter::Either;
    ///
    /// let (left, right): (Vec<_>, Vec<_>) = (0..8).into_par_iter()
    ///     .partition_map(|x| {
    ///         if x % 2 == 0 {
    ///             Either::Left(x * 4)
    ///         } else {
    ///             Either::Right(x * 3)
    ///         }
    ///     });
    ///
    /// assert_eq!(left, [0, 8, 16, 24]);
    /// assert_eq!(right, [3, 9, 15, 21]);
    /// ```
    ///
    /// Nested `Either` enums can be split as well.
    ///
    /// ```
    /// use par_iter::prelude::*;
    /// use par_iter::iter::Either::*;
    ///
    /// let ((fizzbuzz, fizz), (buzz, other)): ((Vec<_>, Vec<_>), (Vec<_>, Vec<_>)) = (1..20)
    ///     .into_par_iter()
    ///     .partition_map(|x| match (x % 3, x % 5) {
    ///         (0, 0) => Left(Left(x)),
    ///         (0, _) => Left(Right(x)),
    ///         (_, 0) => Right(Left(x)),
    ///         (_, _) => Right(Right(x)),
    ///     });
    ///
    /// assert_eq!(fizzbuzz, [15]);
    /// assert_eq!(fizz, [3, 6, 9, 12, 18]);
    /// assert_eq!(buzz, [5, 10]);
    /// assert_eq!(other, [1, 2, 4, 7, 8, 11, 13, 14, 16, 17, 19]);
    /// ```
    fn partition_map<A, B, P, L, R>(self, predicate: P) -> (A, B)
    where
        A: Default + Send + ParallelExtend<L>,
        B: Default + Send + ParallelExtend<R>,
        P: Fn(Self::Item) -> Either<L, R> + Sync + Send,
        L: Send,
        R: Send,
    {
        // Like `partition`, but the predicate also transforms each item,
        // allowing different payload types on each side.
        unzip::partition_map(self, predicate)
    }
2211
    /// Intersperses clones of an element between items of this iterator.
    ///
    /// # Examples
    ///
    /// ```
    /// use par_iter::prelude::*;
    ///
    /// let x = vec![1, 2, 3];
    /// let r: Vec<_> = x.into_par_iter().intersperse(-1).collect();
    ///
    /// assert_eq!(r, vec![1, -1, 2, -1, 3]);
    /// ```
    fn intersperse(self, element: Self::Item) -> Intersperse<Self>
    where
        Self::Item: Clone,
    {
        // A clone of `element` is inserted between each adjacent pair of
        // items (hence the `Clone` bound), but not before the first or
        // after the last, as the example shows.
        Intersperse::new(self, element)
    }
2230
    /// Creates an iterator that yields `n` elements from *anywhere* in the
    /// original iterator.
    ///
    /// This is similar to [`IndexedParallelIterator::take`] without being
    /// constrained to the "first" `n` of the original iterator order. The
    /// taken items will still maintain their relative order where that is
    /// visible in `collect`, `reduce`, and similar outputs.
    ///
    /// # Examples
    ///
    /// ```
    /// use par_iter::prelude::*;
    ///
    /// let result: Vec<_> = (0..100)
    ///     .into_par_iter()
    ///     .filter(|&x| x % 2 == 0)
    ///     .take_any(5)
    ///     .collect();
    ///
    /// assert_eq!(result.len(), 5);
    /// assert!(result.windows(2).all(|w| w[0] < w[1]));
    /// ```
    fn take_any(self, n: usize) -> TakeAny<Self> {
        // Caps the total number of items passed downstream at `n`,
        // regardless of where in the sequence they occur.
        TakeAny::new(self, n)
    }
2256
    /// Creates an iterator that skips `n` elements from *anywhere* in the
    /// original iterator.
    ///
    /// This is similar to [`IndexedParallelIterator::skip`] without being
    /// constrained to the "first" `n` of the original iterator order. The
    /// remaining items will still maintain their relative order where that is
    /// visible in `collect`, `reduce`, and similar outputs.
    ///
    /// # Examples
    ///
    /// ```
    /// use par_iter::prelude::*;
    ///
    /// let result: Vec<_> = (0..100)
    ///     .into_par_iter()
    ///     .filter(|&x| x % 2 == 0)
    ///     .skip_any(5)
    ///     .collect();
    ///
    /// assert_eq!(result.len(), 45);
    /// assert!(result.windows(2).all(|w| w[0] < w[1]));
    /// ```
    fn skip_any(self, n: usize) -> SkipAny<Self> {
        // Discards `n` items total, regardless of where in the sequence
        // they occur; the complement of `take_any`.
        SkipAny::new(self, n)
    }
2282
    /// Creates an iterator that takes elements from *anywhere* in the original
    /// iterator until the given `predicate` returns `false`.
    ///
    /// The `predicate` may be anything -- e.g. it could be checking a fact
    /// about the item, a global condition unrelated to the item itself, or
    /// some combination thereof.
    ///
    /// If parallel calls to the `predicate` race and give different results,
    /// then the `true` results will still take those particular items,
    /// while respecting the `false` result from elsewhere to skip any
    /// further items.
    ///
    /// This is similar to [`Iterator::take_while`] without being constrained to
    /// the original iterator order. The taken items will still maintain
    /// their relative order where that is visible in `collect`, `reduce`,
    /// and similar outputs.
    ///
    /// # Examples
    ///
    /// ```
    /// use par_iter::prelude::*;
    ///
    /// let result: Vec<_> = (0..100)
    ///     .into_par_iter()
    ///     .take_any_while(|x| *x < 50)
    ///     .collect();
    ///
    /// assert!(result.len() <= 50);
    /// assert!(result.windows(2).all(|w| w[0] < w[1]));
    /// ```
    ///
    /// ```
    /// use par_iter::prelude::*;
    /// use std::sync::atomic::AtomicUsize;
    /// use std::sync::atomic::Ordering::Relaxed;
    ///
    /// // Collect any group of items that sum <= 1000
    /// let quota = AtomicUsize::new(1000);
    /// let result: Vec<_> = (0_usize..100)
    ///     .into_par_iter()
    ///     .take_any_while(|&x| {
    ///         quota.fetch_update(Relaxed, Relaxed, |q| q.checked_sub(x))
    ///             .is_ok()
    ///     })
    ///     .collect();
    ///
    /// let sum = result.iter().sum::<usize>();
    /// assert!(matches!(sum, 902..=1000));
    /// ```
    fn take_any_while<P>(self, predicate: P) -> TakeAnyWhile<Self, P>
    where
        P: Fn(&Self::Item) -> bool + Sync + Send,
    {
        // The first `false` result (from any thread) stops further taking,
        // per the race semantics documented above.
        TakeAnyWhile::new(self, predicate)
    }
2338
    /// Creates an iterator that skips elements from *anywhere* in the original
    /// iterator until the given `predicate` returns `false`.
    ///
    /// The `predicate` may be anything -- e.g. it could be checking a fact
    /// about the item, a global condition unrelated to the item itself, or
    /// some combination thereof.
    ///
    /// If parallel calls to the `predicate` race and give different results,
    /// then the `true` results will still skip those particular items,
    /// while respecting the `false` result from elsewhere to skip any
    /// further items.
    ///
    /// This is similar to [`Iterator::skip_while`] without being constrained to
    /// the original iterator order. The remaining items will still maintain
    /// their relative order where that is visible in `collect`, `reduce`,
    /// and similar outputs.
    ///
    /// # Examples
    ///
    /// ```
    /// use par_iter::prelude::*;
    ///
    /// let result: Vec<_> = (0..100)
    ///     .into_par_iter()
    ///     .skip_any_while(|x| *x < 50)
    ///     .collect();
    ///
    /// assert!(result.len() >= 50);
    /// assert!(result.windows(2).all(|w| w[0] < w[1]));
    /// ```
    fn skip_any_while<P>(self, predicate: P) -> SkipAnyWhile<Self, P>
    where
        P: Fn(&Self::Item) -> bool + Sync + Send,
    {
        // The complement of `take_any_while`: items are dropped while the
        // predicate holds, with the same race semantics as above.
        SkipAnyWhile::new(self, predicate)
    }
2375
2376    /// Collects this iterator into a linked list of vectors.
2377    ///
2378    /// This is useful when you need to condense a parallel iterator into a
2379    /// collection, but have no specific requirements for what that
2380    /// collection should be. If you plan to store the collection
2381    /// longer-term, `Vec<T>` is, as always, likely the best default choice,
2382    /// despite the overhead that comes from concatenating each vector. Or,
2383    /// if this is an `IndexedParallelIterator`, you should also prefer to
2384    /// just collect to a `Vec<T>`.
2385    ///
2386    /// Internally, most [`FromParallelIterator`]/[`ParallelExtend`]
2387    /// implementations use this strategy; each job collecting their chunk
2388    /// of the iterator to a `Vec<T>` and those chunks getting merged into a
2389    /// `LinkedList`, before then extending the collection with each vector.
2390    /// This is a very efficient way to collect an unindexed parallel
2391    /// iterator, without much intermediate data movement.
2392    ///
2393    /// # Examples
2394    ///
2395    /// ```
2396    /// # use std::collections::LinkedList;
2397    /// use par_iter::prelude::*;
2398    ///
2399    /// let result: LinkedList<Vec<_>> = (0..=100)
2400    ///     .into_par_iter()
2401    ///     .filter(|x| x % 2 == 0)
2402    ///     .flat_map(|x| 0..x)
2403    ///     .collect_vec_list();
2404    ///
2405    /// // `par_iter.collect_vec_list().into_iter().flatten()` turns
2406    /// // a parallel iterator into a serial one
2407    /// let total_len = result.into_iter().flatten().count();
2408    /// assert_eq!(total_len, 2550);
2409    /// ```
2410    fn collect_vec_list(self) -> LinkedList<Vec<Self::Item>> {
2411        match extend::fast_collect(self) {
2412            Either::Left(vec) => {
2413                let mut list = LinkedList::new();
2414                if !vec.is_empty() {
2415                    list.push_back(vec);
2416                }
2417                list
2418            }
2419            Either::Right(list) => list,
2420        }
2421    }
2422
    /// Internal method used to define the behavior of this parallel
    /// iterator. You should not need to call this directly.
    ///
    /// This method causes the iterator `self` to start producing
    /// items and to feed them to the consumer `consumer` one by one.
    /// It may split the consumer before doing so to create the
    /// opportunity to produce in parallel.
    ///
    /// See the [README] for more details on the internals of parallel
    /// iterators.
    ///
    /// NOTE(review): this is a required method — no default body is
    /// provided here, unlike the adaptor methods above.
    ///
    /// [README]: https://github.com/rayon-rs/rayon/blob/main/src/iter/plumbing/README.md
    fn drive_unindexed<C>(self, consumer: C) -> C::Result
    where
        C: UnindexedConsumer<Self::Item>;
2438
    /// Internal method used to define the behavior of this parallel
    /// iterator. You should not need to call this directly.
    ///
    /// Returns the number of items produced by this iterator, if known
    /// statically. This can be used by consumers to trigger special fast
    /// paths. Therefore, if `Some(_)` is returned, this iterator must only
    /// use the (indexed) `Consumer` methods when driving a consumer, such
    /// as `split_at()`. Calling `UnindexedConsumer::split_off_left()` or
    /// other `UnindexedConsumer` methods -- or returning an inaccurate
    /// value -- may result in panics.
    ///
    /// This method is currently used to optimize `collect` for want
    /// of true Rust specialization; it may be removed when
    /// specialization is stable.
    fn opt_len(&self) -> Option<usize> {
        // Conservative default: claim no static knowledge of the length.
        None
    }
2456}
2457
/// Blanket impl: every `ParallelIterator` is trivially "convertible" into
/// a parallel iterator — itself.
impl<T: ParallelIterator> IntoParallelIterator for T {
    type Item = T::Item;
    type Iter = T;

    /// Identity conversion.
    fn into_par_iter(self) -> T {
        self
    }
}
2466
2467/// An iterator that supports "random access" to its data, meaning
2468/// that you can split it at arbitrary indices and draw data from
2469/// those points.
2470///
2471/// **Note:** Not implemented for `u64`, `i64`, `u128`, or `i128` ranges
2472// Waiting for `ExactSizeIterator::is_empty` to be stabilized. See rust-lang/rust#35428
2473#[allow(clippy::len_without_is_empty)]
2474pub trait IndexedParallelIterator: ParallelIterator {
    /// Divides an iterator into sequential blocks of exponentially-increasing
    /// size.
    ///
    /// Normally, parallel iterators are recursively divided into tasks in
    /// parallel. This adaptor changes the default behavior by splitting the
    /// iterator into a **sequence** of parallel iterators of increasing
    /// sizes. Sizes grow exponentially in order to avoid creating
    /// too many blocks. This also allows balancing the current block with all
    /// previous ones.
    ///
    /// This can have many applications but the most notable ones are:
    /// - better performance with [`find_first()`][ParallelIterator::find_first]
    /// - more predictable performance with
    ///   [`find_any()`][ParallelIterator::find_any] or any interruptible
    ///   computation
    ///
    /// # Examples
    ///
    /// ```
    /// use par_iter::prelude::*;
    /// assert_eq!((0..10_000).into_par_iter()
    ///                       .by_exponential_blocks()
    ///                       .find_first(|&e| e==4_999), Some(4_999))
    /// ```
    ///
    /// In this example, without blocks, the initial range would be split into
    /// two, but all work on the right hand side (from 5,000 onwards) is
    /// **useless** since the sequential algorithm never goes there. This
    /// means that if two threads are used there will be **no** speedup **at
    /// all**.
    ///
    /// `by_exponential_blocks` on the other hand will start with the leftmost
    /// range from 0 to `p` (threads number), continue with p to 3p, then 3p
    /// to 7p...
    ///
    /// Each subrange is treated in parallel, while all subranges are treated
    /// sequentially. We therefore ensure a logarithmic number of blocks
    /// (and overhead) while guaranteeing we stop at the first block
    /// containing the searched data.
    fn by_exponential_blocks(self) -> ExponentialBlocks<Self> {
        ExponentialBlocks::new(self)
    }
2517
2518    /// Divides an iterator into sequential blocks of the given size.
2519    ///
2520    /// Normally, parallel iterators are recursively divided into tasks in
2521    /// parallel. This adaptor changes the default behavior by splitting the
2522    /// iterator into a **sequence** of parallel iterators of given
2523    /// `block_size`. The main application is to obtain better
2524    /// memory locality (especially if the reduce operation re-use folded data).
2525    ///
2526    /// **Panics** if `block_size` is 0.
2527    ///
2528    /// # Example
2529    /// ```
2530    /// use par_iter::prelude::*;
2531    /// // during most reductions v1 and v2 fit the cache
2532    /// let v = (0u32..10_000_000)
2533    ///     .into_par_iter()
2534    ///     .by_uniform_blocks(1_000_000)
2535    ///     .fold(Vec::new, |mut v, e| { v.push(e); v})
2536    ///     .reduce(Vec::new, |mut v1, mut v2| { v1.append(&mut v2); v1});
2537    /// assert_eq!(v, (0u32..10_000_000).collect::<Vec<u32>>());
2538    /// ```
2539    #[track_caller]
2540    fn by_uniform_blocks(self, block_size: usize) -> UniformBlocks<Self> {
2541        assert!(block_size != 0, "block_size must not be zero");
2542        UniformBlocks::new(self, block_size)
2543    }
2544
    /// Collects the results of the iterator into the specified
    /// vector. The vector is always cleared before execution
    /// begins. If possible, reusing the vector across calls can lead
    /// to better performance since it reuses the same backing buffer.
    ///
    /// # Examples
    ///
    /// ```
    /// use par_iter::prelude.*;
    ///
    /// // any prior data will be cleared
    /// let mut vec = vec![-1, -2, -3];
    ///
    /// (0..5).into_par_iter()
    ///     .collect_into_vec(&mut vec);
    ///
    /// assert_eq!(vec, [0, 1, 2, 3, 4]);
    /// ```
    fn collect_into_vec(self, target: &mut Vec<Self::Item>) {
        // Indexed length knowledge lets the collector size `target` exactly.
        collect::collect_into_vec(self, target);
    }
2566
2567    /// Unzips the results of the iterator into the specified
2568    /// vectors. The vectors are always cleared before execution
2569    /// begins. If possible, reusing the vectors across calls can lead
2570    /// to better performance since they reuse the same backing buffer.
2571    ///
2572    /// # Examples
2573    ///
2574    /// ```
2575    /// use par_iter::prelude::*;
2576    ///
2577    /// // any prior data will be cleared
2578    /// let mut left = vec![42; 10];
2579    /// let mut right = vec![-1; 10];
2580    ///
2581    /// (10..15).into_par_iter()
2582    ///     .enumerate()
2583    ///     .unzip_into_vecs(&mut left, &mut right);
2584    ///
2585    /// assert_eq!(left, [0, 1, 2, 3, 4]);
2586    /// assert_eq!(right, [10, 11, 12, 13, 14]);
2587    /// ```
2588    fn unzip_into_vecs<A, B>(self, left: &mut Vec<A>, right: &mut Vec<B>)
2589    where
2590        Self: IndexedParallelIterator<Item = (A, B)>,
2591        A: Send,
2592        B: Send,
2593    {
2594        collect::unzip_into_vecs(self, left, right);
2595    }
2596
2597    /// Iterates over tuples `(A, B)`, where the items `A` are from
2598    /// this iterator and `B` are from the iterator given as argument.
2599    /// Like the `zip` method on ordinary iterators, if the two
2600    /// iterators are of unequal length, you only get the items they
2601    /// have in common.
2602    ///
2603    /// # Examples
2604    ///
2605    /// ```
2606    /// use par_iter::prelude::*;
2607    ///
2608    /// let result: Vec<_> = (1..4)
2609    ///     .into_par_iter()
2610    ///     .zip(vec!['a', 'b', 'c'])
2611    ///     .collect();
2612    ///
2613    /// assert_eq!(result, [(1, 'a'), (2, 'b'), (3, 'c')]);
2614    /// ```
2615    fn zip<Z>(self, zip_op: Z) -> Zip<Self, Z::Iter>
2616    where
2617        Z: IntoParallelIterator,
2618        Z::Iter: IndexedParallelIterator,
2619    {
2620        Zip::new(self, zip_op.into_par_iter())
2621    }
2622
2623    /// The same as `Zip`, but requires that both iterators have the same
2624    /// length.
2625    ///
2626    /// # Panics
2627    /// Will panic if `self` and `zip_op` are not the same length.
2628    ///
2629    /// ```should_panic
2630    /// use par_iter::prelude::*;
2631    ///
2632    /// let one = [1u8];
2633    /// let two = [2u8, 2];
2634    /// let one_iter = one.par_iter();
2635    /// let two_iter = two.par_iter();
2636    ///
2637    /// // this will panic
2638    /// let zipped: Vec<(&u8, &u8)> = one_iter.zip_eq(two_iter).collect();
2639    ///
2640    /// // we should never get here
2641    /// assert_eq!(1, zipped.len());
2642    /// ```
2643    #[track_caller]
2644    fn zip_eq<Z>(self, zip_op: Z) -> ZipEq<Self, Z::Iter>
2645    where
2646        Z: IntoParallelIterator,
2647        Z::Iter: IndexedParallelIterator,
2648    {
2649        let zip_op_iter = zip_op.into_par_iter();
2650        assert_eq!(
2651            self.len(),
2652            zip_op_iter.len(),
2653            "iterators must have the same length"
2654        );
2655        ZipEq::new(self, zip_op_iter)
2656    }
2657
2658    /// Interleaves elements of this iterator and the other given
2659    /// iterator. Alternately yields elements from this iterator and
2660    /// the given iterator, until both are exhausted. If one iterator
2661    /// is exhausted before the other, the last elements are provided
2662    /// from the other.
2663    ///
2664    /// # Examples
2665    ///
2666    /// ```
2667    /// use par_iter::prelude::*;
2668    /// let (x, y) = (vec![1, 2], vec![3, 4, 5, 6]);
2669    /// let r: Vec<i32> = x.into_par_iter().interleave(y).collect();
2670    /// assert_eq!(r, vec![1, 3, 2, 4, 5, 6]);
2671    /// ```
2672    fn interleave<I>(self, other: I) -> Interleave<Self, I::Iter>
2673    where
2674        I: IntoParallelIterator<Item = Self::Item>,
2675        I::Iter: IndexedParallelIterator<Item = Self::Item>,
2676    {
2677        Interleave::new(self, other.into_par_iter())
2678    }
2679
2680    /// Interleaves elements of this iterator and the other given
2681    /// iterator, until one is exhausted.
2682    ///
2683    /// # Examples
2684    ///
2685    /// ```
2686    /// use par_iter::prelude::*;
2687    /// let (x, y) = (vec![1, 2, 3, 4], vec![5, 6]);
2688    /// let r: Vec<i32> = x.into_par_iter().interleave_shortest(y).collect();
2689    /// assert_eq!(r, vec![1, 5, 2, 6, 3]);
2690    /// ```
2691    fn interleave_shortest<I>(self, other: I) -> InterleaveShortest<Self, I::Iter>
2692    where
2693        I: IntoParallelIterator<Item = Self::Item>,
2694        I::Iter: IndexedParallelIterator<Item = Self::Item>,
2695    {
2696        InterleaveShortest::new(self, other.into_par_iter())
2697    }
2698
2699    /// Splits an iterator up into fixed-size chunks.
2700    ///
2701    /// Returns an iterator that returns `Vec`s of the given number of elements.
2702    /// If the number of elements in the iterator is not divisible by
2703    /// `chunk_size`, the last chunk may be shorter than `chunk_size`.
2704    ///
2705    /// See also [`par_chunks()`] and [`par_chunks_mut()`] for similar behavior
2706    /// on slices, without having to allocate intermediate `Vec`s for the
2707    /// chunks.
2708    ///
2709    /// [`par_chunks()`]: ../slice/trait.ParallelSlice.html#method.par_chunks
2710    /// [`par_chunks_mut()`]: ../slice/trait.ParallelSliceMut.html#method.par_chunks_mut
2711    ///
2712    /// **Panics** if `chunk_size` is 0.
2713    ///
2714    /// # Examples
2715    ///
2716    /// ```
2717    /// use par_iter::prelude::*;
2718    /// let a = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
2719    /// let r: Vec<Vec<i32>> = a.into_par_iter().chunks(3).collect();
2720    /// assert_eq!(r, vec![vec![1,2,3], vec![4,5,6], vec![7,8,9], vec![10]]);
2721    /// ```
2722    #[track_caller]
2723    fn chunks(self, chunk_size: usize) -> Chunks<Self> {
2724        assert!(chunk_size != 0, "chunk_size must not be zero");
2725        Chunks::new(self, chunk_size)
2726    }
2727
2728    /// Splits an iterator into fixed-size chunks, performing a sequential
2729    /// [`fold()`] on each chunk.
2730    ///
2731    /// Returns an iterator that produces a folded result for each chunk of
2732    /// items produced by this iterator.
2733    ///
2734    /// This works essentially like:
2735    ///
2736    /// ```text
2737    /// iter.chunks(chunk_size)
2738    ///     .map(|chunk|
2739    ///         chunk.into_iter()
2740    ///             .fold(identity, fold_op)
2741    ///     )
2742    /// ```
2743    ///
2744    /// except there is no per-chunk allocation overhead.
2745    ///
2746    /// [`fold()`]: std::iter::Iterator#method.fold
2747    ///
2748    /// **Panics** if `chunk_size` is 0.
2749    ///
2750    /// # Examples
2751    ///
2752    /// ```
2753    /// use par_iter::prelude::*;
2754    /// let nums = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
2755    /// let chunk_sums = nums.into_par_iter().fold_chunks(2, || 0, |a, n| a + n).collect::<Vec<_>>();
2756    /// assert_eq!(chunk_sums, vec![3, 7, 11, 15, 19]);
2757    /// ```
2758    #[track_caller]
2759    fn fold_chunks<T, ID, F>(
2760        self,
2761        chunk_size: usize,
2762        identity: ID,
2763        fold_op: F,
2764    ) -> FoldChunks<Self, ID, F>
2765    where
2766        ID: Fn() -> T + Send + Sync,
2767        F: Fn(T, Self::Item) -> T + Send + Sync,
2768        T: Send,
2769    {
2770        assert!(chunk_size != 0, "chunk_size must not be zero");
2771        FoldChunks::new(self, chunk_size, identity, fold_op)
2772    }
2773
2774    /// Splits an iterator into fixed-size chunks, performing a sequential
2775    /// [`fold()`] on each chunk.
2776    ///
2777    /// Returns an iterator that produces a folded result for each chunk of
2778    /// items produced by this iterator.
2779    ///
2780    /// This works essentially like `fold_chunks(chunk_size, || init.clone(),
2781    /// fold_op)`, except it doesn't require the `init` type to be `Sync`,
2782    /// nor any other form of added synchronization.
2783    ///
2784    /// [`fold()`]: std::iter::Iterator#method.fold
2785    ///
2786    /// **Panics** if `chunk_size` is 0.
2787    ///
2788    /// # Examples
2789    ///
2790    /// ```
2791    /// use par_iter::prelude::*;
2792    /// let nums = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
2793    /// let chunk_sums = nums.into_par_iter().fold_chunks_with(2, 0, |a, n| a + n).collect::<Vec<_>>();
2794    /// assert_eq!(chunk_sums, vec![3, 7, 11, 15, 19]);
2795    /// ```
2796    #[track_caller]
2797    fn fold_chunks_with<T, F>(
2798        self,
2799        chunk_size: usize,
2800        init: T,
2801        fold_op: F,
2802    ) -> FoldChunksWith<Self, T, F>
2803    where
2804        T: Send + Clone,
2805        F: Fn(T, Self::Item) -> T + Send + Sync,
2806    {
2807        assert!(chunk_size != 0, "chunk_size must not be zero");
2808        FoldChunksWith::new(self, chunk_size, init, fold_op)
2809    }
2810
2811    /// Lexicographically compares the elements of this `ParallelIterator` with
2812    /// those of another.
2813    ///
2814    /// # Examples
2815    ///
2816    /// ```
2817    /// use par_iter::prelude::*;
2818    /// use std::cmp::Ordering::*;
2819    ///
2820    /// let x = vec![1, 2, 3];
2821    /// assert_eq!(x.par_iter().cmp(&vec![1, 3, 0]), Less);
2822    /// assert_eq!(x.par_iter().cmp(&vec![1, 2, 3]), Equal);
2823    /// assert_eq!(x.par_iter().cmp(&vec![1, 2]), Greater);
2824    /// ```
2825    fn cmp<I>(self, other: I) -> Ordering
2826    where
2827        I: IntoParallelIterator<Item = Self::Item>,
2828        I::Iter: IndexedParallelIterator,
2829        Self::Item: Ord,
2830    {
2831        #[inline]
2832        fn ordering<T: Ord>((x, y): (T, T)) -> Ordering {
2833            Ord::cmp(&x, &y)
2834        }
2835
2836        #[inline]
2837        fn inequal(&ord: &Ordering) -> bool {
2838            ord != Ordering::Equal
2839        }
2840
2841        let other = other.into_par_iter();
2842        let ord_len = self.len().cmp(&other.len());
2843        self.zip(other)
2844            .map(ordering)
2845            .find_first(inequal)
2846            .unwrap_or(ord_len)
2847    }
2848
2849    /// Lexicographically compares the elements of this `ParallelIterator` with
2850    /// those of another.
2851    ///
2852    /// # Examples
2853    ///
2854    /// ```
2855    /// use par_iter::prelude::*;
2856    /// use std::cmp::Ordering::*;
2857    /// use std::f64::NAN;
2858    ///
2859    /// let x = vec![1.0, 2.0, 3.0];
2860    /// assert_eq!(x.par_iter().partial_cmp(&vec![1.0, 3.0, 0.0]), Some(Less));
2861    /// assert_eq!(x.par_iter().partial_cmp(&vec![1.0, 2.0, 3.0]), Some(Equal));
2862    /// assert_eq!(x.par_iter().partial_cmp(&vec![1.0, 2.0]), Some(Greater));
2863    /// assert_eq!(x.par_iter().partial_cmp(&vec![1.0, NAN]), None);
2864    /// ```
2865    fn partial_cmp<I>(self, other: I) -> Option<Ordering>
2866    where
2867        I: IntoParallelIterator,
2868        I::Iter: IndexedParallelIterator,
2869        Self::Item: PartialOrd<I::Item>,
2870    {
2871        #[inline]
2872        fn ordering<T: PartialOrd<U>, U>((x, y): (T, U)) -> Option<Ordering> {
2873            PartialOrd::partial_cmp(&x, &y)
2874        }
2875
2876        #[inline]
2877        fn inequal(&ord: &Option<Ordering>) -> bool {
2878            ord != Some(Ordering::Equal)
2879        }
2880
2881        let other = other.into_par_iter();
2882        let ord_len = self.len().cmp(&other.len());
2883        self.zip(other)
2884            .map(ordering)
2885            .find_first(inequal)
2886            .unwrap_or(Some(ord_len))
2887    }
2888
2889    /// Determines if the elements of this `ParallelIterator`
2890    /// are equal to those of another
2891    fn eq<I>(self, other: I) -> bool
2892    where
2893        I: IntoParallelIterator,
2894        I::Iter: IndexedParallelIterator,
2895        Self::Item: PartialEq<I::Item>,
2896    {
2897        #[inline]
2898        fn eq<T: PartialEq<U>, U>((x, y): (T, U)) -> bool {
2899            PartialEq::eq(&x, &y)
2900        }
2901
2902        let other = other.into_par_iter();
2903        self.len() == other.len() && self.zip(other).all(eq)
2904    }
2905
2906    /// Determines if the elements of this `ParallelIterator`
2907    /// are unequal to those of another
2908    fn ne<I>(self, other: I) -> bool
2909    where
2910        I: IntoParallelIterator,
2911        I::Iter: IndexedParallelIterator,
2912        Self::Item: PartialEq<I::Item>,
2913    {
2914        !self.eq(other)
2915    }
2916
2917    /// Determines if the elements of this `ParallelIterator`
2918    /// are lexicographically less than those of another.
2919    fn lt<I>(self, other: I) -> bool
2920    where
2921        I: IntoParallelIterator,
2922        I::Iter: IndexedParallelIterator,
2923        Self::Item: PartialOrd<I::Item>,
2924    {
2925        self.partial_cmp(other) == Some(Ordering::Less)
2926    }
2927
2928    /// Determines if the elements of this `ParallelIterator`
2929    /// are less or equal to those of another.
2930    fn le<I>(self, other: I) -> bool
2931    where
2932        I: IntoParallelIterator,
2933        I::Iter: IndexedParallelIterator,
2934        Self::Item: PartialOrd<I::Item>,
2935    {
2936        let ord = self.partial_cmp(other);
2937        ord == Some(Ordering::Equal) || ord == Some(Ordering::Less)
2938    }
2939
2940    /// Determines if the elements of this `ParallelIterator`
2941    /// are lexicographically greater than those of another.
2942    fn gt<I>(self, other: I) -> bool
2943    where
2944        I: IntoParallelIterator,
2945        I::Iter: IndexedParallelIterator,
2946        Self::Item: PartialOrd<I::Item>,
2947    {
2948        self.partial_cmp(other) == Some(Ordering::Greater)
2949    }
2950
2951    /// Determines if the elements of this `ParallelIterator`
2952    /// are less or equal to those of another.
2953    fn ge<I>(self, other: I) -> bool
2954    where
2955        I: IntoParallelIterator,
2956        I::Iter: IndexedParallelIterator,
2957        Self::Item: PartialOrd<I::Item>,
2958    {
2959        let ord = self.partial_cmp(other);
2960        ord == Some(Ordering::Equal) || ord == Some(Ordering::Greater)
2961    }
2962
2963    /// Yields an index along with each item.
2964    ///
2965    /// # Examples
2966    ///
2967    /// ```
2968    /// use par_iter::prelude::*;
2969    ///
2970    /// let chars = vec!['a', 'b', 'c'];
2971    /// let result: Vec<_> = chars
2972    ///     .into_par_iter()
2973    ///     .enumerate()
2974    ///     .collect();
2975    ///
2976    /// assert_eq!(result, [(0, 'a'), (1, 'b'), (2, 'c')]);
2977    /// ```
2978    fn enumerate(self) -> Enumerate<Self> {
2979        Enumerate::new(self)
2980    }
2981
2982    /// Creates an iterator that steps by the given amount
2983    ///
2984    /// # Examples
2985    ///
2986    /// ```
2987    /// use par_iter::prelude::*;
2988    ///
2989    /// let range = (3..10);
2990    /// let result: Vec<i32> = range
2991    ///    .into_par_iter()
2992    ///    .step_by(3)
2993    ///    .collect();
2994    ///
2995    /// assert_eq!(result, [3, 6, 9])
2996    /// ```
2997    fn step_by(self, step: usize) -> StepBy<Self> {
2998        StepBy::new(self, step)
2999    }
3000
3001    /// Creates an iterator that skips the first `n` elements.
3002    ///
3003    /// # Examples
3004    ///
3005    /// ```
3006    /// use par_iter::prelude::*;
3007    ///
3008    /// let result: Vec<_> = (0..100)
3009    ///     .into_par_iter()
3010    ///     .skip(95)
3011    ///     .collect();
3012    ///
3013    /// assert_eq!(result, [95, 96, 97, 98, 99]);
3014    /// ```
3015    fn skip(self, n: usize) -> Skip<Self> {
3016        Skip::new(self, n)
3017    }
3018
3019    /// Creates an iterator that yields the first `n` elements.
3020    ///
3021    /// # Examples
3022    ///
3023    /// ```
3024    /// use par_iter::prelude::*;
3025    ///
3026    /// let result: Vec<_> = (0..100)
3027    ///     .into_par_iter()
3028    ///     .take(5)
3029    ///     .collect();
3030    ///
3031    /// assert_eq!(result, [0, 1, 2, 3, 4]);
3032    /// ```
3033    fn take(self, n: usize) -> Take<Self> {
3034        Take::new(self, n)
3035    }
3036
3037    /// Searches for **some** item in the parallel iterator that
3038    /// matches the given predicate, and returns its index.  Like
3039    /// `ParallelIterator::find_any`, the parallel search will not
3040    /// necessarily find the **first** match, and once a match is
3041    /// found we'll attempt to stop processing any more.
3042    ///
3043    /// # Examples
3044    ///
3045    /// ```
3046    /// use par_iter::prelude::*;
3047    ///
3048    /// let a = [1, 2, 3, 3];
3049    ///
3050    /// let i = a.par_iter().position_any(|&x| x == 3).expect("found");
3051    /// assert!(i == 2 || i == 3);
3052    ///
3053    /// assert_eq!(a.par_iter().position_any(|&x| x == 100), None);
3054    /// ```
3055    fn position_any<P>(self, predicate: P) -> Option<usize>
3056    where
3057        P: Fn(Self::Item) -> bool + Sync + Send,
3058    {
3059        #[inline]
3060        fn check(&(_, p): &(usize, bool)) -> bool {
3061            p
3062        }
3063
3064        let (i, _) = self.map(predicate).enumerate().find_any(check)?;
3065        Some(i)
3066    }
3067
3068    /// Searches for the sequentially **first** item in the parallel iterator
3069    /// that matches the given predicate, and returns its index.
3070    ///
3071    /// Like `ParallelIterator::find_first`, once a match is found,
3072    /// all attempts to the right of the match will be stopped, while
3073    /// attempts to the left must continue in case an earlier match
3074    /// is found.
3075    ///
3076    /// Note that not all parallel iterators have a useful order, much like
3077    /// sequential `HashMap` iteration, so "first" may be nebulous.  If you
3078    /// just want the first match that discovered anywhere in the iterator,
3079    /// `position_any` is a better choice.
3080    ///
3081    /// # Examples
3082    ///
3083    /// ```
3084    /// use par_iter::prelude::*;
3085    ///
3086    /// let a = [1, 2, 3, 3];
3087    ///
3088    /// assert_eq!(a.par_iter().position_first(|&x| x == 3), Some(2));
3089    ///
3090    /// assert_eq!(a.par_iter().position_first(|&x| x == 100), None);
3091    /// ```
3092    fn position_first<P>(self, predicate: P) -> Option<usize>
3093    where
3094        P: Fn(Self::Item) -> bool + Sync + Send,
3095    {
3096        #[inline]
3097        fn check(&(_, p): &(usize, bool)) -> bool {
3098            p
3099        }
3100
3101        let (i, _) = self.map(predicate).enumerate().find_first(check)?;
3102        Some(i)
3103    }
3104
3105    /// Searches for the sequentially **last** item in the parallel iterator
3106    /// that matches the given predicate, and returns its index.
3107    ///
3108    /// Like `ParallelIterator::find_last`, once a match is found,
3109    /// all attempts to the left of the match will be stopped, while
3110    /// attempts to the right must continue in case a later match
3111    /// is found.
3112    ///
3113    /// Note that not all parallel iterators have a useful order, much like
3114    /// sequential `HashMap` iteration, so "last" may be nebulous.  When the
3115    /// order doesn't actually matter to you, `position_any` is a better
3116    /// choice.
3117    ///
3118    /// # Examples
3119    ///
3120    /// ```
3121    /// use par_iter::prelude::*;
3122    ///
3123    /// let a = [1, 2, 3, 3];
3124    ///
3125    /// assert_eq!(a.par_iter().position_last(|&x| x == 3), Some(3));
3126    ///
3127    /// assert_eq!(a.par_iter().position_last(|&x| x == 100), None);
3128    /// ```
3129    fn position_last<P>(self, predicate: P) -> Option<usize>
3130    where
3131        P: Fn(Self::Item) -> bool + Sync + Send,
3132    {
3133        #[inline]
3134        fn check(&(_, p): &(usize, bool)) -> bool {
3135            p
3136        }
3137
3138        let (i, _) = self.map(predicate).enumerate().find_last(check)?;
3139        Some(i)
3140    }
3141
3142    #[doc(hidden)]
3143    #[deprecated(
3144        note = "parallel `position` does not search in order -- use `position_any`, \\
3145                `position_first`, or `position_last`"
3146    )]
3147    fn position<P>(self, predicate: P) -> Option<usize>
3148    where
3149        P: Fn(Self::Item) -> bool + Sync + Send,
3150    {
3151        self.position_any(predicate)
3152    }
3153
3154    /// Searches for items in the parallel iterator that match the given
3155    /// predicate, and returns their indices.
3156    ///
3157    /// # Examples
3158    ///
3159    /// ```
3160    /// use par_iter::prelude::*;
3161    ///
3162    /// let primes = vec![2, 3, 5, 7, 11, 13, 17, 19, 23, 29];
3163    ///
3164    /// // Find the positions of primes congruent to 1 modulo 6
3165    /// let p1mod6: Vec<_> = primes.par_iter().positions(|&p| p % 6 == 1).collect();
3166    /// assert_eq!(p1mod6, [3, 5, 7]); // primes 7, 13, and 19
3167    ///
3168    /// // Find the positions of primes congruent to 5 modulo 6
3169    /// let p5mod6: Vec<_> = primes.par_iter().positions(|&p| p % 6 == 5).collect();
3170    /// assert_eq!(p5mod6, [2, 4, 6, 8, 9]); // primes 5, 11, 17, 23, and 29
3171    /// ```
3172    fn positions<P>(self, predicate: P) -> Positions<Self, P>
3173    where
3174        P: Fn(Self::Item) -> bool + Sync + Send,
3175    {
3176        Positions::new(self, predicate)
3177    }
3178
3179    /// Produces a new iterator with the elements of this iterator in
3180    /// reverse order.
3181    ///
3182    /// # Examples
3183    ///
3184    /// ```
3185    /// use par_iter::prelude::*;
3186    ///
3187    /// let result: Vec<_> = (0..5)
3188    ///     .into_par_iter()
3189    ///     .rev()
3190    ///     .collect();
3191    ///
3192    /// assert_eq!(result, [4, 3, 2, 1, 0]);
3193    /// ```
3194    fn rev(self) -> Rev<Self> {
3195        Rev::new(self)
3196    }
3197
3198    /// Sets the minimum length of iterators desired to process in each
3199    /// rayon job.  Rayon will not split any smaller than this length, but
3200    /// of course an iterator could already be smaller to begin with.
3201    ///
3202    /// Producers like `zip` and `interleave` will use greater of the two
3203    /// minimums.
3204    /// Chained iterators and iterators inside `flat_map` may each use
3205    /// their own minimum length.
3206    ///
3207    /// # Examples
3208    ///
3209    /// ```
3210    /// use par_iter::prelude::*;
3211    ///
3212    /// let min = (0..1_000_000)
3213    ///     .into_par_iter()
3214    ///     .with_min_len(1234)
3215    ///     .fold(|| 0, |acc, _| acc + 1) // count how many are in this segment
3216    ///     .min().unwrap();
3217    ///
3218    /// assert!(min >= 1234);
3219    /// ```
3220    fn with_min_len(self, min: usize) -> MinLen<Self> {
3221        MinLen::new(self, min)
3222    }
3223
3224    /// Sets the maximum length of iterators desired to process in each
3225    /// rayon job.  Rayon will try to split at least below this length,
3226    /// unless that would put it below the length from `with_min_len()`.
3227    /// For example, given min=10 and max=15, a length of 16 will not be
3228    /// split any further.
3229    ///
3230    /// Producers like `zip` and `interleave` will use lesser of the two
3231    /// maximums.
3232    /// Chained iterators and iterators inside `flat_map` may each use
3233    /// their own maximum length.
3234    ///
3235    /// # Examples
3236    ///
3237    /// ```
3238    /// use par_iter::prelude::*;
3239    ///
3240    /// let max = (0..1_000_000)
3241    ///     .into_par_iter()
3242    ///     .with_max_len(1234)
3243    ///     .fold(|| 0, |acc, _| acc + 1) // count how many are in this segment
3244    ///     .max().unwrap();
3245    ///
3246    /// assert!(max <= 1234);
3247    /// ```
3248    fn with_max_len(self, max: usize) -> MaxLen<Self> {
3249        MaxLen::new(self, max)
3250    }
3251
    /// Produces an exact count of how many items this iterator will
    /// produce, presuming no panic occurs.
    ///
    /// Note that this only borrows the iterator (`&self`), so it can be
    /// queried without consuming it.
    ///
    /// # Examples
    ///
    /// ```
    /// use par_iter::prelude::*;
    ///
    /// let par_iter = (0..100).into_par_iter().zip(vec![0; 10]);
    /// assert_eq!(par_iter.len(), 10);
    ///
    /// let vec: Vec<_> = par_iter.collect();
    /// assert_eq!(vec.len(), 10);
    /// ```
    fn len(&self) -> usize;
3267
    /// Internal method used to define the behavior of this parallel
    /// iterator. You should not need to call this directly.
    ///
    /// This method causes the iterator `self` to start producing
    /// items and to feed them to the consumer `consumer` one by one.
    /// It may split the consumer before doing so to create the
    /// opportunity to produce in parallel. If a split does happen, it
    /// will inform the consumer of the index where the split should
    /// occur (unlike `ParallelIterator::drive_unindexed()`).
    ///
    /// Returns the consumer's final result (`C::Result`).
    ///
    /// See the [README] for more details on the internals of parallel
    /// iterators.
    ///
    /// [README]: https://github.com/rayon-rs/rayon/blob/main/src/iter/plumbing/README.md
    fn drive<C: Consumer<Self::Item>>(self, consumer: C) -> C::Result;
3283
    /// Internal method used to define the behavior of this parallel
    /// iterator. You should not need to call this directly.
    ///
    /// This method converts the iterator into a producer P and then
    /// invokes `callback.callback()` with P. Note that the type of
    /// this producer is not defined as part of the API, since
    /// `callback` must be defined generically for all producers. This
    /// allows the producer type to contain references; it also means
    /// that parallel iterators can adjust that type without causing a
    /// breaking change.
    ///
    /// Returns whatever value the callback produces (`CB::Output`).
    ///
    /// See the [README] for more details on the internals of parallel
    /// iterators.
    ///
    /// [README]: https://github.com/rayon-rs/rayon/blob/main/src/iter/plumbing/README.md
    fn with_producer<CB: ProducerCallback<Self::Item>>(self, callback: CB) -> CB::Output;
3300}
3301
/// `FromParallelIterator` implements the creation of a collection
/// from a [`ParallelIterator`]. By implementing
/// `FromParallelIterator` for a given type, you define how it will be
/// created from an iterator.
///
/// `FromParallelIterator` is used through [`ParallelIterator`]'s [`collect()`]
/// method.
///
/// [`ParallelIterator`]: trait.ParallelIterator.html
/// [`collect()`]: trait.ParallelIterator.html#method.collect
///
/// # Examples
///
/// Implementing `FromParallelIterator` for your type:
///
/// ```
/// use par_iter::prelude::*;
/// use std::mem;
///
/// struct BlackHole {
///     mass: usize,
/// }
///
/// impl<T: Send> FromParallelIterator<T> for BlackHole {
///     fn from_par_iter<I>(par_iter: I) -> Self
///         where I: IntoParallelIterator<Item = T>
///     {
///         let par_iter = par_iter.into_par_iter();
///         BlackHole {
///             mass: par_iter.count() * mem::size_of::<T>(),
///         }
///     }
/// }
///
/// let bh: BlackHole = (0i32..1000).into_par_iter().collect();
/// assert_eq!(bh.mass, 4000);
/// ```
pub trait FromParallelIterator<T>
where
    T: Send,
{
    /// Creates an instance of the collection from the parallel iterator
    /// `par_iter`.
    ///
    /// This method is usually not called directly; as noted above, it is
    /// what [`collect()`] invokes on the target collection type.
    ///
    /// If your collection is not naturally parallel, the easiest (and
    /// fastest) way to do this is often to collect `par_iter` into a
    /// [`LinkedList`] (via [`collect_vec_list`]) or another intermediate
    /// data structure and then sequentially extend your collection. However,
    /// a more 'native' technique is to use the [`par_iter.fold`] or
    /// [`par_iter.fold_with`] methods to create the collection.
    /// Alternatively, if your collection is 'natively' parallel, you
    /// can use `par_iter.for_each` to process each element in turn.
    ///
    /// [`collect()`]: trait.ParallelIterator.html#method.collect
    /// [`LinkedList`]: https://doc.rust-lang.org/std/collections/struct.LinkedList.html
    /// [`collect_vec_list`]: ParallelIterator::collect_vec_list
    /// [`par_iter.fold`]: trait.ParallelIterator.html#method.fold
    /// [`par_iter.fold_with`]: trait.ParallelIterator.html#method.fold_with
    /// [`par_iter.for_each`]: trait.ParallelIterator.html#method.for_each
    fn from_par_iter<I>(par_iter: I) -> Self
    where
        I: IntoParallelIterator<Item = T>;
}
3364
/// `ParallelExtend` extends an existing collection with items from a
/// [`ParallelIterator`].
///
/// [`ParallelIterator`]: trait.ParallelIterator.html
///
/// # Examples
///
/// Implementing `ParallelExtend` for your type:
///
/// ```
/// use par_iter::prelude::*;
/// use std::mem;
///
/// struct BlackHole {
///     mass: usize,
/// }
///
/// impl<T: Send> ParallelExtend<T> for BlackHole {
///     fn par_extend<I>(&mut self, par_iter: I)
///         where I: IntoParallelIterator<Item = T>
///     {
///         let par_iter = par_iter.into_par_iter();
///         self.mass += par_iter.count() * mem::size_of::<T>();
///     }
/// }
///
/// let mut bh = BlackHole { mass: 0 };
/// bh.par_extend(0i32..1000);
/// assert_eq!(bh.mass, 4000);
/// bh.par_extend(0i64..10);
/// assert_eq!(bh.mass, 4080);
/// ```
pub trait ParallelExtend<T>
where
    T: Send,
{
    /// Extends an instance of the collection with the elements drawn
    /// from the parallel iterator `par_iter`.
    ///
    /// Any value implementing `IntoParallelIterator` with a matching item
    /// type may serve as the source, as the example below shows.
    ///
    /// # Examples
    ///
    /// ```
    /// use par_iter::prelude::*;
    ///
    /// let mut vec = vec![];
    /// vec.par_extend(0..5);
    /// vec.par_extend((0..5).into_par_iter().map(|i| i * i));
    /// assert_eq!(vec, [0, 1, 2, 3, 4, 0, 1, 4, 9, 16]);
    /// ```
    fn par_extend<I>(&mut self, par_iter: I)
    where
        I: IntoParallelIterator<Item = T>;
}
3418
/// `ParallelDrainFull` creates a parallel iterator that moves all items
/// from a collection while retaining the original capacity.
///
/// Types which are indexable typically implement [`ParallelDrainRange`]
/// instead, where you can drain fully with `par_drain(..)`.
///
/// [`ParallelDrainRange`]: trait.ParallelDrainRange.html
pub trait ParallelDrainFull {
    /// The draining parallel iterator type that will be created.
    type Iter: ParallelIterator<Item = Self::Item>;

    /// The type of item that the parallel iterator will produce.
    /// This is usually the same as `IntoParallelIterator::Item`.
    type Item: Send;

    /// Returns a draining parallel iterator over an entire collection.
    ///
    /// When the iterator is dropped, all items are removed, even if the
    /// iterator was not fully consumed. If the iterator is leaked, for example
    /// using `std::mem::forget`, it is unspecified how many items are removed.
    ///
    /// The collection's allocated capacity is retained, as the example's
    /// final `capacity` check demonstrates.
    ///
    /// # Examples
    ///
    /// ```
    /// use par_iter::prelude::*;
    /// use std::collections::{BinaryHeap, HashSet};
    ///
    /// let squares: HashSet<i32> = (0..10).map(|x| x * x).collect();
    ///
    /// let mut heap: BinaryHeap<_> = squares.iter().copied().collect();
    /// assert_eq!(
    ///     // heaps are drained in arbitrary order
    ///     heap.par_drain()
    ///         .inspect(|x| assert!(squares.contains(x)))
    ///         .count(),
    ///     squares.len(),
    /// );
    /// assert!(heap.is_empty());
    /// assert!(heap.capacity() >= squares.len());
    /// ```
    fn par_drain(self) -> Self::Iter;
}
3461
/// `ParallelDrainRange` creates a parallel iterator that moves a range of items
/// from a collection while retaining the original capacity.
///
/// The `Idx` type parameter is the index type used to express the range; it
/// defaults to `usize`, which fits slice-like collections.
///
/// Types which are not indexable may implement [`ParallelDrainFull`] instead.
///
/// [`ParallelDrainFull`]: trait.ParallelDrainFull.html
pub trait ParallelDrainRange<Idx = usize> {
    /// The draining parallel iterator type that will be created.
    type Iter: ParallelIterator<Item = Self::Item>;

    /// The type of item that the parallel iterator will produce.
    /// This is usually the same as `IntoParallelIterator::Item`.
    type Item: Send;

    /// Returns a draining parallel iterator over a range of the collection.
    ///
    /// The `range` can be any standard range form — `..`, `a..`, `..b`,
    /// `..=b`, `a..b`, or `a..=b` — as demonstrated in the examples below.
    ///
    /// When the iterator is dropped, all items in the range are removed, even
    /// if the iterator was not fully consumed. If the iterator is leaked, for
    /// example using `std::mem::forget`, it is unspecified how many items are
    /// removed.
    ///
    /// # Examples
    ///
    /// ```
    /// use par_iter::prelude::*;
    ///
    /// let squares: Vec<i32> = (0..10).map(|x| x * x).collect();
    ///
    /// println!("RangeFull");
    /// let mut vec = squares.clone();
    /// assert!(vec.par_drain(..)
    ///            .eq(squares.par_iter().copied()));
    /// assert!(vec.is_empty());
    /// assert!(vec.capacity() >= squares.len());
    ///
    /// println!("RangeFrom");
    /// let mut vec = squares.clone();
    /// assert!(vec.par_drain(5..)
    ///            .eq(squares[5..].par_iter().copied()));
    /// assert_eq!(&vec[..], &squares[..5]);
    /// assert!(vec.capacity() >= squares.len());
    ///
    /// println!("RangeTo");
    /// let mut vec = squares.clone();
    /// assert!(vec.par_drain(..5)
    ///            .eq(squares[..5].par_iter().copied()));
    /// assert_eq!(&vec[..], &squares[5..]);
    /// assert!(vec.capacity() >= squares.len());
    ///
    /// println!("RangeToInclusive");
    /// let mut vec = squares.clone();
    /// assert!(vec.par_drain(..=5)
    ///            .eq(squares[..=5].par_iter().copied()));
    /// assert_eq!(&vec[..], &squares[6..]);
    /// assert!(vec.capacity() >= squares.len());
    ///
    /// println!("Range");
    /// let mut vec = squares.clone();
    /// assert!(vec.par_drain(3..7)
    ///            .eq(squares[3..7].par_iter().copied()));
    /// assert_eq!(&vec[..3], &squares[..3]);
    /// assert_eq!(&vec[3..], &squares[7..]);
    /// assert!(vec.capacity() >= squares.len());
    ///
    /// println!("RangeInclusive");
    /// let mut vec = squares.clone();
    /// assert!(vec.par_drain(3..=7)
    ///            .eq(squares[3..=7].par_iter().copied()));
    /// assert_eq!(&vec[..3], &squares[..3]);
    /// assert_eq!(&vec[3..], &squares[8..]);
    /// assert!(vec.capacity() >= squares.len());
    /// ```
    fn par_drain<R: RangeBounds<Idx>>(self, range: R) -> Self::Iter;
}
3536
3537/// We hide the `Try` trait in a private module, as it's only meant to be a
3538/// stable clone of the standard library's `Try` trait, as yet unstable.
3539mod private {
3540    use std::{
3541        convert::Infallible,
3542        ops::ControlFlow::{self, Break, Continue},
3543        task::Poll,
3544    };
3545
3546    /// Clone of `std::ops::Try`.
3547    ///
3548    /// Implementing this trait is not permitted outside of `rayon`.
3549    pub trait Try {
3550        private_decl! {}
3551
3552        type Output;
3553        type Residual;
3554
3555        fn from_output(output: Self::Output) -> Self;
3556
3557        fn from_residual(residual: Self::Residual) -> Self;
3558
3559        fn branch(self) -> ControlFlow<Self::Residual, Self::Output>;
3560    }
3561
3562    impl<B, C> Try for ControlFlow<B, C> {
3563        type Output = C;
3564        type Residual = ControlFlow<B, Infallible>;
3565
3566        private_impl! {}
3567
3568        fn from_output(output: Self::Output) -> Self {
3569            Continue(output)
3570        }
3571
3572        fn from_residual(residual: Self::Residual) -> Self {
3573            match residual {
3574                Break(b) => Break(b),
3575                #[allow(unreachable_patterns)]
3576                Continue(_) => unreachable!(),
3577            }
3578        }
3579
3580        fn branch(self) -> ControlFlow<Self::Residual, Self::Output> {
3581            match self {
3582                Continue(c) => Continue(c),
3583                Break(b) => Break(Break(b)),
3584            }
3585        }
3586    }
3587
3588    impl<T> Try for Option<T> {
3589        type Output = T;
3590        type Residual = Option<Infallible>;
3591
3592        private_impl! {}
3593
3594        fn from_output(output: Self::Output) -> Self {
3595            Some(output)
3596        }
3597
3598        fn from_residual(residual: Self::Residual) -> Self {
3599            match residual {
3600                None => None,
3601                #[allow(unreachable_patterns)]
3602                Some(_) => unreachable!(),
3603            }
3604        }
3605
3606        fn branch(self) -> ControlFlow<Self::Residual, Self::Output> {
3607            match self {
3608                Some(c) => Continue(c),
3609                None => Break(None),
3610            }
3611        }
3612    }
3613
3614    impl<T, E> Try for Result<T, E> {
3615        type Output = T;
3616        type Residual = Result<Infallible, E>;
3617
3618        private_impl! {}
3619
3620        fn from_output(output: Self::Output) -> Self {
3621            Ok(output)
3622        }
3623
3624        fn from_residual(residual: Self::Residual) -> Self {
3625            match residual {
3626                Err(e) => Err(e),
3627                #[allow(unreachable_patterns)]
3628                Ok(_) => unreachable!(),
3629            }
3630        }
3631
3632        fn branch(self) -> ControlFlow<Self::Residual, Self::Output> {
3633            match self {
3634                Ok(c) => Continue(c),
3635                Err(e) => Break(Err(e)),
3636            }
3637        }
3638    }
3639
3640    impl<T, E> Try for Poll<Result<T, E>> {
3641        type Output = Poll<T>;
3642        type Residual = Result<Infallible, E>;
3643
3644        private_impl! {}
3645
3646        fn from_output(output: Self::Output) -> Self {
3647            output.map(Ok)
3648        }
3649
3650        fn from_residual(residual: Self::Residual) -> Self {
3651            match residual {
3652                Err(e) => Poll::Ready(Err(e)),
3653                #[allow(unreachable_patterns)]
3654                Ok(_) => unreachable!(),
3655            }
3656        }
3657
3658        fn branch(self) -> ControlFlow<Self::Residual, Self::Output> {
3659            match self {
3660                Poll::Pending => Continue(Poll::Pending),
3661                Poll::Ready(Ok(c)) => Continue(Poll::Ready(c)),
3662                Poll::Ready(Err(e)) => Break(Err(e)),
3663            }
3664        }
3665    }
3666
3667    impl<T, E> Try for Poll<Option<Result<T, E>>> {
3668        type Output = Poll<Option<T>>;
3669        type Residual = Result<Infallible, E>;
3670
3671        private_impl! {}
3672
3673        fn from_output(output: Self::Output) -> Self {
3674            match output {
3675                Poll::Ready(o) => Poll::Ready(o.map(Ok)),
3676                Poll::Pending => Poll::Pending,
3677            }
3678        }
3679
3680        fn from_residual(residual: Self::Residual) -> Self {
3681            match residual {
3682                Err(e) => Poll::Ready(Some(Err(e))),
3683                #[allow(unreachable_patterns)]
3684                Ok(_) => unreachable!(),
3685            }
3686        }
3687
3688        fn branch(self) -> ControlFlow<Self::Residual, Self::Output> {
3689            match self {
3690                Poll::Pending => Continue(Poll::Pending),
3691                Poll::Ready(None) => Continue(Poll::Ready(None)),
3692                Poll::Ready(Some(Ok(c))) => Continue(Poll::Ready(Some(c))),
3693                Poll::Ready(Some(Err(e))) => Break(Err(e)),
3694            }
3695        }
3696    }
3697}