rayon_wasm/iter/mod.rs
1//! Traits for writing parallel programs using an iterator-style interface
2//!
3//! You will rarely need to interact with this module directly unless you have
4//! need to name one of the iterator types.
5//!
6//! Parallel iterators make it easy to write iterator-like chains that
7//! execute in parallel: typically all you have to do is convert the
8//! first `.iter()` (or `iter_mut()`, `into_iter()`, etc) method into
9//! `par_iter()` (or `par_iter_mut()`, `into_par_iter()`, etc). For
10//! example, to compute the sum of the squares of a sequence of
11//! integers, one might write:
12//!
13//! ```rust
14//! use rayon::prelude::*;
15//! fn sum_of_squares(input: &[i32]) -> i32 {
16//! input.par_iter()
17//! .map(|i| i * i)
18//! .sum()
19//! }
20//! ```
21//!
22//! Or, to increment all the integers in a slice, you could write:
23//!
24//! ```rust
25//! use rayon::prelude::*;
26//! fn increment_all(input: &mut [i32]) {
27//! input.par_iter_mut()
28//! .for_each(|p| *p += 1);
29//! }
30//! ```
31//!
32//! To use parallel iterators, first import the traits by adding
33//! something like `use rayon::prelude::*` to your module. You can
34//! then call `par_iter`, `par_iter_mut`, or `into_par_iter` to get a
35//! parallel iterator. Like a [regular iterator][], parallel
36//! iterators work by first constructing a computation and then
37//! executing it.
38//!
39//! In addition to `par_iter()` and friends, some types offer other
40//! ways to create (or consume) parallel iterators:
41//!
42//! - Slices (`&[T]`, `&mut [T]`) offer methods like `par_split` and
43//! `par_windows`, as well as various parallel sorting
44//! operations. See [the `ParallelSlice` trait] for the full list.
45//! - Strings (`&str`) offer methods like `par_split` and `par_lines`.
46//! See [the `ParallelString` trait] for the full list.
47//! - Various collections offer [`par_extend`], which grows a
48//! collection given a parallel iterator. (If you don't have a
49//! collection to extend, you can use [`collect()`] to create a new
50//! one from scratch.)
51//!
52//! [the `ParallelSlice` trait]: ../slice/trait.ParallelSlice.html
53//! [the `ParallelString` trait]: ../str/trait.ParallelString.html
54//! [`par_extend`]: trait.ParallelExtend.html
55//! [`collect()`]: trait.ParallelIterator.html#method.collect
56//!
57//! To see the full range of methods available on parallel iterators,
58//! check out the [`ParallelIterator`] and [`IndexedParallelIterator`]
59//! traits.
60//!
61//! If you'd like to build a custom parallel iterator, or to write your own
62//! combinator, then check out the [split] function and the [plumbing] module.
63//!
64//! [regular iterator]: https://doc.rust-lang.org/std/iter/trait.Iterator.html
65//! [`ParallelIterator`]: trait.ParallelIterator.html
66//! [`IndexedParallelIterator`]: trait.IndexedParallelIterator.html
67//! [split]: fn.split.html
68//! [plumbing]: plumbing/index.html
69//!
70//! Note: Several of the `ParallelIterator` methods rely on a `Try` trait which
71//! has been deliberately obscured from the public API. This trait is intended
72//! to mirror the unstable `std::ops::Try` with implementations for `Option` and
73//! `Result`, where `Some`/`Ok` values will let those iterators continue, but
74//! `None`/`Err` values will exit early.
75//!
76//! A note about object safety: It is currently _not_ possible to wrap
77//! a `ParallelIterator` (or any trait that depends on it) using a
78//! `Box<dyn ParallelIterator>` or other kind of dynamic allocation,
79//! because `ParallelIterator` is **not object-safe**.
80//! (This keeps the implementation simpler and allows extra optimizations.)
81
82use self::plumbing::*;
83use self::private::Try;
84pub use either::Either;
85use std::cmp::{self, Ordering};
86use std::iter::{Product, Sum};
87use std::ops::{Fn, RangeBounds};
88
89pub mod plumbing;
90
91#[cfg(test)]
92mod test;
93
94// There is a method to the madness here:
95//
96// - These modules are private but expose certain types to the end-user
97// (e.g., `enumerate::Enumerate`) -- specifically, the types that appear in the
98// public API surface of the `ParallelIterator` traits.
99// - In **this** module, those public types are always used unprefixed, which forces
100// us to add a `pub use` and helps identify if we missed anything.
101// - In contrast, items that appear **only** in the body of a method,
102// e.g. `find::find()`, are always used **prefixed**, so that they
103// can be readily distinguished.
104
105mod chain;
106mod chunks;
107mod cloned;
108mod collect;
109mod copied;
110mod empty;
111mod enumerate;
112mod extend;
113mod filter;
114mod filter_map;
115mod find;
116mod find_first_last;
117mod flat_map;
118mod flat_map_iter;
119mod flatten;
120mod flatten_iter;
121mod fold;
122mod fold_chunks;
123mod fold_chunks_with;
124mod for_each;
125mod from_par_iter;
126mod inspect;
127mod interleave;
128mod interleave_shortest;
129mod intersperse;
130mod len;
131mod map;
132mod map_with;
133mod multizip;
134mod noop;
135mod once;
136mod panic_fuse;
137mod par_bridge;
138mod positions;
139mod product;
140mod reduce;
141mod repeat;
142mod rev;
143mod skip;
144mod skip_any;
145mod skip_any_while;
146mod splitter;
147mod step_by;
148mod sum;
149mod take;
150mod take_any;
151mod take_any_while;
152mod try_fold;
153mod try_reduce;
154mod try_reduce_with;
155mod unzip;
156mod update;
157mod while_some;
158mod zip;
159mod zip_eq;
160
161pub use self::{
162 chain::Chain,
163 chunks::Chunks,
164 cloned::Cloned,
165 copied::Copied,
166 empty::{empty, Empty},
167 enumerate::Enumerate,
168 filter::Filter,
169 filter_map::FilterMap,
170 flat_map::FlatMap,
171 flat_map_iter::FlatMapIter,
172 flatten::Flatten,
173 flatten_iter::FlattenIter,
174 fold::{Fold, FoldWith},
175 fold_chunks::FoldChunks,
176 fold_chunks_with::FoldChunksWith,
177 inspect::Inspect,
178 interleave::Interleave,
179 interleave_shortest::InterleaveShortest,
180 intersperse::Intersperse,
181 len::{MaxLen, MinLen},
182 map::Map,
183 map_with::{MapInit, MapWith},
184 multizip::MultiZip,
185 once::{once, Once},
186 panic_fuse::PanicFuse,
187 par_bridge::{IterBridge, ParallelBridge},
188 positions::Positions,
189 repeat::{repeat, repeatn, Repeat, RepeatN},
190 rev::Rev,
191 skip::Skip,
192 skip_any::SkipAny,
193 skip_any_while::SkipAnyWhile,
194 splitter::{split, Split},
195 step_by::StepBy,
196 take::Take,
197 take_any::TakeAny,
198 take_any_while::TakeAnyWhile,
199 try_fold::{TryFold, TryFoldWith},
200 update::Update,
201 while_some::WhileSome,
202 zip::Zip,
203 zip_eq::ZipEq,
204};
205
206/// `IntoParallelIterator` implements the conversion to a [`ParallelIterator`].
207///
/// By implementing `IntoParallelIterator` for a type, you define how it will
/// be converted into a parallel iterator. This is a parallel version of the standard
210/// library's [`std::iter::IntoIterator`] trait.
211///
212/// [`ParallelIterator`]: trait.ParallelIterator.html
213/// [`std::iter::IntoIterator`]: https://doc.rust-lang.org/std/iter/trait.IntoIterator.html
pub trait IntoParallelIterator {
    /// The parallel iterator type that will be created.
    type Iter: ParallelIterator<Item = Self::Item>;

    /// The type of item that the parallel iterator will produce.
    /// Items must be `Send` so they can move between worker threads.
    type Item: Send;

    /// Converts `self` into a parallel iterator.
    ///
    /// # Examples
    ///
    /// ```
    /// use rayon::prelude::*;
    ///
    /// println!("counting in parallel:");
    /// (0..100).into_par_iter()
    ///     .for_each(|i| println!("{}", i));
    /// ```
    ///
    /// This conversion is often implicit for arguments to methods like [`zip`].
    ///
    /// ```
    /// use rayon::prelude::*;
    ///
    /// let v: Vec<_> = (0..5).into_par_iter().zip(5..10).collect();
    /// assert_eq!(v, [(0, 5), (1, 6), (2, 7), (3, 8), (4, 9)]);
    /// ```
    ///
    /// [`zip`]: trait.IndexedParallelIterator.html#method.zip
    fn into_par_iter(self) -> Self::Iter;
}
245
246/// `IntoParallelRefIterator` implements the conversion to a
247/// [`ParallelIterator`], providing shared references to the data.
248///
249/// This is a parallel version of the `iter()` method
250/// defined by various collections.
251///
252/// This trait is automatically implemented
253/// `for I where &I: IntoParallelIterator`. In most cases, users
254/// will want to implement [`IntoParallelIterator`] rather than implement
255/// this trait directly.
256///
257/// [`ParallelIterator`]: trait.ParallelIterator.html
258/// [`IntoParallelIterator`]: trait.IntoParallelIterator.html
pub trait IntoParallelRefIterator<'data> {
    /// The type of the parallel iterator that will be returned.
    type Iter: ParallelIterator<Item = Self::Item>;

    /// The type of item that the parallel iterator will produce.
    /// This will typically be an `&'data T` reference type.
    type Item: Send + 'data;

    /// Converts `self` into a parallel iterator.
    ///
    /// The returned iterator borrows `self` for `'data`, so the underlying
    /// collection must outlive the whole parallel iteration.
    ///
    /// # Examples
    ///
    /// ```
    /// use rayon::prelude::*;
    ///
    /// let v: Vec<_> = (0..100).collect();
    /// assert_eq!(v.par_iter().sum::<i32>(), 100 * 99 / 2);
    ///
    /// // `v.par_iter()` is shorthand for `(&v).into_par_iter()`,
    /// // producing the exact same references.
    /// assert!(v.par_iter().zip(&v)
    ///         .all(|(a, b)| std::ptr::eq(a, b)));
    /// ```
    fn par_iter(&'data self) -> Self::Iter;
}
284
285impl<'data, I: 'data + ?Sized> IntoParallelRefIterator<'data> for I
286where
287 &'data I: IntoParallelIterator,
288{
289 type Iter = <&'data I as IntoParallelIterator>::Iter;
290 type Item = <&'data I as IntoParallelIterator>::Item;
291
292 fn par_iter(&'data self) -> Self::Iter {
293 self.into_par_iter()
294 }
295}
296
297/// `IntoParallelRefMutIterator` implements the conversion to a
298/// [`ParallelIterator`], providing mutable references to the data.
299///
300/// This is a parallel version of the `iter_mut()` method
301/// defined by various collections.
302///
303/// This trait is automatically implemented
304/// `for I where &mut I: IntoParallelIterator`. In most cases, users
305/// will want to implement [`IntoParallelIterator`] rather than implement
306/// this trait directly.
307///
308/// [`ParallelIterator`]: trait.ParallelIterator.html
309/// [`IntoParallelIterator`]: trait.IntoParallelIterator.html
pub trait IntoParallelRefMutIterator<'data> {
    /// The type of iterator that will be created.
    type Iter: ParallelIterator<Item = Self::Item>;

    /// The type of item that will be produced; this is typically an
    /// `&'data mut T` reference.
    type Item: Send + 'data;

    /// Creates the parallel iterator from `self`.
    ///
    /// The returned iterator borrows `self` mutably for `'data`, so no other
    /// access to the collection is possible while iterating.
    ///
    /// # Examples
    ///
    /// ```
    /// use rayon::prelude::*;
    ///
    /// let mut v = vec![0usize; 5];
    /// v.par_iter_mut().enumerate().for_each(|(i, x)| *x = i);
    /// assert_eq!(v, [0, 1, 2, 3, 4]);
    /// ```
    fn par_iter_mut(&'data mut self) -> Self::Iter;
}
331
332impl<'data, I: 'data + ?Sized> IntoParallelRefMutIterator<'data> for I
333where
334 &'data mut I: IntoParallelIterator,
335{
336 type Iter = <&'data mut I as IntoParallelIterator>::Iter;
337 type Item = <&'data mut I as IntoParallelIterator>::Item;
338
339 fn par_iter_mut(&'data mut self) -> Self::Iter {
340 self.into_par_iter()
341 }
342}
343
344/// Parallel version of the standard iterator trait.
345///
346/// The combinators on this trait are available on **all** parallel
347/// iterators. Additional methods can be found on the
348/// [`IndexedParallelIterator`] trait: those methods are only
349/// available for parallel iterators where the number of items is
350/// known in advance (so, e.g., after invoking `filter`, those methods
351/// become unavailable).
352///
353/// For examples of using parallel iterators, see [the docs on the
354/// `iter` module][iter].
355///
356/// [iter]: index.html
357/// [`IndexedParallelIterator`]: trait.IndexedParallelIterator.html
358pub trait ParallelIterator: Sized + Send {
359 /// The type of item that this parallel iterator produces.
360 /// For example, if you use the [`for_each`] method, this is the type of
361 /// item that your closure will be invoked with.
362 ///
363 /// [`for_each`]: #method.for_each
364 type Item: Send;
365
366 /// Executes `OP` on each item produced by the iterator, in parallel.
367 ///
368 /// # Examples
369 ///
370 /// ```
371 /// use rayon::prelude::*;
372 ///
373 /// (0..100).into_par_iter().for_each(|x| println!("{:?}", x));
374 /// ```
    fn for_each<OP>(self, op: OP)
    where
        OP: Fn(Self::Item) + Sync + Send,
    {
        // Delegates to the private `for_each` module; `op` is passed by
        // reference so the single closure instance is shared across threads.
        for_each::for_each(self, &op)
    }
381
382 /// Executes `OP` on the given `init` value with each item produced by
383 /// the iterator, in parallel.
384 ///
385 /// The `init` value will be cloned only as needed to be paired with
386 /// the group of items in each rayon job. It does not require the type
387 /// to be `Sync`.
388 ///
389 /// # Examples
390 ///
391 /// ```
392 /// use std::sync::mpsc::channel;
393 /// use rayon::prelude::*;
394 ///
395 /// let (sender, receiver) = channel();
396 ///
397 /// (0..5).into_par_iter().for_each_with(sender, |s, x| s.send(x).unwrap());
398 ///
399 /// let mut res: Vec<_> = receiver.iter().collect();
400 ///
401 /// res.sort();
402 ///
403 /// assert_eq!(&res[..], &[0, 1, 2, 3, 4])
404 /// ```
405 fn for_each_with<OP, T>(self, init: T, op: OP)
406 where
407 OP: Fn(&mut T, Self::Item) + Sync + Send,
408 T: Send + Clone,
409 {
410 self.map_with(init, op).collect()
411 }
412
413 /// Executes `OP` on a value returned by `init` with each item produced by
414 /// the iterator, in parallel.
415 ///
416 /// The `init` function will be called only as needed for a value to be
417 /// paired with the group of items in each rayon job. There is no
418 /// constraint on that returned type at all!
419 ///
420 /// # Examples
421 ///
422 /// ```
423 /// use rand::Rng;
424 /// use rayon::prelude::*;
425 ///
426 /// let mut v = vec![0u8; 1_000_000];
427 ///
428 /// v.par_chunks_mut(1000)
429 /// .for_each_init(
430 /// || rand::thread_rng(),
431 /// |rng, chunk| rng.fill(chunk),
432 /// );
433 ///
434 /// // There's a remote chance that this will fail...
435 /// for i in 0u8..=255 {
436 /// assert!(v.contains(&i));
437 /// }
438 /// ```
439 fn for_each_init<OP, INIT, T>(self, init: INIT, op: OP)
440 where
441 OP: Fn(&mut T, Self::Item) + Sync + Send,
442 INIT: Fn() -> T + Sync + Send,
443 {
444 self.map_init(init, op).collect()
445 }
446
447 /// Executes a fallible `OP` on each item produced by the iterator, in parallel.
448 ///
449 /// If the `OP` returns `Result::Err` or `Option::None`, we will attempt to
450 /// stop processing the rest of the items in the iterator as soon as
451 /// possible, and we will return that terminating value. Otherwise, we will
452 /// return an empty `Result::Ok(())` or `Option::Some(())`. If there are
453 /// multiple errors in parallel, it is not specified which will be returned.
454 ///
455 /// # Examples
456 ///
457 /// ```
458 /// use rayon::prelude::*;
459 /// use std::io::{self, Write};
460 ///
461 /// // This will stop iteration early if there's any write error, like
462 /// // having piped output get closed on the other end.
463 /// (0..100).into_par_iter()
464 /// .try_for_each(|x| writeln!(io::stdout(), "{:?}", x))
465 /// .expect("expected no write errors");
466 /// ```
467 fn try_for_each<OP, R>(self, op: OP) -> R
468 where
469 OP: Fn(Self::Item) -> R + Sync + Send,
470 R: Try<Output = ()> + Send,
471 {
472 fn ok<R: Try<Output = ()>>(_: (), _: ()) -> R {
473 R::from_output(())
474 }
475
476 self.map(op).try_reduce(<()>::default, ok)
477 }
478
479 /// Executes a fallible `OP` on the given `init` value with each item
480 /// produced by the iterator, in parallel.
481 ///
482 /// This combines the `init` semantics of [`for_each_with()`] and the
483 /// failure semantics of [`try_for_each()`].
484 ///
485 /// [`for_each_with()`]: #method.for_each_with
486 /// [`try_for_each()`]: #method.try_for_each
487 ///
488 /// # Examples
489 ///
490 /// ```
491 /// use std::sync::mpsc::channel;
492 /// use rayon::prelude::*;
493 ///
494 /// let (sender, receiver) = channel();
495 ///
496 /// (0..5).into_par_iter()
497 /// .try_for_each_with(sender, |s, x| s.send(x))
498 /// .expect("expected no send errors");
499 ///
500 /// let mut res: Vec<_> = receiver.iter().collect();
501 ///
502 /// res.sort();
503 ///
504 /// assert_eq!(&res[..], &[0, 1, 2, 3, 4])
505 /// ```
506 fn try_for_each_with<OP, T, R>(self, init: T, op: OP) -> R
507 where
508 OP: Fn(&mut T, Self::Item) -> R + Sync + Send,
509 T: Send + Clone,
510 R: Try<Output = ()> + Send,
511 {
512 fn ok<R: Try<Output = ()>>(_: (), _: ()) -> R {
513 R::from_output(())
514 }
515
516 self.map_with(init, op).try_reduce(<()>::default, ok)
517 }
518
519 /// Executes a fallible `OP` on a value returned by `init` with each item
520 /// produced by the iterator, in parallel.
521 ///
522 /// This combines the `init` semantics of [`for_each_init()`] and the
523 /// failure semantics of [`try_for_each()`].
524 ///
525 /// [`for_each_init()`]: #method.for_each_init
526 /// [`try_for_each()`]: #method.try_for_each
527 ///
528 /// # Examples
529 ///
530 /// ```
531 /// use rand::Rng;
532 /// use rayon::prelude::*;
533 ///
534 /// let mut v = vec![0u8; 1_000_000];
535 ///
536 /// v.par_chunks_mut(1000)
537 /// .try_for_each_init(
538 /// || rand::thread_rng(),
539 /// |rng, chunk| rng.try_fill(chunk),
540 /// )
541 /// .expect("expected no rand errors");
542 ///
543 /// // There's a remote chance that this will fail...
544 /// for i in 0u8..=255 {
545 /// assert!(v.contains(&i));
546 /// }
547 /// ```
548 fn try_for_each_init<OP, INIT, T, R>(self, init: INIT, op: OP) -> R
549 where
550 OP: Fn(&mut T, Self::Item) -> R + Sync + Send,
551 INIT: Fn() -> T + Sync + Send,
552 R: Try<Output = ()> + Send,
553 {
554 fn ok<R: Try<Output = ()>>(_: (), _: ()) -> R {
555 R::from_output(())
556 }
557
558 self.map_init(init, op).try_reduce(<()>::default, ok)
559 }
560
561 /// Counts the number of items in this parallel iterator.
562 ///
563 /// # Examples
564 ///
565 /// ```
566 /// use rayon::prelude::*;
567 ///
568 /// let count = (0..100).into_par_iter().count();
569 ///
570 /// assert_eq!(count, 100);
571 /// ```
572 fn count(self) -> usize {
573 fn one<T>(_: T) -> usize {
574 1
575 }
576
577 self.map(one).sum()
578 }
579
580 /// Applies `map_op` to each item of this iterator, producing a new
581 /// iterator with the results.
582 ///
583 /// # Examples
584 ///
585 /// ```
586 /// use rayon::prelude::*;
587 ///
588 /// let mut par_iter = (0..5).into_par_iter().map(|x| x * 2);
589 ///
590 /// let doubles: Vec<_> = par_iter.collect();
591 ///
592 /// assert_eq!(&doubles[..], &[0, 2, 4, 6, 8]);
593 /// ```
    fn map<F, R>(self, map_op: F) -> Map<Self, F>
    where
        F: Fn(Self::Item) -> R + Sync + Send,
        R: Send,
    {
        // Lazy adaptor: `map_op` only runs once the returned iterator is driven.
        Map::new(self, map_op)
    }
601
602 /// Applies `map_op` to the given `init` value with each item of this
603 /// iterator, producing a new iterator with the results.
604 ///
605 /// The `init` value will be cloned only as needed to be paired with
606 /// the group of items in each rayon job. It does not require the type
607 /// to be `Sync`.
608 ///
609 /// # Examples
610 ///
611 /// ```
612 /// use std::sync::mpsc::channel;
613 /// use rayon::prelude::*;
614 ///
615 /// let (sender, receiver) = channel();
616 ///
617 /// let a: Vec<_> = (0..5)
618 /// .into_par_iter() // iterating over i32
619 /// .map_with(sender, |s, x| {
620 /// s.send(x).unwrap(); // sending i32 values through the channel
621 /// x // returning i32
622 /// })
623 /// .collect(); // collecting the returned values into a vector
624 ///
625 /// let mut b: Vec<_> = receiver.iter() // iterating over the values in the channel
626 /// .collect(); // and collecting them
627 /// b.sort();
628 ///
629 /// assert_eq!(a, b);
630 /// ```
    fn map_with<F, T, R>(self, init: T, map_op: F) -> MapWith<Self, T, F>
    where
        F: Fn(&mut T, Self::Item) -> R + Sync + Send,
        T: Send + Clone,
        R: Send,
    {
        // `init` is cloned per rayon job (see the doc comment), not per item.
        MapWith::new(self, init, map_op)
    }
639
640 /// Applies `map_op` to a value returned by `init` with each item of this
641 /// iterator, producing a new iterator with the results.
642 ///
643 /// The `init` function will be called only as needed for a value to be
644 /// paired with the group of items in each rayon job. There is no
645 /// constraint on that returned type at all!
646 ///
647 /// # Examples
648 ///
649 /// ```
650 /// use rand::Rng;
651 /// use rayon::prelude::*;
652 ///
653 /// let a: Vec<_> = (1i32..1_000_000)
654 /// .into_par_iter()
655 /// .map_init(
656 /// || rand::thread_rng(), // get the thread-local RNG
657 /// |rng, x| if rng.gen() { // randomly negate items
658 /// -x
659 /// } else {
660 /// x
661 /// },
662 /// ).collect();
663 ///
664 /// // There's a remote chance that this will fail...
665 /// assert!(a.iter().any(|&x| x < 0));
666 /// assert!(a.iter().any(|&x| x > 0));
667 /// ```
    fn map_init<F, INIT, T, R>(self, init: INIT, map_op: F) -> MapInit<Self, INIT, F>
    where
        F: Fn(&mut T, Self::Item) -> R + Sync + Send,
        INIT: Fn() -> T + Sync + Send,
        R: Send,
    {
        // `init` is invoked as needed to build per-job local state.
        MapInit::new(self, init, map_op)
    }
676
677 /// Creates an iterator which clones all of its elements. This may be
678 /// useful when you have an iterator over `&T`, but you need `T`, and
679 /// that type implements `Clone`. See also [`copied()`].
680 ///
681 /// [`copied()`]: #method.copied
682 ///
683 /// # Examples
684 ///
685 /// ```
686 /// use rayon::prelude::*;
687 ///
688 /// let a = [1, 2, 3];
689 ///
690 /// let v_cloned: Vec<_> = a.par_iter().cloned().collect();
691 ///
692 /// // cloned is the same as .map(|&x| x), for integers
693 /// let v_map: Vec<_> = a.par_iter().map(|&x| x).collect();
694 ///
695 /// assert_eq!(v_cloned, vec![1, 2, 3]);
696 /// assert_eq!(v_map, vec![1, 2, 3]);
697 /// ```
    fn cloned<'a, T>(self) -> Cloned<Self>
    where
        T: 'a + Clone + Send,
        Self: ParallelIterator<Item = &'a T>,
    {
        // Adaptor that clones each `&T` item on demand, yielding owned `T`s.
        Cloned::new(self)
    }
705
706 /// Creates an iterator which copies all of its elements. This may be
707 /// useful when you have an iterator over `&T`, but you need `T`, and
708 /// that type implements `Copy`. See also [`cloned()`].
709 ///
710 /// [`cloned()`]: #method.cloned
711 ///
712 /// # Examples
713 ///
714 /// ```
715 /// use rayon::prelude::*;
716 ///
717 /// let a = [1, 2, 3];
718 ///
719 /// let v_copied: Vec<_> = a.par_iter().copied().collect();
720 ///
721 /// // copied is the same as .map(|&x| x), for integers
722 /// let v_map: Vec<_> = a.par_iter().map(|&x| x).collect();
723 ///
724 /// assert_eq!(v_copied, vec![1, 2, 3]);
725 /// assert_eq!(v_map, vec![1, 2, 3]);
726 /// ```
    fn copied<'a, T>(self) -> Copied<Self>
    where
        T: 'a + Copy + Send,
        Self: ParallelIterator<Item = &'a T>,
    {
        // Adaptor that copies each `&T` item on demand, yielding owned `T`s.
        Copied::new(self)
    }
734
735 /// Applies `inspect_op` to a reference to each item of this iterator,
736 /// producing a new iterator passing through the original items. This is
737 /// often useful for debugging to see what's happening in iterator stages.
738 ///
739 /// # Examples
740 ///
741 /// ```
742 /// use rayon::prelude::*;
743 ///
744 /// let a = [1, 4, 2, 3];
745 ///
746 /// // this iterator sequence is complex.
747 /// let sum = a.par_iter()
748 /// .cloned()
749 /// .filter(|&x| x % 2 == 0)
750 /// .reduce(|| 0, |sum, i| sum + i);
751 ///
752 /// println!("{}", sum);
753 ///
754 /// // let's add some inspect() calls to investigate what's happening
755 /// let sum = a.par_iter()
756 /// .cloned()
757 /// .inspect(|x| println!("about to filter: {}", x))
758 /// .filter(|&x| x % 2 == 0)
759 /// .inspect(|x| println!("made it through filter: {}", x))
760 /// .reduce(|| 0, |sum, i| sum + i);
761 ///
762 /// println!("{}", sum);
763 /// ```
    fn inspect<OP>(self, inspect_op: OP) -> Inspect<Self, OP>
    where
        OP: Fn(&Self::Item) + Sync + Send,
    {
        // Passes items through unchanged; `inspect_op` only sees a shared
        // reference to each one.
        Inspect::new(self, inspect_op)
    }
770
771 /// Mutates each item of this iterator before yielding it.
772 ///
773 /// # Examples
774 ///
775 /// ```
776 /// use rayon::prelude::*;
777 ///
778 /// let par_iter = (0..5).into_par_iter().update(|x| {*x *= 2;});
779 ///
780 /// let doubles: Vec<_> = par_iter.collect();
781 ///
782 /// assert_eq!(&doubles[..], &[0, 2, 4, 6, 8]);
783 /// ```
    fn update<F>(self, update_op: F) -> Update<Self, F>
    where
        F: Fn(&mut Self::Item) + Sync + Send,
    {
        // `update_op` mutates each item in place before it is yielded.
        Update::new(self, update_op)
    }
790
791 /// Applies `filter_op` to each item of this iterator, producing a new
792 /// iterator with only the items that gave `true` results.
793 ///
794 /// # Examples
795 ///
796 /// ```
797 /// use rayon::prelude::*;
798 ///
799 /// let mut par_iter = (0..10).into_par_iter().filter(|x| x % 2 == 0);
800 ///
801 /// let even_numbers: Vec<_> = par_iter.collect();
802 ///
803 /// assert_eq!(&even_numbers[..], &[0, 2, 4, 6, 8]);
804 /// ```
    fn filter<P>(self, filter_op: P) -> Filter<Self, P>
    where
        P: Fn(&Self::Item) -> bool + Sync + Send,
    {
        // Keeps only the items for which `filter_op` returns `true`.
        Filter::new(self, filter_op)
    }
811
812 /// Applies `filter_op` to each item of this iterator to get an `Option`,
813 /// producing a new iterator with only the items from `Some` results.
814 ///
815 /// # Examples
816 ///
817 /// ```
818 /// use rayon::prelude::*;
819 ///
820 /// let mut par_iter = (0..10).into_par_iter()
821 /// .filter_map(|x| {
822 /// if x % 2 == 0 { Some(x * 3) }
823 /// else { None }
824 /// });
825 ///
826 /// let even_numbers: Vec<_> = par_iter.collect();
827 ///
828 /// assert_eq!(&even_numbers[..], &[0, 6, 12, 18, 24]);
829 /// ```
    fn filter_map<P, R>(self, filter_op: P) -> FilterMap<Self, P>
    where
        P: Fn(Self::Item) -> Option<R> + Sync + Send,
        R: Send,
    {
        // Maps and filters in one pass: `None` results are discarded.
        FilterMap::new(self, filter_op)
    }
837
838 /// Applies `map_op` to each item of this iterator to get nested parallel iterators,
839 /// producing a new parallel iterator that flattens these back into one.
840 ///
841 /// See also [`flat_map_iter`](#method.flat_map_iter).
842 ///
843 /// # Examples
844 ///
845 /// ```
846 /// use rayon::prelude::*;
847 ///
848 /// let a = [[1, 2], [3, 4], [5, 6], [7, 8]];
849 ///
850 /// let par_iter = a.par_iter().cloned().flat_map(|a| a.to_vec());
851 ///
852 /// let vec: Vec<_> = par_iter.collect();
853 ///
854 /// assert_eq!(&vec[..], &[1, 2, 3, 4, 5, 6, 7, 8]);
855 /// ```
    fn flat_map<F, PI>(self, map_op: F) -> FlatMap<Self, F>
    where
        F: Fn(Self::Item) -> PI + Sync + Send,
        PI: IntoParallelIterator,
    {
        // The nested iterators are themselves parallel and may be split
        // further (contrast with `flat_map_iter`).
        FlatMap::new(self, map_op)
    }
863
864 /// Applies `map_op` to each item of this iterator to get nested serial iterators,
865 /// producing a new parallel iterator that flattens these back into one.
866 ///
867 /// # `flat_map_iter` versus `flat_map`
868 ///
869 /// These two methods are similar but behave slightly differently. With [`flat_map`],
870 /// each of the nested iterators must be a parallel iterator, and they will be further
871 /// split up with nested parallelism. With `flat_map_iter`, each nested iterator is a
872 /// sequential `Iterator`, and we only parallelize _between_ them, while the items
873 /// produced by each nested iterator are processed sequentially.
874 ///
875 /// When choosing between these methods, consider whether nested parallelism suits the
876 /// potential iterators at hand. If there's little computation involved, or its length
877 /// is much less than the outer parallel iterator, then it may perform better to avoid
878 /// the overhead of parallelism, just flattening sequentially with `flat_map_iter`.
879 /// If there is a lot of computation, potentially outweighing the outer parallel
880 /// iterator, then the nested parallelism of `flat_map` may be worthwhile.
881 ///
882 /// [`flat_map`]: #method.flat_map
883 ///
884 /// # Examples
885 ///
886 /// ```
887 /// use rayon::prelude::*;
888 /// use std::cell::RefCell;
889 ///
890 /// let a = [[1, 2], [3, 4], [5, 6], [7, 8]];
891 ///
892 /// let par_iter = a.par_iter().flat_map_iter(|a| {
893 /// // The serial iterator doesn't have to be thread-safe, just its items.
894 /// let cell_iter = RefCell::new(a.iter().cloned());
895 /// std::iter::from_fn(move || cell_iter.borrow_mut().next())
896 /// });
897 ///
898 /// let vec: Vec<_> = par_iter.collect();
899 ///
900 /// assert_eq!(&vec[..], &[1, 2, 3, 4, 5, 6, 7, 8]);
901 /// ```
    fn flat_map_iter<F, SI>(self, map_op: F) -> FlatMapIter<Self, F>
    where
        F: Fn(Self::Item) -> SI + Sync + Send,
        SI: IntoIterator,
        SI::Item: Send,
    {
        // The nested iterators run sequentially; only the outer level is
        // parallelized (contrast with `flat_map`).
        FlatMapIter::new(self, map_op)
    }
910
911 /// An adaptor that flattens parallel-iterable `Item`s into one large iterator.
912 ///
913 /// See also [`flatten_iter`](#method.flatten_iter).
914 ///
915 /// # Examples
916 ///
917 /// ```
918 /// use rayon::prelude::*;
919 ///
920 /// let x: Vec<Vec<_>> = vec![vec![1, 2], vec![3, 4]];
921 /// let y: Vec<_> = x.into_par_iter().flatten().collect();
922 ///
923 /// assert_eq!(y, vec![1, 2, 3, 4]);
924 /// ```
    fn flatten(self) -> Flatten<Self>
    where
        Self::Item: IntoParallelIterator,
    {
        // Each item is itself parallel-iterable; all are merged into one stream.
        Flatten::new(self)
    }
931
932 /// An adaptor that flattens serial-iterable `Item`s into one large iterator.
933 ///
934 /// See also [`flatten`](#method.flatten) and the analogous comparison of
935 /// [`flat_map_iter` versus `flat_map`](#flat_map_iter-versus-flat_map).
936 ///
937 /// # Examples
938 ///
939 /// ```
940 /// use rayon::prelude::*;
941 ///
942 /// let x: Vec<Vec<_>> = vec![vec![1, 2], vec![3, 4]];
943 /// let iters: Vec<_> = x.into_iter().map(Vec::into_iter).collect();
944 /// let y: Vec<_> = iters.into_par_iter().flatten_iter().collect();
945 ///
946 /// assert_eq!(y, vec![1, 2, 3, 4]);
947 /// ```
    fn flatten_iter(self) -> FlattenIter<Self>
    where
        Self::Item: IntoIterator,
        <Self::Item as IntoIterator>::Item: Send,
    {
        // Each item is a serial iterable whose elements are drained sequentially.
        FlattenIter::new(self)
    }
955
956 /// Reduces the items in the iterator into one item using `op`.
957 /// The argument `identity` should be a closure that can produce
958 /// "identity" value which may be inserted into the sequence as
959 /// needed to create opportunities for parallel execution. So, for
960 /// example, if you are doing a summation, then `identity()` ought
961 /// to produce something that represents the zero for your type
962 /// (but consider just calling `sum()` in that case).
963 ///
964 /// # Examples
965 ///
966 /// ```
967 /// // Iterate over a sequence of pairs `(x0, y0), ..., (xN, yN)`
968 /// // and use reduce to compute one pair `(x0 + ... + xN, y0 + ... + yN)`
969 /// // where the first/second elements are summed separately.
970 /// use rayon::prelude::*;
971 /// let sums = [(0, 1), (5, 6), (16, 2), (8, 9)]
972 /// .par_iter() // iterating over &(i32, i32)
973 /// .cloned() // iterating over (i32, i32)
974 /// .reduce(|| (0, 0), // the "identity" is 0 in both columns
975 /// |a, b| (a.0 + b.0, a.1 + b.1));
976 /// assert_eq!(sums, (0 + 5 + 16 + 8, 1 + 6 + 2 + 9));
977 /// ```
978 ///
979 /// **Note:** unlike a sequential `fold` operation, the order in
980 /// which `op` will be applied to reduce the result is not fully
981 /// specified. So `op` should be [associative] or else the results
982 /// will be non-deterministic. And of course `identity()` should
983 /// produce a true identity.
984 ///
985 /// [associative]: https://en.wikipedia.org/wiki/Associative_property
    fn reduce<OP, ID>(self, identity: ID, op: OP) -> Self::Item
    where
        OP: Fn(Self::Item, Self::Item) -> Self::Item + Sync + Send,
        ID: Fn() -> Self::Item + Sync + Send,
    {
        // Delegates to the private `reduce` module; `identity` may be called
        // multiple times to seed independent parallel jobs (see doc above).
        reduce::reduce(self, identity, op)
    }
993
994 /// Reduces the items in the iterator into one item using `op`.
995 /// If the iterator is empty, `None` is returned; otherwise,
996 /// `Some` is returned.
997 ///
998 /// This version of `reduce` is simple but somewhat less
999 /// efficient. If possible, it is better to call `reduce()`, which
1000 /// requires an identity element.
1001 ///
1002 /// # Examples
1003 ///
1004 /// ```
1005 /// use rayon::prelude::*;
1006 /// let sums = [(0, 1), (5, 6), (16, 2), (8, 9)]
1007 /// .par_iter() // iterating over &(i32, i32)
1008 /// .cloned() // iterating over (i32, i32)
1009 /// .reduce_with(|a, b| (a.0 + b.0, a.1 + b.1))
1010 /// .unwrap();
1011 /// assert_eq!(sums, (0 + 5 + 16 + 8, 1 + 6 + 2 + 9));
1012 /// ```
1013 ///
1014 /// **Note:** unlike a sequential `fold` operation, the order in
1015 /// which `op` will be applied to reduce the result is not fully
1016 /// specified. So `op` should be [associative] or else the results
1017 /// will be non-deterministic.
1018 ///
1019 /// [associative]: https://en.wikipedia.org/wiki/Associative_property
1020 fn reduce_with<OP>(self, op: OP) -> Option<Self::Item>
1021 where
1022 OP: Fn(Self::Item, Self::Item) -> Self::Item + Sync + Send,
1023 {
1024 fn opt_fold<T>(op: impl Fn(T, T) -> T) -> impl Fn(Option<T>, T) -> Option<T> {
1025 move |opt_a, b| match opt_a {
1026 Some(a) => Some(op(a, b)),
1027 None => Some(b),
1028 }
1029 }
1030
1031 fn opt_reduce<T>(op: impl Fn(T, T) -> T) -> impl Fn(Option<T>, Option<T>) -> Option<T> {
1032 move |opt_a, opt_b| match (opt_a, opt_b) {
1033 (Some(a), Some(b)) => Some(op(a, b)),
1034 (Some(v), None) | (None, Some(v)) => Some(v),
1035 (None, None) => None,
1036 }
1037 }
1038
1039 self.fold(<_>::default, opt_fold(&op))
1040 .reduce(<_>::default, opt_reduce(&op))
1041 }
1042
    /// Reduces the items in the iterator into one item using a fallible `op`.
    /// The `identity` argument is used the same way as in [`reduce()`].
    ///
    /// [`reduce()`]: #method.reduce
    ///
    /// If a `Result::Err` or `Option::None` item is found, or if `op` reduces
    /// to one, we will attempt to stop processing the rest of the items in the
    /// iterator as soon as possible, and we will return that terminating value.
    /// Otherwise, we will return the final reduced `Result::Ok(T)` or
    /// `Option::Some(T)`. If there are multiple errors in parallel, it is not
    /// specified which will be returned.
    ///
    /// # Examples
    ///
    /// ```
    /// use rayon::prelude::*;
    ///
    /// // Compute the sum of squares, being careful about overflow.
    /// fn sum_squares<I: IntoParallelIterator<Item = i32>>(iter: I) -> Option<i32> {
    ///     iter.into_par_iter()
    ///         .map(|i| i.checked_mul(i))            // square each item,
    ///         .try_reduce(|| 0, i32::checked_add)   // and add them up!
    /// }
    /// assert_eq!(sum_squares(0..5), Some(0 + 1 + 4 + 9 + 16));
    ///
    /// // The sum might overflow
    /// assert_eq!(sum_squares(0..10_000), None);
    ///
    /// // Or the squares might overflow before it even reaches `try_reduce`
    /// assert_eq!(sum_squares(1_000_000..1_000_001), None);
    /// ```
    fn try_reduce<T, OP, ID>(self, identity: ID, op: OP) -> Self::Item
    where
        OP: Fn(T, T) -> Self::Item + Sync + Send,
        ID: Fn() -> T + Sync + Send,
        Self::Item: Try<Output = T>,
    {
        // Delegate to the short-circuiting reduce implementation.
        try_reduce::try_reduce(self, identity, op)
    }
1082
    /// Reduces the items in the iterator into one item using a fallible `op`.
    ///
    /// Like [`reduce_with()`], if the iterator is empty, `None` is returned;
    /// otherwise, `Some` is returned. Beyond that, it behaves like
    /// [`try_reduce()`] for handling `Err`/`None`.
    ///
    /// [`reduce_with()`]: #method.reduce_with
    /// [`try_reduce()`]: #method.try_reduce
    ///
    /// For instance, with `Option` items, the return value may be:
    /// - `None`, the iterator was empty
    /// - `Some(None)`, we stopped after encountering `None`.
    /// - `Some(Some(x))`, the entire iterator reduced to `x`.
    ///
    /// With `Result` items, the nesting is more obvious:
    /// - `None`, the iterator was empty
    /// - `Some(Err(e))`, we stopped after encountering an error `e`.
    /// - `Some(Ok(x))`, the entire iterator reduced to `x`.
    ///
    /// # Examples
    ///
    /// ```
    /// use rayon::prelude::*;
    ///
    /// let files = ["/dev/null", "/does/not/exist"];
    ///
    /// // Find the biggest file
    /// files.into_par_iter()
    ///     .map(|path| std::fs::metadata(path).map(|m| (path, m.len())))
    ///     .try_reduce_with(|a, b| {
    ///         Ok(if a.1 >= b.1 { a } else { b })
    ///     })
    ///     .expect("Some value, since the iterator is not empty")
    ///     .expect_err("not found");
    /// ```
    fn try_reduce_with<T, OP>(self, op: OP) -> Option<Self::Item>
    where
        OP: Fn(T, T) -> Self::Item + Sync + Send,
        Self::Item: Try<Output = T>,
    {
        // Delegate to the short-circuiting reduce-with implementation.
        try_reduce_with::try_reduce_with(self, op)
    }
1125
    /// Parallel fold is similar to sequential fold except that the
    /// sequence of items may be subdivided before it is
    /// folded. Consider a list of numbers like `22 3 77 89 46`. If
    /// you used sequential fold to add them (`fold(0, |a,b| a+b)`),
    /// you would wind up first adding 0 + 22, then 22 + 3, then 25 +
    /// 77, and so forth. The **parallel fold** works similarly except
    /// that it first breaks up your list into sublists, and hence
    /// instead of yielding up a single sum at the end, it yields up
    /// multiple sums. The number of results is nondeterministic, as
    /// is the point where the breaks occur.
    ///
    /// So if we did the same parallel fold (`fold(0, |a,b| a+b)`) on
    /// our example list, we might wind up with a sequence of two numbers,
    /// like so:
    ///
    /// ```notrust
    /// 22 3 77 89 46
    ///       |     |
    ///     102   135
    /// ```
    ///
    /// Or perhaps these three numbers:
    ///
    /// ```notrust
    /// 22 3 77 89 46
    ///       |  |  |
    ///     102 89 46
    /// ```
    ///
    /// In general, Rayon will attempt to find good breaking points
    /// that keep all of your cores busy.
    ///
    /// ### Fold versus reduce
    ///
    /// The `fold()` and `reduce()` methods each take an identity element
    /// and a combining function, but they operate rather differently.
    ///
    /// `reduce()` requires that the identity function has the same
    /// type as the things you are iterating over, and it fully
    /// reduces the list of items into a single item. So, for example,
    /// imagine we are iterating over a list of bytes `bytes: [128_u8,
    /// 64_u8, 64_u8]`. If we used `bytes.reduce(|| 0_u8, |a: u8, b:
    /// u8| a + b)`, we would get an overflow. This is because `0`,
    /// `a`, and `b` here are all bytes, just like the numbers in the
    /// list (I wrote the types explicitly above, but those are the
    /// only types you can use). To avoid the overflow, we would need
    /// to do something like `bytes.map(|b| b as u32).reduce(|| 0, |a,
    /// b| a + b)`, in which case our result would be `256`.
    ///
    /// In contrast, with `fold()`, the identity function does not
    /// have to have the same type as the things you are iterating
    /// over, and you potentially get back many results. So, if we
    /// continue with the `bytes` example from the previous paragraph,
    /// we could do `bytes.fold(|| 0_u32, |a, b| a + (b as u32))` to
    /// convert our bytes into `u32`. And of course we might not get
    /// back a single sum.
    ///
    /// There is a more subtle distinction as well, though it's
    /// actually implied by the above points. When you use `reduce()`,
    /// your reduction function is sometimes called with values that
    /// were never part of your original parallel iterator (for
    /// example, both the left and right might be a partial sum). With
    /// `fold()`, in contrast, the left value in the fold function is
    /// always the accumulator, and the right value is always from
    /// your original sequence.
    ///
    /// ### Fold vs Map/Reduce
    ///
    /// Fold makes sense if you have some operation where it is
    /// cheaper to create groups of elements at a time. For example,
    /// imagine collecting characters into a string. If you were going
    /// to use map/reduce, you might try this:
    ///
    /// ```
    /// use rayon::prelude::*;
    ///
    /// let s =
    ///     ['a', 'b', 'c', 'd', 'e']
    ///     .par_iter()
    ///     .map(|c: &char| format!("{}", c))
    ///     .reduce(|| String::new(),
    ///             |mut a: String, b: String| { a.push_str(&b); a });
    ///
    /// assert_eq!(s, "abcde");
    /// ```
    ///
    /// Because reduce produces the same type of element as its input,
    /// you have to first map each character into a string, and then
    /// you can reduce them. This means we create one string per
    /// element in our iterator -- not so great. Using `fold`, we can
    /// do this instead:
    ///
    /// ```
    /// use rayon::prelude::*;
    ///
    /// let s =
    ///     ['a', 'b', 'c', 'd', 'e']
    ///     .par_iter()
    ///     .fold(|| String::new(),
    ///           |mut s: String, c: &char| { s.push(*c); s })
    ///     .reduce(|| String::new(),
    ///             |mut a: String, b: String| { a.push_str(&b); a });
    ///
    /// assert_eq!(s, "abcde");
    /// ```
    ///
    /// Now `fold` will process groups of our characters at a time,
    /// and we only make one string per group. We should wind up with
    /// some small-ish number of strings roughly proportional to the
    /// number of CPUs you have (it will ultimately depend on how busy
    /// your processors are). Note that we still need to do a reduce
    /// afterwards to combine those groups of strings into a single
    /// string.
    ///
    /// You could use a similar trick to save partial results (e.g., a
    /// cache) or something similar.
    ///
    /// ### Combining fold with other operations
    ///
    /// You can combine `fold` with `reduce` if you want to produce a
    /// single value. This is then roughly equivalent to a map/reduce
    /// combination in effect:
    ///
    /// ```
    /// use rayon::prelude::*;
    ///
    /// let bytes = 0..22_u8;
    /// let sum = bytes.into_par_iter()
    ///                .fold(|| 0_u32, |a: u32, b: u8| a + (b as u32))
    ///                .sum::<u32>();
    ///
    /// assert_eq!(sum, (0..22).sum()); // compare to sequential
    /// ```
    fn fold<T, ID, F>(self, identity: ID, fold_op: F) -> Fold<Self, ID, F>
    where
        F: Fn(T, Self::Item) -> T + Sync + Send,
        ID: Fn() -> T + Sync + Send,
        T: Send,
    {
        // Lazy adaptor: folding happens when the returned iterator is driven.
        Fold::new(self, identity, fold_op)
    }
1267
    /// Applies `fold_op` to the given `init` value with each item of this
    /// iterator, finally producing the value for further use.
    ///
    /// This works essentially like `fold(|| init.clone(), fold_op)`, except
    /// it doesn't require the `init` type to be `Sync`, nor any other form
    /// of added synchronization.
    ///
    /// # Examples
    ///
    /// ```
    /// use rayon::prelude::*;
    ///
    /// let bytes = 0..22_u8;
    /// let sum = bytes.into_par_iter()
    ///                .fold_with(0_u32, |a: u32, b: u8| a + (b as u32))
    ///                .sum::<u32>();
    ///
    /// assert_eq!(sum, (0..22).sum()); // compare to sequential
    /// ```
    fn fold_with<F, T>(self, init: T, fold_op: F) -> FoldWith<Self, T, F>
    where
        F: Fn(T, Self::Item) -> T + Sync + Send,
        T: Send + Clone,
    {
        // Lazy adaptor; `init` is cloned per subdivision when driven.
        FoldWith::new(self, init, fold_op)
    }
1294
    /// Performs a fallible parallel fold.
    ///
    /// This is a variation of [`fold()`] for operations which can fail with
    /// `Option::None` or `Result::Err`. The first such failure stops
    /// processing the local set of items, without affecting other folds in the
    /// iterator's subdivisions.
    ///
    /// Often, `try_fold()` will be followed by [`try_reduce()`]
    /// for a final reduction and global short-circuiting effect.
    ///
    /// [`fold()`]: #method.fold
    /// [`try_reduce()`]: #method.try_reduce
    ///
    /// # Examples
    ///
    /// ```
    /// use rayon::prelude::*;
    ///
    /// let bytes = 0..22_u8;
    /// let sum = bytes.into_par_iter()
    ///                .try_fold(|| 0_u32, |a: u32, b: u8| a.checked_add(b as u32))
    ///                .try_reduce(|| 0, u32::checked_add);
    ///
    /// assert_eq!(sum, Some((0..22).sum())); // compare to sequential
    /// ```
    fn try_fold<T, R, ID, F>(self, identity: ID, fold_op: F) -> TryFold<Self, R, ID, F>
    where
        F: Fn(T, Self::Item) -> R + Sync + Send,
        ID: Fn() -> T + Sync + Send,
        R: Try<Output = T> + Send,
    {
        // Lazy adaptor: the fallible fold runs when the result is driven.
        TryFold::new(self, identity, fold_op)
    }
1328
    /// Performs a fallible parallel fold with a cloneable `init` value.
    ///
    /// This combines the `init` semantics of [`fold_with()`] and the failure
    /// semantics of [`try_fold()`].
    ///
    /// [`fold_with()`]: #method.fold_with
    /// [`try_fold()`]: #method.try_fold
    ///
    /// # Examples
    ///
    /// ```
    /// use rayon::prelude::*;
    ///
    /// let bytes = 0..22_u8;
    /// let sum = bytes.into_par_iter()
    ///                .try_fold_with(0_u32, |a: u32, b: u8| a.checked_add(b as u32))
    ///                .try_reduce(|| 0, u32::checked_add);
    ///
    /// assert_eq!(sum, Some((0..22).sum())); // compare to sequential
    /// ```
    fn try_fold_with<F, T, R>(self, init: T, fold_op: F) -> TryFoldWith<Self, R, F>
    where
        F: Fn(T, Self::Item) -> R + Sync + Send,
        R: Try<Output = T> + Send,
        T: Clone + Send,
    {
        // Lazy adaptor; `init` is cloned per subdivision when driven.
        TryFoldWith::new(self, init, fold_op)
    }
1355
    /// Sums up the items in the iterator.
    ///
    /// Note that the order in which items will be reduced is not specified,
    /// so if the `+` operator is not truly [associative] \(as is the
    /// case for floating point numbers), then the results are not
    /// fully deterministic.
    ///
    /// [associative]: https://en.wikipedia.org/wiki/Associative_property
    ///
    /// Basically equivalent to `self.reduce(|| 0, |a, b| a + b)`,
    /// except that the type of `0` and the `+` operation may vary
    /// depending on the type of value being produced.
    ///
    /// # Examples
    ///
    /// ```
    /// use rayon::prelude::*;
    ///
    /// let a = [1, 5, 7];
    ///
    /// let sum: i32 = a.par_iter().sum();
    ///
    /// assert_eq!(sum, 13);
    /// ```
    fn sum<S>(self) -> S
    where
        S: Send + Sum<Self::Item> + Sum<S>,
    {
        // `Sum<Self::Item>` folds items; `Sum<S>` combines partial sums.
        sum::sum(self)
    }
1386
    /// Multiplies all the items in the iterator.
    ///
    /// Note that the order in which items will be reduced is not specified,
    /// so if the `*` operator is not truly [associative] \(as is the
    /// case for floating point numbers), then the results are not
    /// fully deterministic.
    ///
    /// [associative]: https://en.wikipedia.org/wiki/Associative_property
    ///
    /// Basically equivalent to `self.reduce(|| 1, |a, b| a * b)`,
    /// except that the type of `1` and the `*` operation may vary
    /// depending on the type of value being produced.
    ///
    /// # Examples
    ///
    /// ```
    /// use rayon::prelude::*;
    ///
    /// fn factorial(n: u32) -> u32 {
    ///    (1..n+1).into_par_iter().product()
    /// }
    ///
    /// assert_eq!(factorial(0), 1);
    /// assert_eq!(factorial(1), 1);
    /// assert_eq!(factorial(5), 120);
    /// ```
    fn product<P>(self) -> P
    where
        P: Send + Product<Self::Item> + Product<P>,
    {
        // `Product<Self::Item>` folds items; `Product<P>` combines partials.
        product::product(self)
    }
1419
    /// Computes the minimum of all the items in the iterator. If the
    /// iterator is empty, `None` is returned; otherwise, `Some(min)`
    /// is returned.
    ///
    /// Note that the order in which the items will be reduced is not
    /// specified, so if the `Ord` impl is not truly associative, then
    /// the results are not deterministic.
    ///
    /// Basically equivalent to `self.reduce_with(|a, b| cmp::min(a, b))`.
    ///
    /// # Examples
    ///
    /// ```
    /// use rayon::prelude::*;
    ///
    /// let a = [45, 74, 32];
    ///
    /// assert_eq!(a.par_iter().min(), Some(&32));
    ///
    /// let b: [i32; 0] = [];
    ///
    /// assert_eq!(b.par_iter().min(), None);
    /// ```
    fn min(self) -> Option<Self::Item>
    where
        Self::Item: Ord,
    {
        // `cmp::min` used directly as the pairwise combining function.
        self.reduce_with(cmp::min)
    }
1449
1450 /// Computes the minimum of all the items in the iterator with respect to
1451 /// the given comparison function. If the iterator is empty, `None` is
1452 /// returned; otherwise, `Some(min)` is returned.
1453 ///
1454 /// Note that the order in which the items will be reduced is not
1455 /// specified, so if the comparison function is not associative, then
1456 /// the results are not deterministic.
1457 ///
1458 /// # Examples
1459 ///
1460 /// ```
1461 /// use rayon::prelude::*;
1462 ///
1463 /// let a = [-3_i32, 77, 53, 240, -1];
1464 ///
1465 /// assert_eq!(a.par_iter().min_by(|x, y| x.cmp(y)), Some(&-3));
1466 /// ```
1467 fn min_by<F>(self, f: F) -> Option<Self::Item>
1468 where
1469 F: Sync + Send + Fn(&Self::Item, &Self::Item) -> Ordering,
1470 {
1471 fn min<T>(f: impl Fn(&T, &T) -> Ordering) -> impl Fn(T, T) -> T {
1472 move |a, b| match f(&a, &b) {
1473 Ordering::Greater => b,
1474 _ => a,
1475 }
1476 }
1477
1478 self.reduce_with(min(f))
1479 }
1480
1481 /// Computes the item that yields the minimum value for the given
1482 /// function. If the iterator is empty, `None` is returned;
1483 /// otherwise, `Some(item)` is returned.
1484 ///
1485 /// Note that the order in which the items will be reduced is not
1486 /// specified, so if the `Ord` impl is not truly associative, then
1487 /// the results are not deterministic.
1488 ///
1489 /// # Examples
1490 ///
1491 /// ```
1492 /// use rayon::prelude::*;
1493 ///
1494 /// let a = [-3_i32, 34, 2, 5, -10, -3, -23];
1495 ///
1496 /// assert_eq!(a.par_iter().min_by_key(|x| x.abs()), Some(&2));
1497 /// ```
1498 fn min_by_key<K, F>(self, f: F) -> Option<Self::Item>
1499 where
1500 K: Ord + Send,
1501 F: Sync + Send + Fn(&Self::Item) -> K,
1502 {
1503 fn key<T, K>(f: impl Fn(&T) -> K) -> impl Fn(T) -> (K, T) {
1504 move |x| (f(&x), x)
1505 }
1506
1507 fn min_key<T, K: Ord>(a: (K, T), b: (K, T)) -> (K, T) {
1508 match (a.0).cmp(&b.0) {
1509 Ordering::Greater => b,
1510 _ => a,
1511 }
1512 }
1513
1514 let (_, x) = self.map(key(f)).reduce_with(min_key)?;
1515 Some(x)
1516 }
1517
    /// Computes the maximum of all the items in the iterator. If the
    /// iterator is empty, `None` is returned; otherwise, `Some(max)`
    /// is returned.
    ///
    /// Note that the order in which the items will be reduced is not
    /// specified, so if the `Ord` impl is not truly associative, then
    /// the results are not deterministic.
    ///
    /// Basically equivalent to `self.reduce_with(|a, b| cmp::max(a, b))`.
    ///
    /// # Examples
    ///
    /// ```
    /// use rayon::prelude::*;
    ///
    /// let a = [45, 74, 32];
    ///
    /// assert_eq!(a.par_iter().max(), Some(&74));
    ///
    /// let b: [i32; 0] = [];
    ///
    /// assert_eq!(b.par_iter().max(), None);
    /// ```
    fn max(self) -> Option<Self::Item>
    where
        Self::Item: Ord,
    {
        // `cmp::max` used directly as the pairwise combining function.
        self.reduce_with(cmp::max)
    }
1547
1548 /// Computes the maximum of all the items in the iterator with respect to
1549 /// the given comparison function. If the iterator is empty, `None` is
1550 /// returned; otherwise, `Some(min)` is returned.
1551 ///
1552 /// Note that the order in which the items will be reduced is not
1553 /// specified, so if the comparison function is not associative, then
1554 /// the results are not deterministic.
1555 ///
1556 /// # Examples
1557 ///
1558 /// ```
1559 /// use rayon::prelude::*;
1560 ///
1561 /// let a = [-3_i32, 77, 53, 240, -1];
1562 ///
1563 /// assert_eq!(a.par_iter().max_by(|x, y| x.abs().cmp(&y.abs())), Some(&240));
1564 /// ```
1565 fn max_by<F>(self, f: F) -> Option<Self::Item>
1566 where
1567 F: Sync + Send + Fn(&Self::Item, &Self::Item) -> Ordering,
1568 {
1569 fn max<T>(f: impl Fn(&T, &T) -> Ordering) -> impl Fn(T, T) -> T {
1570 move |a, b| match f(&a, &b) {
1571 Ordering::Greater => a,
1572 _ => b,
1573 }
1574 }
1575
1576 self.reduce_with(max(f))
1577 }
1578
1579 /// Computes the item that yields the maximum value for the given
1580 /// function. If the iterator is empty, `None` is returned;
1581 /// otherwise, `Some(item)` is returned.
1582 ///
1583 /// Note that the order in which the items will be reduced is not
1584 /// specified, so if the `Ord` impl is not truly associative, then
1585 /// the results are not deterministic.
1586 ///
1587 /// # Examples
1588 ///
1589 /// ```
1590 /// use rayon::prelude::*;
1591 ///
1592 /// let a = [-3_i32, 34, 2, 5, -10, -3, -23];
1593 ///
1594 /// assert_eq!(a.par_iter().max_by_key(|x| x.abs()), Some(&34));
1595 /// ```
1596 fn max_by_key<K, F>(self, f: F) -> Option<Self::Item>
1597 where
1598 K: Ord + Send,
1599 F: Sync + Send + Fn(&Self::Item) -> K,
1600 {
1601 fn key<T, K>(f: impl Fn(&T) -> K) -> impl Fn(T) -> (K, T) {
1602 move |x| (f(&x), x)
1603 }
1604
1605 fn max_key<T, K: Ord>(a: (K, T), b: (K, T)) -> (K, T) {
1606 match (a.0).cmp(&b.0) {
1607 Ordering::Greater => a,
1608 _ => b,
1609 }
1610 }
1611
1612 let (_, x) = self.map(key(f)).reduce_with(max_key)?;
1613 Some(x)
1614 }
1615
    /// Takes two iterators and creates a new iterator over both.
    ///
    /// # Examples
    ///
    /// ```
    /// use rayon::prelude::*;
    ///
    /// let a = [0, 1, 2];
    /// let b = [9, 8, 7];
    ///
    /// let par_iter = a.par_iter().chain(b.par_iter());
    ///
    /// let chained: Vec<_> = par_iter.cloned().collect();
    ///
    /// assert_eq!(&chained[..], &[0, 1, 2, 9, 8, 7]);
    /// ```
    fn chain<C>(self, chain: C) -> Chain<Self, C::Iter>
    where
        C: IntoParallelIterator<Item = Self::Item>,
    {
        // Accepts anything convertible to a parallel iterator over the same item type.
        Chain::new(self, chain.into_par_iter())
    }
1638
    /// Searches for **some** item in the parallel iterator that
    /// matches the given predicate and returns it. This operation
    /// is similar to [`find` on sequential iterators][find] but
    /// the item returned may not be the **first** one in the parallel
    /// sequence which matches, since we search the entire sequence in parallel.
    ///
    /// Once a match is found, we will attempt to stop processing
    /// the rest of the items in the iterator as soon as possible
    /// (just as `find` stops iterating once a match is found).
    ///
    /// [find]: https://doc.rust-lang.org/std/iter/trait.Iterator.html#method.find
    ///
    /// # Examples
    ///
    /// ```
    /// use rayon::prelude::*;
    ///
    /// let a = [1, 2, 3, 3];
    ///
    /// assert_eq!(a.par_iter().find_any(|&&x| x == 3), Some(&3));
    ///
    /// assert_eq!(a.par_iter().find_any(|&&x| x == 100), None);
    /// ```
    fn find_any<P>(self, predicate: P) -> Option<Self::Item>
    where
        P: Fn(&Self::Item) -> bool + Sync + Send,
    {
        // Delegate to the short-circuiting search implementation.
        find::find(self, predicate)
    }
1668
    /// Searches for the sequentially **first** item in the parallel iterator
    /// that matches the given predicate and returns it.
    ///
    /// Once a match is found, all attempts to the right of the match
    /// will be stopped, while attempts to the left must continue in case
    /// an earlier match is found.
    ///
    /// Note that not all parallel iterators have a useful order, much like
    /// sequential `HashMap` iteration, so "first" may be nebulous. If you
    /// just want the first match that is discovered anywhere in the iterator,
    /// `find_any` is a better choice.
    ///
    /// # Examples
    ///
    /// ```
    /// use rayon::prelude::*;
    ///
    /// let a = [1, 2, 3, 3];
    ///
    /// assert_eq!(a.par_iter().find_first(|&&x| x == 3), Some(&3));
    ///
    /// assert_eq!(a.par_iter().find_first(|&&x| x == 100), None);
    /// ```
    fn find_first<P>(self, predicate: P) -> Option<Self::Item>
    where
        P: Fn(&Self::Item) -> bool + Sync + Send,
    {
        // Delegate to the position-aware search implementation.
        find_first_last::find_first(self, predicate)
    }
1698
    /// Searches for the sequentially **last** item in the parallel iterator
    /// that matches the given predicate and returns it.
    ///
    /// Once a match is found, all attempts to the left of the match
    /// will be stopped, while attempts to the right must continue in case
    /// a later match is found.
    ///
    /// Note that not all parallel iterators have a useful order, much like
    /// sequential `HashMap` iteration, so "last" may be nebulous. When the
    /// order doesn't actually matter to you, `find_any` is a better choice.
    ///
    /// # Examples
    ///
    /// ```
    /// use rayon::prelude::*;
    ///
    /// let a = [1, 2, 3, 3];
    ///
    /// assert_eq!(a.par_iter().find_last(|&&x| x == 3), Some(&3));
    ///
    /// assert_eq!(a.par_iter().find_last(|&&x| x == 100), None);
    /// ```
    fn find_last<P>(self, predicate: P) -> Option<Self::Item>
    where
        P: Fn(&Self::Item) -> bool + Sync + Send,
    {
        // Delegate to the position-aware search implementation.
        find_first_last::find_last(self, predicate)
    }
1727
1728 /// Applies the given predicate to the items in the parallel iterator
1729 /// and returns **any** non-None result of the map operation.
1730 ///
1731 /// Once a non-None value is produced from the map operation, we will
1732 /// attempt to stop processing the rest of the items in the iterator
1733 /// as soon as possible.
1734 ///
1735 /// Note that this method only returns **some** item in the parallel
1736 /// iterator that is not None from the map predicate. The item returned
1737 /// may not be the **first** non-None value produced in the parallel
1738 /// sequence, since the entire sequence is mapped over in parallel.
1739 ///
1740 /// # Examples
1741 ///
1742 /// ```
1743 /// use rayon::prelude::*;
1744 ///
1745 /// let c = ["lol", "NaN", "5", "5"];
1746 ///
1747 /// let found_number = c.par_iter().find_map_any(|s| s.parse().ok());
1748 ///
1749 /// assert_eq!(found_number, Some(5));
1750 /// ```
1751 fn find_map_any<P, R>(self, predicate: P) -> Option<R>
1752 where
1753 P: Fn(Self::Item) -> Option<R> + Sync + Send,
1754 R: Send,
1755 {
1756 fn yes<T>(_: &T) -> bool {
1757 true
1758 }
1759 self.filter_map(predicate).find_any(yes)
1760 }
1761
1762 /// Applies the given predicate to the items in the parallel iterator and
1763 /// returns the sequentially **first** non-None result of the map operation.
1764 ///
1765 /// Once a non-None value is produced from the map operation, all attempts
1766 /// to the right of the match will be stopped, while attempts to the left
1767 /// must continue in case an earlier match is found.
1768 ///
1769 /// Note that not all parallel iterators have a useful order, much like
1770 /// sequential `HashMap` iteration, so "first" may be nebulous. If you
1771 /// just want the first non-None value discovered anywhere in the iterator,
1772 /// `find_map_any` is a better choice.
1773 ///
1774 /// # Examples
1775 ///
1776 /// ```
1777 /// use rayon::prelude::*;
1778 ///
1779 /// let c = ["lol", "NaN", "2", "5"];
1780 ///
1781 /// let first_number = c.par_iter().find_map_first(|s| s.parse().ok());
1782 ///
1783 /// assert_eq!(first_number, Some(2));
1784 /// ```
1785 fn find_map_first<P, R>(self, predicate: P) -> Option<R>
1786 where
1787 P: Fn(Self::Item) -> Option<R> + Sync + Send,
1788 R: Send,
1789 {
1790 fn yes<T>(_: &T) -> bool {
1791 true
1792 }
1793 self.filter_map(predicate).find_first(yes)
1794 }
1795
1796 /// Applies the given predicate to the items in the parallel iterator and
1797 /// returns the sequentially **last** non-None result of the map operation.
1798 ///
1799 /// Once a non-None value is produced from the map operation, all attempts
1800 /// to the left of the match will be stopped, while attempts to the right
1801 /// must continue in case a later match is found.
1802 ///
1803 /// Note that not all parallel iterators have a useful order, much like
1804 /// sequential `HashMap` iteration, so "first" may be nebulous. If you
1805 /// just want the first non-None value discovered anywhere in the iterator,
1806 /// `find_map_any` is a better choice.
1807 ///
1808 /// # Examples
1809 ///
1810 /// ```
1811 /// use rayon::prelude::*;
1812 ///
1813 /// let c = ["lol", "NaN", "2", "5"];
1814 ///
1815 /// let last_number = c.par_iter().find_map_last(|s| s.parse().ok());
1816 ///
1817 /// assert_eq!(last_number, Some(5));
1818 /// ```
1819 fn find_map_last<P, R>(self, predicate: P) -> Option<R>
1820 where
1821 P: Fn(Self::Item) -> Option<R> + Sync + Send,
1822 R: Send,
1823 {
1824 fn yes<T>(_: &T) -> bool {
1825 true
1826 }
1827 self.filter_map(predicate).find_last(yes)
1828 }
1829
1830 #[doc(hidden)]
1831 #[deprecated(note = "parallel `find` does not search in order -- use `find_any`, \\
1832 `find_first`, or `find_last`")]
1833 fn find<P>(self, predicate: P) -> Option<Self::Item>
1834 where
1835 P: Fn(&Self::Item) -> bool + Sync + Send,
1836 {
1837 self.find_any(predicate)
1838 }
1839
1840 /// Searches for **some** item in the parallel iterator that
1841 /// matches the given predicate, and if so returns true. Once
1842 /// a match is found, we'll attempt to stop process the rest
1843 /// of the items. Proving that there's no match, returning false,
1844 /// does require visiting every item.
1845 ///
1846 /// # Examples
1847 ///
1848 /// ```
1849 /// use rayon::prelude::*;
1850 ///
1851 /// let a = [0, 12, 3, 4, 0, 23, 0];
1852 ///
1853 /// let is_valid = a.par_iter().any(|&x| x > 10);
1854 ///
1855 /// assert!(is_valid);
1856 /// ```
1857 fn any<P>(self, predicate: P) -> bool
1858 where
1859 P: Fn(Self::Item) -> bool + Sync + Send,
1860 {
1861 self.map(predicate).find_any(bool::clone).is_some()
1862 }
1863
1864 /// Tests that every item in the parallel iterator matches the given
1865 /// predicate, and if so returns true. If a counter-example is found,
1866 /// we'll attempt to stop processing more items, then return false.
1867 ///
1868 /// # Examples
1869 ///
1870 /// ```
1871 /// use rayon::prelude::*;
1872 ///
1873 /// let a = [0, 12, 3, 4, 0, 23, 0];
1874 ///
1875 /// let is_valid = a.par_iter().all(|&x| x > 10);
1876 ///
1877 /// assert!(!is_valid);
1878 /// ```
1879 fn all<P>(self, predicate: P) -> bool
1880 where
1881 P: Fn(Self::Item) -> bool + Sync + Send,
1882 {
1883 #[inline]
1884 fn is_false(x: &bool) -> bool {
1885 !x
1886 }
1887
1888 self.map(predicate).find_any(is_false).is_none()
1889 }
1890
    /// Creates an iterator over the `Some` items of this iterator, halting
    /// as soon as any `None` is found.
    ///
    /// # Examples
    ///
    /// ```
    /// use rayon::prelude::*;
    /// use std::sync::atomic::{AtomicUsize, Ordering};
    ///
    /// let counter = AtomicUsize::new(0);
    /// let value = (0_i32..2048)
    ///     .into_par_iter()
    ///     .map(|x| {
    ///         counter.fetch_add(1, Ordering::SeqCst);
    ///         if x < 1024 { Some(x) } else { None }
    ///     })
    ///     .while_some()
    ///     .max();
    ///
    /// assert!(value < Some(1024));
    /// assert!(counter.load(Ordering::SeqCst) < 2048); // should not have visited every single one
    /// ```
    fn while_some<T>(self) -> WhileSome<Self>
    where
        Self: ParallelIterator<Item = Option<T>>,
        T: Send,
    {
        // Only available when the items are themselves `Option<T>`.
        WhileSome::new(self)
    }
1920
1921 /// Wraps an iterator with a fuse in case of panics, to halt all threads
1922 /// as soon as possible.
1923 ///
1924 /// Panics within parallel iterators are always propagated to the caller,
1925 /// but they don't always halt the rest of the iterator right away, due to
1926 /// the internal semantics of [`join`]. This adaptor makes a greater effort
1927 /// to stop processing other items sooner, with the cost of additional
1928 /// synchronization overhead, which may also inhibit some optimizations.
1929 ///
1930 /// [`join`]: ../fn.join.html#panics
1931 ///
1932 /// # Examples
1933 ///
1934 /// If this code didn't use `panic_fuse()`, it would continue processing
1935 /// many more items in other threads (with long sleep delays) before the
1936 /// panic is finally propagated.
1937 ///
1938 /// ```should_panic
1939 /// use rayon::prelude::*;
1940 /// use std::{thread, time};
1941 ///
1942 /// (0..1_000_000)
1943 /// .into_par_iter()
1944 /// .panic_fuse()
1945 /// .for_each(|i| {
1946 /// // simulate some work
1947 /// thread::sleep(time::Duration::from_secs(1));
1948 /// assert!(i > 0); // oops!
1949 /// });
1950 /// ```
    fn panic_fuse(self) -> PanicFuse<Self> {
        // Wrapping only; the `PanicFuse` adaptor implements the
        // panic-detection and early-halt behavior described above.
        PanicFuse::new(self)
    }
1954
1955 /// Creates a fresh collection containing all the elements produced
1956 /// by this parallel iterator.
1957 ///
1958 /// You may prefer [`collect_into_vec()`] implemented on
1959 /// [`IndexedParallelIterator`], if your underlying iterator also implements
1960 /// it. [`collect_into_vec()`] allocates efficiently with precise knowledge
1961 /// of how many elements the iterator contains, and even allows you to reuse
1962 /// an existing vector's backing store rather than allocating a fresh vector.
1963 ///
1964 /// [`IndexedParallelIterator`]: trait.IndexedParallelIterator.html
1965 /// [`collect_into_vec()`]:
1966 /// trait.IndexedParallelIterator.html#method.collect_into_vec
1967 ///
1968 /// # Examples
1969 ///
1970 /// ```
1971 /// use rayon::prelude::*;
1972 ///
1973 /// let sync_vec: Vec<_> = (0..100).into_iter().collect();
1974 ///
1975 /// let async_vec: Vec<_> = (0..100).into_par_iter().collect();
1976 ///
1977 /// assert_eq!(sync_vec, async_vec);
1978 /// ```
1979 ///
1980 /// You can collect a pair of collections like [`unzip`](#method.unzip)
1981 /// for paired items:
1982 ///
1983 /// ```
1984 /// use rayon::prelude::*;
1985 ///
1986 /// let a = [(0, 1), (1, 2), (2, 3), (3, 4)];
1987 /// let (first, second): (Vec<_>, Vec<_>) = a.into_par_iter().collect();
1988 ///
1989 /// assert_eq!(first, [0, 1, 2, 3]);
1990 /// assert_eq!(second, [1, 2, 3, 4]);
1991 /// ```
1992 ///
1993 /// Or like [`partition_map`](#method.partition_map) for `Either` items:
1994 ///
1995 /// ```
1996 /// use rayon::prelude::*;
1997 /// use rayon::iter::Either;
1998 ///
1999 /// let (left, right): (Vec<_>, Vec<_>) = (0..8).into_par_iter().map(|x| {
2000 /// if x % 2 == 0 {
2001 /// Either::Left(x * 4)
2002 /// } else {
2003 /// Either::Right(x * 3)
2004 /// }
2005 /// }).collect();
2006 ///
2007 /// assert_eq!(left, [0, 8, 16, 24]);
2008 /// assert_eq!(right, [3, 9, 15, 21]);
2009 /// ```
2010 ///
2011 /// You can even collect an arbitrarily-nested combination of pairs and `Either`:
2012 ///
2013 /// ```
2014 /// use rayon::prelude::*;
2015 /// use rayon::iter::Either;
2016 ///
2017 /// let (first, (left, right)): (Vec<_>, (Vec<_>, Vec<_>))
2018 /// = (0..8).into_par_iter().map(|x| {
2019 /// if x % 2 == 0 {
2020 /// (x, Either::Left(x * 4))
2021 /// } else {
2022 /// (-x, Either::Right(x * 3))
2023 /// }
2024 /// }).collect();
2025 ///
2026 /// assert_eq!(first, [0, -1, 2, -3, 4, -5, 6, -7]);
2027 /// assert_eq!(left, [0, 8, 16, 24]);
2028 /// assert_eq!(right, [3, 9, 15, 21]);
2029 /// ```
2030 ///
2031 /// All of that can _also_ be combined with short-circuiting collection of
2032 /// `Result` or `Option` types:
2033 ///
2034 /// ```
2035 /// use rayon::prelude::*;
2036 /// use rayon::iter::Either;
2037 ///
2038 /// let result: Result<(Vec<_>, (Vec<_>, Vec<_>)), _>
2039 /// = (0..8).into_par_iter().map(|x| {
2040 /// if x > 5 {
2041 /// Err(x)
2042 /// } else if x % 2 == 0 {
2043 /// Ok((x, Either::Left(x * 4)))
2044 /// } else {
2045 /// Ok((-x, Either::Right(x * 3)))
2046 /// }
2047 /// }).collect();
2048 ///
2049 /// let error = result.unwrap_err();
2050 /// assert!(error == 6 || error == 7);
2051 /// ```
    fn collect<C>(self) -> C
    where
        C: FromParallelIterator<Self::Item>,
    {
        // Mirrors `Iterator::collect`: delegate to the target collection's
        // `FromParallelIterator` implementation.
        C::from_par_iter(self)
    }
2058
2059 /// Unzips the items of a parallel iterator into a pair of arbitrary
2060 /// `ParallelExtend` containers.
2061 ///
2062 /// You may prefer to use `unzip_into_vecs()`, which allocates more
2063 /// efficiently with precise knowledge of how many elements the
2064 /// iterator contains, and even allows you to reuse existing
2065 /// vectors' backing stores rather than allocating fresh vectors.
2066 ///
2067 /// # Examples
2068 ///
2069 /// ```
2070 /// use rayon::prelude::*;
2071 ///
2072 /// let a = [(0, 1), (1, 2), (2, 3), (3, 4)];
2073 ///
2074 /// let (left, right): (Vec<_>, Vec<_>) = a.par_iter().cloned().unzip();
2075 ///
2076 /// assert_eq!(left, [0, 1, 2, 3]);
2077 /// assert_eq!(right, [1, 2, 3, 4]);
2078 /// ```
2079 ///
2080 /// Nested pairs can be unzipped too.
2081 ///
2082 /// ```
2083 /// use rayon::prelude::*;
2084 ///
2085 /// let (values, (squares, cubes)): (Vec<_>, (Vec<_>, Vec<_>)) = (0..4).into_par_iter()
2086 /// .map(|i| (i, (i * i, i * i * i)))
2087 /// .unzip();
2088 ///
2089 /// assert_eq!(values, [0, 1, 2, 3]);
2090 /// assert_eq!(squares, [0, 1, 4, 9]);
2091 /// assert_eq!(cubes, [0, 1, 8, 27]);
2092 /// ```
    fn unzip<A, B, FromA, FromB>(self) -> (FromA, FromB)
    where
        Self: ParallelIterator<Item = (A, B)>,
        FromA: Default + Send + ParallelExtend<A>,
        FromB: Default + Send + ParallelExtend<B>,
        A: Send,
        B: Send,
    {
        // Both output containers start from `Default` and are grown via
        // their `ParallelExtend` implementations.
        unzip::unzip(self)
    }
2103
2104 /// Partitions the items of a parallel iterator into a pair of arbitrary
2105 /// `ParallelExtend` containers. Items for which the `predicate` returns
2106 /// true go into the first container, and the rest go into the second.
2107 ///
2108 /// Note: unlike the standard `Iterator::partition`, this allows distinct
2109 /// collection types for the left and right items. This is more flexible,
2110 /// but may require new type annotations when converting sequential code
2111 /// that used type inference assuming the two were the same.
2112 ///
2113 /// # Examples
2114 ///
2115 /// ```
2116 /// use rayon::prelude::*;
2117 ///
2118 /// let (left, right): (Vec<_>, Vec<_>) = (0..8).into_par_iter().partition(|x| x % 2 == 0);
2119 ///
2120 /// assert_eq!(left, [0, 2, 4, 6]);
2121 /// assert_eq!(right, [1, 3, 5, 7]);
2122 /// ```
    fn partition<A, B, P>(self, predicate: P) -> (A, B)
    where
        A: Default + Send + ParallelExtend<Self::Item>,
        B: Default + Send + ParallelExtend<Self::Item>,
        P: Fn(&Self::Item) -> bool + Sync + Send,
    {
        // Routed through the same module as `unzip`, with the predicate
        // deciding which of the two containers receives each item.
        unzip::partition(self, predicate)
    }
2131
2132 /// Partitions and maps the items of a parallel iterator into a pair of
2133 /// arbitrary `ParallelExtend` containers. `Either::Left` items go into
2134 /// the first container, and `Either::Right` items go into the second.
2135 ///
2136 /// # Examples
2137 ///
2138 /// ```
2139 /// use rayon::prelude::*;
2140 /// use rayon::iter::Either;
2141 ///
2142 /// let (left, right): (Vec<_>, Vec<_>) = (0..8).into_par_iter()
2143 /// .partition_map(|x| {
2144 /// if x % 2 == 0 {
2145 /// Either::Left(x * 4)
2146 /// } else {
2147 /// Either::Right(x * 3)
2148 /// }
2149 /// });
2150 ///
2151 /// assert_eq!(left, [0, 8, 16, 24]);
2152 /// assert_eq!(right, [3, 9, 15, 21]);
2153 /// ```
2154 ///
2155 /// Nested `Either` enums can be split as well.
2156 ///
2157 /// ```
2158 /// use rayon::prelude::*;
2159 /// use rayon::iter::Either::*;
2160 ///
2161 /// let ((fizzbuzz, fizz), (buzz, other)): ((Vec<_>, Vec<_>), (Vec<_>, Vec<_>)) = (1..20)
2162 /// .into_par_iter()
2163 /// .partition_map(|x| match (x % 3, x % 5) {
2164 /// (0, 0) => Left(Left(x)),
2165 /// (0, _) => Left(Right(x)),
2166 /// (_, 0) => Right(Left(x)),
2167 /// (_, _) => Right(Right(x)),
2168 /// });
2169 ///
2170 /// assert_eq!(fizzbuzz, [15]);
2171 /// assert_eq!(fizz, [3, 6, 9, 12, 18]);
2172 /// assert_eq!(buzz, [5, 10]);
2173 /// assert_eq!(other, [1, 2, 4, 7, 8, 11, 13, 14, 16, 17, 19]);
2174 /// ```
    fn partition_map<A, B, P, L, R>(self, predicate: P) -> (A, B)
    where
        A: Default + Send + ParallelExtend<L>,
        B: Default + Send + ParallelExtend<R>,
        P: Fn(Self::Item) -> Either<L, R> + Sync + Send,
        L: Send,
        R: Send,
    {
        // Unlike `partition`, the mapper consumes each item and may produce
        // different types for the `Left` and `Right` containers.
        unzip::partition_map(self, predicate)
    }
2185
2186 /// Intersperses clones of an element between items of this iterator.
2187 ///
2188 /// # Examples
2189 ///
2190 /// ```
2191 /// use rayon::prelude::*;
2192 ///
2193 /// let x = vec![1, 2, 3];
2194 /// let r: Vec<_> = x.into_par_iter().intersperse(-1).collect();
2195 ///
2196 /// assert_eq!(r, vec![1, -1, 2, -1, 3]);
2197 /// ```
    fn intersperse(self, element: Self::Item) -> Intersperse<Self>
    where
        // `Clone` is needed because one separator is inserted between every
        // adjacent pair of items.
        Self::Item: Clone,
    {
        Intersperse::new(self, element)
    }
2204
2205 /// Creates an iterator that yields `n` elements from *anywhere* in the original iterator.
2206 ///
2207 /// This is similar to [`IndexedParallelIterator::take`] without being
2208 /// constrained to the "first" `n` of the original iterator order. The
2209 /// taken items will still maintain their relative order where that is
2210 /// visible in `collect`, `reduce`, and similar outputs.
2211 ///
2212 /// # Examples
2213 ///
2214 /// ```
2215 /// use rayon::prelude::*;
2216 ///
2217 /// let result: Vec<_> = (0..100)
2218 /// .into_par_iter()
2219 /// .filter(|&x| x % 2 == 0)
2220 /// .take_any(5)
2221 /// .collect();
2222 ///
2223 /// assert_eq!(result.len(), 5);
2224 /// assert!(result.windows(2).all(|w| w[0] < w[1]));
2225 /// ```
    fn take_any(self, n: usize) -> TakeAny<Self> {
        // The adaptor enforces the global count of `n` taken items.
        TakeAny::new(self, n)
    }
2229
2230 /// Creates an iterator that skips `n` elements from *anywhere* in the original iterator.
2231 ///
2232 /// This is similar to [`IndexedParallelIterator::skip`] without being
2233 /// constrained to the "first" `n` of the original iterator order. The
2234 /// remaining items will still maintain their relative order where that is
2235 /// visible in `collect`, `reduce`, and similar outputs.
2236 ///
2237 /// # Examples
2238 ///
2239 /// ```
2240 /// use rayon::prelude::*;
2241 ///
2242 /// let result: Vec<_> = (0..100)
2243 /// .into_par_iter()
2244 /// .filter(|&x| x % 2 == 0)
2245 /// .skip_any(5)
2246 /// .collect();
2247 ///
2248 /// assert_eq!(result.len(), 45);
2249 /// assert!(result.windows(2).all(|w| w[0] < w[1]));
2250 /// ```
    fn skip_any(self, n: usize) -> SkipAny<Self> {
        // The adaptor enforces the global count of `n` skipped items.
        SkipAny::new(self, n)
    }
2254
2255 /// Creates an iterator that takes elements from *anywhere* in the original iterator
2256 /// until the given `predicate` returns `false`.
2257 ///
2258 /// The `predicate` may be anything -- e.g. it could be checking a fact about the item, a
2259 /// global condition unrelated to the item itself, or some combination thereof.
2260 ///
2261 /// If parallel calls to the `predicate` race and give different results, then the
2262 /// `true` results will still take those particular items, while respecting the `false`
2263 /// result from elsewhere to skip any further items.
2264 ///
2265 /// This is similar to [`Iterator::take_while`] without being constrained to the original
2266 /// iterator order. The taken items will still maintain their relative order where that is
2267 /// visible in `collect`, `reduce`, and similar outputs.
2268 ///
2269 /// # Examples
2270 ///
2271 /// ```
2272 /// use rayon::prelude::*;
2273 ///
2274 /// let result: Vec<_> = (0..100)
2275 /// .into_par_iter()
2276 /// .take_any_while(|x| *x < 50)
2277 /// .collect();
2278 ///
2279 /// assert!(result.len() <= 50);
2280 /// assert!(result.windows(2).all(|w| w[0] < w[1]));
2281 /// ```
2282 ///
2283 /// ```
2284 /// use rayon::prelude::*;
2285 /// use std::sync::atomic::AtomicUsize;
2286 /// use std::sync::atomic::Ordering::Relaxed;
2287 ///
2288 /// // Collect any group of items that sum <= 1000
2289 /// let quota = AtomicUsize::new(1000);
2290 /// let result: Vec<_> = (0_usize..100)
2291 /// .into_par_iter()
2292 /// .take_any_while(|&x| {
2293 /// quota.fetch_update(Relaxed, Relaxed, |q| q.checked_sub(x))
2294 /// .is_ok()
2295 /// })
2296 /// .collect();
2297 ///
2298 /// let sum = result.iter().sum::<usize>();
2299 /// assert!(matches!(sum, 902..=1000));
2300 /// ```
    fn take_any_while<P>(self, predicate: P) -> TakeAnyWhile<Self, P>
    where
        P: Fn(&Self::Item) -> bool + Sync + Send,
    {
        // Wrapping only; the adaptor implements the stop-on-`false` logic.
        TakeAnyWhile::new(self, predicate)
    }
2307
2308 /// Creates an iterator that skips elements from *anywhere* in the original iterator
2309 /// until the given `predicate` returns `false`.
2310 ///
2311 /// The `predicate` may be anything -- e.g. it could be checking a fact about the item, a
2312 /// global condition unrelated to the item itself, or some combination thereof.
2313 ///
2314 /// If parallel calls to the `predicate` race and give different results, then the
2315 /// `true` results will still skip those particular items, while respecting the `false`
2316 /// result from elsewhere to skip any further items.
2317 ///
2318 /// This is similar to [`Iterator::skip_while`] without being constrained to the original
2319 /// iterator order. The remaining items will still maintain their relative order where that is
2320 /// visible in `collect`, `reduce`, and similar outputs.
2321 ///
2322 /// # Examples
2323 ///
2324 /// ```
2325 /// use rayon::prelude::*;
2326 ///
2327 /// let result: Vec<_> = (0..100)
2328 /// .into_par_iter()
2329 /// .skip_any_while(|x| *x < 50)
2330 /// .collect();
2331 ///
2332 /// assert!(result.len() >= 50);
2333 /// assert!(result.windows(2).all(|w| w[0] < w[1]));
2334 /// ```
    fn skip_any_while<P>(self, predicate: P) -> SkipAnyWhile<Self, P>
    where
        P: Fn(&Self::Item) -> bool + Sync + Send,
    {
        // Wrapping only; the adaptor implements the skip-until-`false` logic.
        SkipAnyWhile::new(self, predicate)
    }
2341
    /// Internal method used to define the behavior of this parallel
    /// iterator. You should not need to call this directly.
    ///
    /// This method causes the iterator `self` to start producing
    /// items and to feed them to the consumer `consumer` one by one.
    /// It may split the consumer before doing so to create the
    /// opportunity to produce in parallel.
    ///
    /// See the [README] for more details on the internals of parallel
    /// iterators.
    ///
    /// This method has no default implementation, so every implementor
    /// must provide it.
    ///
    /// [README]: https://github.com/rayon-rs/rayon/blob/master/src/iter/plumbing/README.md
    fn drive_unindexed<C>(self, consumer: C) -> C::Result
    where
        C: UnindexedConsumer<Self::Item>;
2357
2358 /// Internal method used to define the behavior of this parallel
2359 /// iterator. You should not need to call this directly.
2360 ///
2361 /// Returns the number of items produced by this iterator, if known
2362 /// statically. This can be used by consumers to trigger special fast
2363 /// paths. Therefore, if `Some(_)` is returned, this iterator must only
2364 /// use the (indexed) `Consumer` methods when driving a consumer, such
2365 /// as `split_at()`. Calling `UnindexedConsumer::split_off_left()` or
2366 /// other `UnindexedConsumer` methods -- or returning an inaccurate
2367 /// value -- may result in panics.
2368 ///
2369 /// This method is currently used to optimize `collect` for want
2370 /// of true Rust specialization; it may be removed when
2371 /// specialization is stable.
    fn opt_len(&self) -> Option<usize> {
        // By default an unindexed iterator reports no static length;
        // indexed iterators override this to enable the fast paths.
        None
    }
2375}
2376
/// Blanket impl: every `ParallelIterator` is trivially convertible into a
/// parallel iterator — namely, itself.
impl<T: ParallelIterator> IntoParallelIterator for T {
    type Iter = T;
    type Item = T::Item;

    fn into_par_iter(self) -> T {
        // Identity conversion: the value is already a parallel iterator.
        self
    }
}
2385
2386/// An iterator that supports "random access" to its data, meaning
2387/// that you can split it at arbitrary indices and draw data from
2388/// those points.
2389///
2390/// **Note:** Not implemented for `u64`, `i64`, `u128`, or `i128` ranges
2391// Waiting for `ExactSizeIterator::is_empty` to be stabilized. See rust-lang/rust#35428
2392#[allow(clippy::len_without_is_empty)]
2393pub trait IndexedParallelIterator: ParallelIterator {
2394 /// Collects the results of the iterator into the specified
2395 /// vector. The vector is always truncated before execution
2396 /// begins. If possible, reusing the vector across calls can lead
2397 /// to better performance since it reuses the same backing buffer.
2398 ///
2399 /// # Examples
2400 ///
2401 /// ```
2402 /// use rayon::prelude::*;
2403 ///
2404 /// // any prior data will be truncated
2405 /// let mut vec = vec![-1, -2, -3];
2406 ///
2407 /// (0..5).into_par_iter()
2408 /// .collect_into_vec(&mut vec);
2409 ///
2410 /// assert_eq!(vec, [0, 1, 2, 3, 4]);
2411 /// ```
    fn collect_into_vec(self, target: &mut Vec<Self::Item>) {
        // Delegates to the `collect` module, which can size `target`
        // exactly because this iterator's length is known.
        collect::collect_into_vec(self, target);
    }
2415
2416 /// Unzips the results of the iterator into the specified
2417 /// vectors. The vectors are always truncated before execution
2418 /// begins. If possible, reusing the vectors across calls can lead
2419 /// to better performance since they reuse the same backing buffer.
2420 ///
2421 /// # Examples
2422 ///
2423 /// ```
2424 /// use rayon::prelude::*;
2425 ///
2426 /// // any prior data will be truncated
2427 /// let mut left = vec![42; 10];
2428 /// let mut right = vec![-1; 10];
2429 ///
2430 /// (10..15).into_par_iter()
2431 /// .enumerate()
2432 /// .unzip_into_vecs(&mut left, &mut right);
2433 ///
2434 /// assert_eq!(left, [0, 1, 2, 3, 4]);
2435 /// assert_eq!(right, [10, 11, 12, 13, 14]);
2436 /// ```
    fn unzip_into_vecs<A, B>(self, left: &mut Vec<A>, right: &mut Vec<B>)
    where
        Self: IndexedParallelIterator<Item = (A, B)>,
        A: Send,
        B: Send,
    {
        // Delegates to the `collect` module, which truncates and sizes both
        // vectors from this iterator's known length.
        collect::unzip_into_vecs(self, left, right);
    }
2445
2446 /// Iterates over tuples `(A, B)`, where the items `A` are from
2447 /// this iterator and `B` are from the iterator given as argument.
2448 /// Like the `zip` method on ordinary iterators, if the two
2449 /// iterators are of unequal length, you only get the items they
2450 /// have in common.
2451 ///
2452 /// # Examples
2453 ///
2454 /// ```
2455 /// use rayon::prelude::*;
2456 ///
2457 /// let result: Vec<_> = (1..4)
2458 /// .into_par_iter()
2459 /// .zip(vec!['a', 'b', 'c'])
2460 /// .collect();
2461 ///
2462 /// assert_eq!(result, [(1, 'a'), (2, 'b'), (3, 'c')]);
2463 /// ```
    fn zip<Z>(self, zip_op: Z) -> Zip<Self, Z::Iter>
    where
        Z: IntoParallelIterator,
        Z::Iter: IndexedParallelIterator,
    {
        // Convert the argument into its parallel iterator and pair them up.
        Zip::new(self, zip_op.into_par_iter())
    }
2471
2472 /// The same as `Zip`, but requires that both iterators have the same length.
2473 ///
2474 /// # Panics
2475 /// Will panic if `self` and `zip_op` are not the same length.
2476 ///
2477 /// ```should_panic
2478 /// use rayon::prelude::*;
2479 ///
2480 /// let one = [1u8];
2481 /// let two = [2u8, 2];
2482 /// let one_iter = one.par_iter();
2483 /// let two_iter = two.par_iter();
2484 ///
2485 /// // this will panic
2486 /// let zipped: Vec<(&u8, &u8)> = one_iter.zip_eq(two_iter).collect();
2487 ///
2488 /// // we should never get here
2489 /// assert_eq!(1, zipped.len());
2490 /// ```
2491 #[track_caller]
2492 fn zip_eq<Z>(self, zip_op: Z) -> ZipEq<Self, Z::Iter>
2493 where
2494 Z: IntoParallelIterator,
2495 Z::Iter: IndexedParallelIterator,
2496 {
2497 let zip_op_iter = zip_op.into_par_iter();
2498 assert_eq!(
2499 self.len(),
2500 zip_op_iter.len(),
2501 "iterators must have the same length"
2502 );
2503 ZipEq::new(self, zip_op_iter)
2504 }
2505
2506 /// Interleaves elements of this iterator and the other given
2507 /// iterator. Alternately yields elements from this iterator and
2508 /// the given iterator, until both are exhausted. If one iterator
2509 /// is exhausted before the other, the last elements are provided
2510 /// from the other.
2511 ///
2512 /// # Examples
2513 ///
2514 /// ```
2515 /// use rayon::prelude::*;
2516 /// let (x, y) = (vec![1, 2], vec![3, 4, 5, 6]);
2517 /// let r: Vec<i32> = x.into_par_iter().interleave(y).collect();
2518 /// assert_eq!(r, vec![1, 3, 2, 4, 5, 6]);
2519 /// ```
    fn interleave<I>(self, other: I) -> Interleave<Self, I::Iter>
    where
        I: IntoParallelIterator<Item = Self::Item>,
        I::Iter: IndexedParallelIterator<Item = Self::Item>,
    {
        // Both sides must yield the same item type for alternation to work.
        Interleave::new(self, other.into_par_iter())
    }
2527
2528 /// Interleaves elements of this iterator and the other given
2529 /// iterator, until one is exhausted.
2530 ///
2531 /// # Examples
2532 ///
2533 /// ```
2534 /// use rayon::prelude::*;
2535 /// let (x, y) = (vec![1, 2, 3, 4], vec![5, 6]);
2536 /// let r: Vec<i32> = x.into_par_iter().interleave_shortest(y).collect();
2537 /// assert_eq!(r, vec![1, 5, 2, 6, 3]);
2538 /// ```
    fn interleave_shortest<I>(self, other: I) -> InterleaveShortest<Self, I::Iter>
    where
        I: IntoParallelIterator<Item = Self::Item>,
        I::Iter: IndexedParallelIterator<Item = Self::Item>,
    {
        // Same shape as `interleave`, but the adaptor stops at the
        // shorter input instead of draining the longer one.
        InterleaveShortest::new(self, other.into_par_iter())
    }
2546
2547 /// Splits an iterator up into fixed-size chunks.
2548 ///
2549 /// Returns an iterator that returns `Vec`s of the given number of elements.
2550 /// If the number of elements in the iterator is not divisible by `chunk_size`,
2551 /// the last chunk may be shorter than `chunk_size`.
2552 ///
2553 /// See also [`par_chunks()`] and [`par_chunks_mut()`] for similar behavior on
2554 /// slices, without having to allocate intermediate `Vec`s for the chunks.
2555 ///
2556 /// [`par_chunks()`]: ../slice/trait.ParallelSlice.html#method.par_chunks
2557 /// [`par_chunks_mut()`]: ../slice/trait.ParallelSliceMut.html#method.par_chunks_mut
2558 ///
2559 /// # Examples
2560 ///
2561 /// ```
2562 /// use rayon::prelude::*;
2563 /// let a = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
2564 /// let r: Vec<Vec<i32>> = a.into_par_iter().chunks(3).collect();
2565 /// assert_eq!(r, vec![vec![1,2,3], vec![4,5,6], vec![7,8,9], vec![10]]);
2566 /// ```
    fn chunks(self, chunk_size: usize) -> Chunks<Self> {
        // A chunk size of zero is meaningless (chunks would never fill),
        // so reject it up front with a clear panic message.
        assert!(chunk_size != 0, "chunk_size must not be zero");
        Chunks::new(self, chunk_size)
    }
2571
2572 /// Splits an iterator into fixed-size chunks, performing a sequential [`fold()`] on
2573 /// each chunk.
2574 ///
2575 /// Returns an iterator that produces a folded result for each chunk of items
2576 /// produced by this iterator.
2577 ///
2578 /// This works essentially like:
2579 ///
2580 /// ```text
2581 /// iter.chunks(chunk_size)
2582 /// .map(|chunk|
2583 /// chunk.into_iter()
2584 /// .fold(identity, fold_op)
2585 /// )
2586 /// ```
2587 ///
2588 /// except there is no per-chunk allocation overhead.
2589 ///
2590 /// [`fold()`]: std::iter::Iterator#method.fold
2591 ///
2592 /// **Panics** if `chunk_size` is 0.
2593 ///
2594 /// # Examples
2595 ///
2596 /// ```
2597 /// use rayon::prelude::*;
2598 /// let nums = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
2599 /// let chunk_sums = nums.into_par_iter().fold_chunks(2, || 0, |a, n| a + n).collect::<Vec<_>>();
2600 /// assert_eq!(chunk_sums, vec![3, 7, 11, 15, 19]);
2601 /// ```
    #[track_caller]
    fn fold_chunks<T, ID, F>(
        self,
        chunk_size: usize,
        identity: ID,
        fold_op: F,
    ) -> FoldChunks<Self, ID, F>
    where
        ID: Fn() -> T + Send + Sync,
        F: Fn(T, Self::Item) -> T + Send + Sync,
        T: Send,
    {
        // Same precondition as `chunks`; `#[track_caller]` points the panic
        // at the user's call site.
        assert!(chunk_size != 0, "chunk_size must not be zero");
        FoldChunks::new(self, chunk_size, identity, fold_op)
    }
2617
2618 /// Splits an iterator into fixed-size chunks, performing a sequential [`fold()`] on
2619 /// each chunk.
2620 ///
2621 /// Returns an iterator that produces a folded result for each chunk of items
2622 /// produced by this iterator.
2623 ///
2624 /// This works essentially like `fold_chunks(chunk_size, || init.clone(), fold_op)`,
2625 /// except it doesn't require the `init` type to be `Sync`, nor any other form of
2626 /// added synchronization.
2627 ///
2628 /// [`fold()`]: std::iter::Iterator#method.fold
2629 ///
2630 /// **Panics** if `chunk_size` is 0.
2631 ///
2632 /// # Examples
2633 ///
2634 /// ```
2635 /// use rayon::prelude::*;
2636 /// let nums = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
2637 /// let chunk_sums = nums.into_par_iter().fold_chunks_with(2, 0, |a, n| a + n).collect::<Vec<_>>();
2638 /// assert_eq!(chunk_sums, vec![3, 7, 11, 15, 19]);
2639 /// ```
    #[track_caller]
    fn fold_chunks_with<T, F>(
        self,
        chunk_size: usize,
        init: T,
        fold_op: F,
    ) -> FoldChunksWith<Self, T, F>
    where
        // `Clone` (rather than a `Sync` identity closure) supplies each
        // chunk's starting accumulator.
        T: Send + Clone,
        F: Fn(T, Self::Item) -> T + Send + Sync,
    {
        // Same precondition as `chunks`; `#[track_caller]` points the panic
        // at the user's call site.
        assert!(chunk_size != 0, "chunk_size must not be zero");
        FoldChunksWith::new(self, chunk_size, init, fold_op)
    }
2654
2655 /// Lexicographically compares the elements of this `ParallelIterator` with those of
2656 /// another.
2657 ///
2658 /// # Examples
2659 ///
2660 /// ```
2661 /// use rayon::prelude::*;
2662 /// use std::cmp::Ordering::*;
2663 ///
2664 /// let x = vec![1, 2, 3];
2665 /// assert_eq!(x.par_iter().cmp(&vec![1, 3, 0]), Less);
2666 /// assert_eq!(x.par_iter().cmp(&vec![1, 2, 3]), Equal);
2667 /// assert_eq!(x.par_iter().cmp(&vec![1, 2]), Greater);
2668 /// ```
2669 fn cmp<I>(self, other: I) -> Ordering
2670 where
2671 I: IntoParallelIterator<Item = Self::Item>,
2672 I::Iter: IndexedParallelIterator,
2673 Self::Item: Ord,
2674 {
2675 #[inline]
2676 fn ordering<T: Ord>((x, y): (T, T)) -> Ordering {
2677 Ord::cmp(&x, &y)
2678 }
2679
2680 #[inline]
2681 fn inequal(&ord: &Ordering) -> bool {
2682 ord != Ordering::Equal
2683 }
2684
2685 let other = other.into_par_iter();
2686 let ord_len = self.len().cmp(&other.len());
2687 self.zip(other)
2688 .map(ordering)
2689 .find_first(inequal)
2690 .unwrap_or(ord_len)
2691 }
2692
2693 /// Lexicographically compares the elements of this `ParallelIterator` with those of
2694 /// another.
2695 ///
2696 /// # Examples
2697 ///
2698 /// ```
2699 /// use rayon::prelude::*;
2700 /// use std::cmp::Ordering::*;
2701 /// use std::f64::NAN;
2702 ///
2703 /// let x = vec![1.0, 2.0, 3.0];
2704 /// assert_eq!(x.par_iter().partial_cmp(&vec![1.0, 3.0, 0.0]), Some(Less));
2705 /// assert_eq!(x.par_iter().partial_cmp(&vec![1.0, 2.0, 3.0]), Some(Equal));
2706 /// assert_eq!(x.par_iter().partial_cmp(&vec![1.0, 2.0]), Some(Greater));
2707 /// assert_eq!(x.par_iter().partial_cmp(&vec![1.0, NAN]), None);
2708 /// ```
2709 fn partial_cmp<I>(self, other: I) -> Option<Ordering>
2710 where
2711 I: IntoParallelIterator,
2712 I::Iter: IndexedParallelIterator,
2713 Self::Item: PartialOrd<I::Item>,
2714 {
2715 #[inline]
2716 fn ordering<T: PartialOrd<U>, U>((x, y): (T, U)) -> Option<Ordering> {
2717 PartialOrd::partial_cmp(&x, &y)
2718 }
2719
2720 #[inline]
2721 fn inequal(&ord: &Option<Ordering>) -> bool {
2722 ord != Some(Ordering::Equal)
2723 }
2724
2725 let other = other.into_par_iter();
2726 let ord_len = self.len().cmp(&other.len());
2727 self.zip(other)
2728 .map(ordering)
2729 .find_first(inequal)
2730 .unwrap_or(Some(ord_len))
2731 }
2732
2733 /// Determines if the elements of this `ParallelIterator`
2734 /// are equal to those of another
2735 fn eq<I>(self, other: I) -> bool
2736 where
2737 I: IntoParallelIterator,
2738 I::Iter: IndexedParallelIterator,
2739 Self::Item: PartialEq<I::Item>,
2740 {
2741 #[inline]
2742 fn eq<T: PartialEq<U>, U>((x, y): (T, U)) -> bool {
2743 PartialEq::eq(&x, &y)
2744 }
2745
2746 let other = other.into_par_iter();
2747 self.len() == other.len() && self.zip(other).all(eq)
2748 }
2749
2750 /// Determines if the elements of this `ParallelIterator`
2751 /// are unequal to those of another
    fn ne<I>(self, other: I) -> bool
    where
        I: IntoParallelIterator,
        I::Iter: IndexedParallelIterator,
        Self::Item: PartialEq<I::Item>,
    {
        // Defined simply as the negation of `eq`.
        !self.eq(other)
    }
2760
2761 /// Determines if the elements of this `ParallelIterator`
2762 /// are lexicographically less than those of another.
2763 fn lt<I>(self, other: I) -> bool
2764 where
2765 I: IntoParallelIterator,
2766 I::Iter: IndexedParallelIterator,
2767 Self::Item: PartialOrd<I::Item>,
2768 {
2769 self.partial_cmp(other) == Some(Ordering::Less)
2770 }
2771
2772 /// Determines if the elements of this `ParallelIterator`
2773 /// are less or equal to those of another.
2774 fn le<I>(self, other: I) -> bool
2775 where
2776 I: IntoParallelIterator,
2777 I::Iter: IndexedParallelIterator,
2778 Self::Item: PartialOrd<I::Item>,
2779 {
2780 let ord = self.partial_cmp(other);
2781 ord == Some(Ordering::Equal) || ord == Some(Ordering::Less)
2782 }
2783
2784 /// Determines if the elements of this `ParallelIterator`
2785 /// are lexicographically greater than those of another.
2786 fn gt<I>(self, other: I) -> bool
2787 where
2788 I: IntoParallelIterator,
2789 I::Iter: IndexedParallelIterator,
2790 Self::Item: PartialOrd<I::Item>,
2791 {
2792 self.partial_cmp(other) == Some(Ordering::Greater)
2793 }
2794
    /// Determines if the elements of this `ParallelIterator`
    /// are greater or equal to those of another.
2797 fn ge<I>(self, other: I) -> bool
2798 where
2799 I: IntoParallelIterator,
2800 I::Iter: IndexedParallelIterator,
2801 Self::Item: PartialOrd<I::Item>,
2802 {
2803 let ord = self.partial_cmp(other);
2804 ord == Some(Ordering::Equal) || ord == Some(Ordering::Greater)
2805 }
2806
2807 /// Yields an index along with each item.
2808 ///
2809 /// # Examples
2810 ///
2811 /// ```
2812 /// use rayon::prelude::*;
2813 ///
2814 /// let chars = vec!['a', 'b', 'c'];
2815 /// let result: Vec<_> = chars
2816 /// .into_par_iter()
2817 /// .enumerate()
2818 /// .collect();
2819 ///
2820 /// assert_eq!(result, [(0, 'a'), (1, 'b'), (2, 'c')]);
2821 /// ```
    fn enumerate(self) -> Enumerate<Self> {
        // Indexing is possible precisely because this iterator is indexed.
        Enumerate::new(self)
    }
2825
2826 /// Creates an iterator that steps by the given amount
2827 ///
2828 /// # Examples
2829 ///
2830 /// ```
    /// use rayon::prelude::*;
2832 ///
2833 /// let range = (3..10);
2834 /// let result: Vec<i32> = range
2835 /// .into_par_iter()
2836 /// .step_by(3)
2837 /// .collect();
2838 ///
2839 /// assert_eq!(result, [3, 6, 9])
2840 /// ```
    fn step_by(self, step: usize) -> StepBy<Self> {
        // Wrapping only; the adaptor implements the stride logic.
        StepBy::new(self, step)
    }
2844
2845 /// Creates an iterator that skips the first `n` elements.
2846 ///
2847 /// # Examples
2848 ///
2849 /// ```
2850 /// use rayon::prelude::*;
2851 ///
2852 /// let result: Vec<_> = (0..100)
2853 /// .into_par_iter()
2854 /// .skip(95)
2855 /// .collect();
2856 ///
2857 /// assert_eq!(result, [95, 96, 97, 98, 99]);
2858 /// ```
    fn skip(self, n: usize) -> Skip<Self> {
        // Positional skip of the first `n` items, unlike `skip_any`.
        Skip::new(self, n)
    }
2862
2863 /// Creates an iterator that yields the first `n` elements.
2864 ///
2865 /// # Examples
2866 ///
2867 /// ```
2868 /// use rayon::prelude::*;
2869 ///
2870 /// let result: Vec<_> = (0..100)
2871 /// .into_par_iter()
2872 /// .take(5)
2873 /// .collect();
2874 ///
2875 /// assert_eq!(result, [0, 1, 2, 3, 4]);
2876 /// ```
    fn take(self, n: usize) -> Take<Self> {
        // Positional take of the first `n` items, unlike `take_any`.
        Take::new(self, n)
    }
2880
2881 /// Searches for **some** item in the parallel iterator that
2882 /// matches the given predicate, and returns its index. Like
2883 /// `ParallelIterator::find_any`, the parallel search will not
2884 /// necessarily find the **first** match, and once a match is
2885 /// found we'll attempt to stop processing any more.
2886 ///
2887 /// # Examples
2888 ///
2889 /// ```
2890 /// use rayon::prelude::*;
2891 ///
2892 /// let a = [1, 2, 3, 3];
2893 ///
2894 /// let i = a.par_iter().position_any(|&x| x == 3).expect("found");
2895 /// assert!(i == 2 || i == 3);
2896 ///
2897 /// assert_eq!(a.par_iter().position_any(|&x| x == 100), None);
2898 /// ```
2899 fn position_any<P>(self, predicate: P) -> Option<usize>
2900 where
2901 P: Fn(Self::Item) -> bool + Sync + Send,
2902 {
2903 #[inline]
2904 fn check(&(_, p): &(usize, bool)) -> bool {
2905 p
2906 }
2907
2908 let (i, _) = self.map(predicate).enumerate().find_any(check)?;
2909 Some(i)
2910 }
2911
2912 /// Searches for the sequentially **first** item in the parallel iterator
2913 /// that matches the given predicate, and returns its index.
2914 ///
2915 /// Like `ParallelIterator::find_first`, once a match is found,
2916 /// all attempts to the right of the match will be stopped, while
2917 /// attempts to the left must continue in case an earlier match
2918 /// is found.
2919 ///
2920 /// Note that not all parallel iterators have a useful order, much like
2921 /// sequential `HashMap` iteration, so "first" may be nebulous. If you
2922 /// just want the first match that discovered anywhere in the iterator,
2923 /// `position_any` is a better choice.
2924 ///
2925 /// # Examples
2926 ///
2927 /// ```
2928 /// use rayon::prelude::*;
2929 ///
2930 /// let a = [1, 2, 3, 3];
2931 ///
2932 /// assert_eq!(a.par_iter().position_first(|&x| x == 3), Some(2));
2933 ///
2934 /// assert_eq!(a.par_iter().position_first(|&x| x == 100), None);
2935 /// ```
2936 fn position_first<P>(self, predicate: P) -> Option<usize>
2937 where
2938 P: Fn(Self::Item) -> bool + Sync + Send,
2939 {
2940 #[inline]
2941 fn check(&(_, p): &(usize, bool)) -> bool {
2942 p
2943 }
2944
2945 let (i, _) = self.map(predicate).enumerate().find_first(check)?;
2946 Some(i)
2947 }
2948
2949 /// Searches for the sequentially **last** item in the parallel iterator
2950 /// that matches the given predicate, and returns its index.
2951 ///
2952 /// Like `ParallelIterator::find_last`, once a match is found,
2953 /// all attempts to the left of the match will be stopped, while
2954 /// attempts to the right must continue in case a later match
2955 /// is found.
2956 ///
2957 /// Note that not all parallel iterators have a useful order, much like
2958 /// sequential `HashMap` iteration, so "last" may be nebulous. When the
2959 /// order doesn't actually matter to you, `position_any` is a better
2960 /// choice.
2961 ///
2962 /// # Examples
2963 ///
2964 /// ```
2965 /// use rayon::prelude::*;
2966 ///
2967 /// let a = [1, 2, 3, 3];
2968 ///
2969 /// assert_eq!(a.par_iter().position_last(|&x| x == 3), Some(3));
2970 ///
2971 /// assert_eq!(a.par_iter().position_last(|&x| x == 100), None);
2972 /// ```
2973 fn position_last<P>(self, predicate: P) -> Option<usize>
2974 where
2975 P: Fn(Self::Item) -> bool + Sync + Send,
2976 {
2977 #[inline]
2978 fn check(&(_, p): &(usize, bool)) -> bool {
2979 p
2980 }
2981
2982 let (i, _) = self.map(predicate).enumerate().find_last(check)?;
2983 Some(i)
2984 }
2985
2986 #[doc(hidden)]
2987 #[deprecated(
2988 note = "parallel `position` does not search in order -- use `position_any`, \\
2989 `position_first`, or `position_last`"
2990 )]
2991 fn position<P>(self, predicate: P) -> Option<usize>
2992 where
2993 P: Fn(Self::Item) -> bool + Sync + Send,
2994 {
2995 self.position_any(predicate)
2996 }
2997
2998 /// Searches for items in the parallel iterator that match the given
2999 /// predicate, and returns their indices.
3000 ///
3001 /// # Examples
3002 ///
3003 /// ```
3004 /// use rayon::prelude::*;
3005 ///
3006 /// let primes = vec![2, 3, 5, 7, 11, 13, 17, 19, 23, 29];
3007 ///
3008 /// // Find the positions of primes congruent to 1 modulo 6
3009 /// let p1mod6: Vec<_> = primes.par_iter().positions(|&p| p % 6 == 1).collect();
3010 /// assert_eq!(p1mod6, [3, 5, 7]); // primes 7, 13, and 19
3011 ///
3012 /// // Find the positions of primes congruent to 5 modulo 6
3013 /// let p5mod6: Vec<_> = primes.par_iter().positions(|&p| p % 6 == 5).collect();
3014 /// assert_eq!(p5mod6, [2, 4, 6, 8, 9]); // primes 5, 11, 17, 23, and 29
3015 /// ```
3016 fn positions<P>(self, predicate: P) -> Positions<Self, P>
3017 where
3018 P: Fn(Self::Item) -> bool + Sync + Send,
3019 {
3020 Positions::new(self, predicate)
3021 }
3022
3023 /// Produces a new iterator with the elements of this iterator in
3024 /// reverse order.
3025 ///
3026 /// # Examples
3027 ///
3028 /// ```
3029 /// use rayon::prelude::*;
3030 ///
3031 /// let result: Vec<_> = (0..5)
3032 /// .into_par_iter()
3033 /// .rev()
3034 /// .collect();
3035 ///
3036 /// assert_eq!(result, [4, 3, 2, 1, 0]);
3037 /// ```
3038 fn rev(self) -> Rev<Self> {
3039 Rev::new(self)
3040 }
3041
3042 /// Sets the minimum length of iterators desired to process in each
3043 /// rayon job. Rayon will not split any smaller than this length, but
3044 /// of course an iterator could already be smaller to begin with.
3045 ///
3046 /// Producers like `zip` and `interleave` will use greater of the two
3047 /// minimums.
3048 /// Chained iterators and iterators inside `flat_map` may each use
3049 /// their own minimum length.
3050 ///
3051 /// # Examples
3052 ///
3053 /// ```
3054 /// use rayon::prelude::*;
3055 ///
3056 /// let min = (0..1_000_000)
3057 /// .into_par_iter()
3058 /// .with_min_len(1234)
3059 /// .fold(|| 0, |acc, _| acc + 1) // count how many are in this segment
3060 /// .min().unwrap();
3061 ///
3062 /// assert!(min >= 1234);
3063 /// ```
3064 fn with_min_len(self, min: usize) -> MinLen<Self> {
3065 MinLen::new(self, min)
3066 }
3067
3068 /// Sets the maximum length of iterators desired to process in each
3069 /// rayon job. Rayon will try to split at least below this length,
3070 /// unless that would put it below the length from `with_min_len()`.
3071 /// For example, given min=10 and max=15, a length of 16 will not be
3072 /// split any further.
3073 ///
3074 /// Producers like `zip` and `interleave` will use lesser of the two
3075 /// maximums.
3076 /// Chained iterators and iterators inside `flat_map` may each use
3077 /// their own maximum length.
3078 ///
3079 /// # Examples
3080 ///
3081 /// ```
3082 /// use rayon::prelude::*;
3083 ///
3084 /// let max = (0..1_000_000)
3085 /// .into_par_iter()
3086 /// .with_max_len(1234)
3087 /// .fold(|| 0, |acc, _| acc + 1) // count how many are in this segment
3088 /// .max().unwrap();
3089 ///
3090 /// assert!(max <= 1234);
3091 /// ```
3092 fn with_max_len(self, max: usize) -> MaxLen<Self> {
3093 MaxLen::new(self, max)
3094 }
3095
    /// Produces an exact count of how many items this iterator will
    /// produce, presuming no panic occurs.
    ///
    /// Note that this borrows `self`, so the count is available without
    /// consuming the iterator.
    ///
    /// # Examples
    ///
    /// ```
    /// use rayon::prelude::*;
    ///
    /// let par_iter = (0..100).into_par_iter().zip(vec![0; 10]);
    /// assert_eq!(par_iter.len(), 10);
    ///
    /// let vec: Vec<_> = par_iter.collect();
    /// assert_eq!(vec.len(), 10);
    /// ```
    fn len(&self) -> usize;
3111
    /// Internal method used to define the behavior of this parallel
    /// iterator. You should not need to call this directly.
    ///
    /// This method causes the iterator `self` to start producing
    /// items and to feed them to the consumer `consumer` one by one.
    /// It may split the consumer before doing so to create the
    /// opportunity to produce in parallel. If a split does happen, it
    /// will inform the consumer of the index where the split should
    /// occur (unlike `ParallelIterator::drive_unindexed()`).
    ///
    /// The return value, `C::Result`, is whatever the consumer produces
    /// once it has been fed all items.
    ///
    /// See the [README] for more details on the internals of parallel
    /// iterators.
    ///
    /// [README]: https://github.com/rayon-rs/rayon/blob/master/src/iter/plumbing/README.md
    fn drive<C: Consumer<Self::Item>>(self, consumer: C) -> C::Result;
3127
    /// Internal method used to define the behavior of this parallel
    /// iterator. You should not need to call this directly.
    ///
    /// This method converts the iterator into a producer P and then
    /// invokes `callback.callback()` with P. Note that the type of
    /// this producer is not defined as part of the API, since
    /// `callback` must be defined generically for all producers. This
    /// allows the producer type to contain references; it also means
    /// that parallel iterators can adjust that type without causing a
    /// breaking change.
    ///
    /// The return value is whatever the callback produces (`CB::Output`).
    ///
    /// See the [README] for more details on the internals of parallel
    /// iterators.
    ///
    /// [README]: https://github.com/rayon-rs/rayon/blob/master/src/iter/plumbing/README.md
    fn with_producer<CB: ProducerCallback<Self::Item>>(self, callback: CB) -> CB::Output;
3144}
3145
/// `FromParallelIterator` implements the creation of a collection
/// from a [`ParallelIterator`]. By implementing
/// `FromParallelIterator` for a given type, you define how it will be
/// created from an iterator.
///
/// `FromParallelIterator` is used through [`ParallelIterator`]'s [`collect()`] method.
///
/// [`ParallelIterator`]: trait.ParallelIterator.html
/// [`collect()`]: trait.ParallelIterator.html#method.collect
///
/// # Examples
///
/// Implementing `FromParallelIterator` for your type:
///
/// ```
/// use rayon::prelude::*;
/// use std::mem;
///
/// struct BlackHole {
///     mass: usize,
/// }
///
/// impl<T: Send> FromParallelIterator<T> for BlackHole {
///     fn from_par_iter<I>(par_iter: I) -> Self
///         where I: IntoParallelIterator<Item = T>
///     {
///         let par_iter = par_iter.into_par_iter();
///         BlackHole {
///             mass: par_iter.count() * mem::size_of::<T>(),
///         }
///     }
/// }
///
/// let bh: BlackHole = (0i32..1000).into_par_iter().collect();
/// assert_eq!(bh.mass, 4000);
/// ```
pub trait FromParallelIterator<T>
where
    T: Send,
{
    /// Creates an instance of the collection from the parallel iterator `par_iter`.
    ///
    /// If your collection is not naturally parallel, the easiest (and
    /// fastest) way to do this is often to collect `par_iter` into a
    /// [`LinkedList`] or other intermediate data structure and then
    /// sequentially extend your collection. However, a more 'native'
    /// technique is to use the [`par_iter.fold`] or
    /// [`par_iter.fold_with`] methods to create the collection.
    /// Alternatively, if your collection is 'natively' parallel, you
    /// can use [`par_iter.for_each`] to process each element in turn.
    ///
    /// [`LinkedList`]: https://doc.rust-lang.org/std/collections/struct.LinkedList.html
    /// [`par_iter.fold`]: trait.ParallelIterator.html#method.fold
    /// [`par_iter.fold_with`]: trait.ParallelIterator.html#method.fold_with
    /// [`par_iter.for_each`]: trait.ParallelIterator.html#method.for_each
    fn from_par_iter<I>(par_iter: I) -> Self
    where
        I: IntoParallelIterator<Item = T>;
}
3205
/// `ParallelExtend` extends an existing collection with items from a [`ParallelIterator`].
///
/// This is the parallel counterpart of growing a collection in place:
/// the receiver keeps its existing contents and appends the items drawn
/// from the iterator.
///
/// [`ParallelIterator`]: trait.ParallelIterator.html
///
/// # Examples
///
/// Implementing `ParallelExtend` for your type:
///
/// ```
/// use rayon::prelude::*;
/// use std::mem;
///
/// struct BlackHole {
///     mass: usize,
/// }
///
/// impl<T: Send> ParallelExtend<T> for BlackHole {
///     fn par_extend<I>(&mut self, par_iter: I)
///         where I: IntoParallelIterator<Item = T>
///     {
///         let par_iter = par_iter.into_par_iter();
///         self.mass += par_iter.count() * mem::size_of::<T>();
///     }
/// }
///
/// let mut bh = BlackHole { mass: 0 };
/// bh.par_extend(0i32..1000);
/// assert_eq!(bh.mass, 4000);
/// bh.par_extend(0i64..10);
/// assert_eq!(bh.mass, 4080);
/// ```
pub trait ParallelExtend<T>
where
    T: Send,
{
    /// Extends an instance of the collection with the elements drawn
    /// from the parallel iterator `par_iter`.
    ///
    /// # Examples
    ///
    /// ```
    /// use rayon::prelude::*;
    ///
    /// let mut vec = vec![];
    /// vec.par_extend(0..5);
    /// vec.par_extend((0..5).into_par_iter().map(|i| i * i));
    /// assert_eq!(vec, [0, 1, 2, 3, 4, 0, 1, 4, 9, 16]);
    /// ```
    fn par_extend<I>(&mut self, par_iter: I)
    where
        I: IntoParallelIterator<Item = T>;
}
3258
/// `ParallelDrainFull` creates a parallel iterator that moves all items
/// from a collection while retaining the original capacity.
///
/// Types which are indexable typically implement [`ParallelDrainRange`]
/// instead, where you can drain fully with `par_drain(..)`.
///
/// [`ParallelDrainRange`]: trait.ParallelDrainRange.html
pub trait ParallelDrainFull {
    /// The draining parallel iterator type that will be created.
    type Iter: ParallelIterator<Item = Self::Item>;

    /// The type of item that the parallel iterator will produce.
    /// This is usually the same as `IntoParallelIterator::Item`.
    type Item: Send;

    /// Returns a draining parallel iterator over an entire collection.
    ///
    /// When the iterator is dropped, all items are removed, even if the
    /// iterator was not fully consumed. If the iterator is leaked, for example
    /// using `std::mem::forget`, it is unspecified how many items are removed.
    ///
    /// Note that `par_drain` takes `self` by value; implementations are
    /// expected on mutable borrows of the collection (as in the example
    /// below, where `heap` remains usable afterwards).
    ///
    /// # Examples
    ///
    /// ```
    /// use rayon::prelude::*;
    /// use std::collections::{BinaryHeap, HashSet};
    ///
    /// let squares: HashSet<i32> = (0..10).map(|x| x * x).collect();
    ///
    /// let mut heap: BinaryHeap<_> = squares.iter().copied().collect();
    /// assert_eq!(
    ///     // heaps are drained in arbitrary order
    ///     heap.par_drain()
    ///         .inspect(|x| assert!(squares.contains(x)))
    ///         .count(),
    ///     squares.len(),
    /// );
    /// assert!(heap.is_empty());
    /// assert!(heap.capacity() >= squares.len());
    /// ```
    fn par_drain(self) -> Self::Iter;
}
3301
/// `ParallelDrainRange` creates a parallel iterator that moves a range of items
/// from a collection while retaining the original capacity.
///
/// Types which are not indexable may implement [`ParallelDrainFull`] instead.
///
/// [`ParallelDrainFull`]: trait.ParallelDrainFull.html
pub trait ParallelDrainRange<Idx = usize> {
    /// The draining parallel iterator type that will be created.
    type Iter: ParallelIterator<Item = Self::Item>;

    /// The type of item that the parallel iterator will produce.
    /// This is usually the same as `IntoParallelIterator::Item`.
    type Item: Send;

    /// Returns a draining parallel iterator over a range of the collection.
    ///
    /// When the iterator is dropped, all items in the range are removed, even
    /// if the iterator was not fully consumed. If the iterator is leaked, for
    /// example using `std::mem::forget`, it is unspecified how many items are
    /// removed.
    ///
    /// The range may be any of the standard `RangeBounds` forms, as the
    /// example below demonstrates for each variant.
    ///
    /// # Examples
    ///
    /// ```
    /// use rayon::prelude::*;
    ///
    /// let squares: Vec<i32> = (0..10).map(|x| x * x).collect();
    ///
    /// println!("RangeFull");
    /// let mut vec = squares.clone();
    /// assert!(vec.par_drain(..)
    ///            .eq(squares.par_iter().copied()));
    /// assert!(vec.is_empty());
    /// assert!(vec.capacity() >= squares.len());
    ///
    /// println!("RangeFrom");
    /// let mut vec = squares.clone();
    /// assert!(vec.par_drain(5..)
    ///            .eq(squares[5..].par_iter().copied()));
    /// assert_eq!(&vec[..], &squares[..5]);
    /// assert!(vec.capacity() >= squares.len());
    ///
    /// println!("RangeTo");
    /// let mut vec = squares.clone();
    /// assert!(vec.par_drain(..5)
    ///            .eq(squares[..5].par_iter().copied()));
    /// assert_eq!(&vec[..], &squares[5..]);
    /// assert!(vec.capacity() >= squares.len());
    ///
    /// println!("RangeToInclusive");
    /// let mut vec = squares.clone();
    /// assert!(vec.par_drain(..=5)
    ///            .eq(squares[..=5].par_iter().copied()));
    /// assert_eq!(&vec[..], &squares[6..]);
    /// assert!(vec.capacity() >= squares.len());
    ///
    /// println!("Range");
    /// let mut vec = squares.clone();
    /// assert!(vec.par_drain(3..7)
    ///            .eq(squares[3..7].par_iter().copied()));
    /// assert_eq!(&vec[..3], &squares[..3]);
    /// assert_eq!(&vec[3..], &squares[7..]);
    /// assert!(vec.capacity() >= squares.len());
    ///
    /// println!("RangeInclusive");
    /// let mut vec = squares.clone();
    /// assert!(vec.par_drain(3..=7)
    ///            .eq(squares[3..=7].par_iter().copied()));
    /// assert_eq!(&vec[..3], &squares[..3]);
    /// assert_eq!(&vec[3..], &squares[8..]);
    /// assert!(vec.capacity() >= squares.len());
    /// ```
    fn par_drain<R: RangeBounds<Idx>>(self, range: R) -> Self::Iter;
}
3376
/// We hide the `Try` trait in a private module, as it's only meant to be a
/// stable clone of the standard library's `Try` trait, as yet unstable.
mod private {
    use std::convert::Infallible;
    use std::ops::ControlFlow::{self, Break, Continue};
    use std::task::Poll;

    /// Clone of `std::ops::Try`.
    ///
    /// Implementing this trait is not permitted outside of `rayon`.
    pub trait Try {
        private_decl! {}

        // The value produced on the "keep going" path.
        type Output;

        // The value carried on the short-circuit ("break") path.
        type Residual;

        // Wraps a plain output back into `Self` (e.g. `Some(output)`).
        fn from_output(output: Self::Output) -> Self;

        // Rebuilds `Self` from a short-circuit residual (e.g. `None`).
        fn from_residual(residual: Self::Residual) -> Self;

        // Splits `self` into `Continue(output)` or `Break(residual)`.
        fn branch(self) -> ControlFlow<Self::Residual, Self::Output>;
    }

    impl<B, C> Try for ControlFlow<B, C> {
        private_impl! {}

        type Output = C;
        // `Infallible` in the `Continue` slot means a residual can only
        // ever be `Break`, which makes the `Continue` arms below impossible.
        type Residual = ControlFlow<B, Infallible>;

        fn from_output(output: Self::Output) -> Self {
            Continue(output)
        }

        fn from_residual(residual: Self::Residual) -> Self {
            match residual {
                Break(b) => Break(b),
                // Residual's `Continue` holds `Infallible`, so this arm
                // can never be reached.
                Continue(_) => unreachable!(),
            }
        }

        fn branch(self) -> ControlFlow<Self::Residual, Self::Output> {
            match self {
                Continue(c) => Continue(c),
                Break(b) => Break(Break(b)),
            }
        }
    }

    impl<T> Try for Option<T> {
        private_impl! {}

        type Output = T;
        // `Option<Infallible>` can only be `None`, so a residual is
        // exactly "the `None` case".
        type Residual = Option<Infallible>;

        fn from_output(output: Self::Output) -> Self {
            Some(output)
        }

        fn from_residual(residual: Self::Residual) -> Self {
            match residual {
                None => None,
                // `Some(Infallible)` is uninhabited.
                Some(_) => unreachable!(),
            }
        }

        fn branch(self) -> ControlFlow<Self::Residual, Self::Output> {
            match self {
                Some(c) => Continue(c),
                None => Break(None),
            }
        }
    }

    impl<T, E> Try for Result<T, E> {
        private_impl! {}

        type Output = T;
        // `Result<Infallible, E>` can only be `Err(e)`, so a residual
        // carries exactly the error value.
        type Residual = Result<Infallible, E>;

        fn from_output(output: Self::Output) -> Self {
            Ok(output)
        }

        fn from_residual(residual: Self::Residual) -> Self {
            match residual {
                Err(e) => Err(e),
                // `Ok(Infallible)` is uninhabited.
                Ok(_) => unreachable!(),
            }
        }

        fn branch(self) -> ControlFlow<Self::Residual, Self::Output> {
            match self {
                Ok(c) => Continue(c),
                Err(e) => Break(Err(e)),
            }
        }
    }

    impl<T, E> Try for Poll<Result<T, E>> {
        private_impl! {}

        // `Pending` counts as "keep going": it is folded into the Output
        // type rather than short-circuiting.
        type Output = Poll<T>;
        type Residual = Result<Infallible, E>;

        fn from_output(output: Self::Output) -> Self {
            output.map(Ok)
        }

        fn from_residual(residual: Self::Residual) -> Self {
            match residual {
                Err(e) => Poll::Ready(Err(e)),
                // `Ok(Infallible)` is uninhabited.
                Ok(_) => unreachable!(),
            }
        }

        fn branch(self) -> ControlFlow<Self::Residual, Self::Output> {
            match self {
                Poll::Pending => Continue(Poll::Pending),
                Poll::Ready(Ok(c)) => Continue(Poll::Ready(c)),
                Poll::Ready(Err(e)) => Break(Err(e)),
            }
        }
    }

    impl<T, E> Try for Poll<Option<Result<T, E>>> {
        private_impl! {}

        // Both `Pending` and a `Ready(None)` end-of-stream are "keep
        // going"; only `Ready(Some(Err(_)))` short-circuits.
        type Output = Poll<Option<T>>;
        type Residual = Result<Infallible, E>;

        fn from_output(output: Self::Output) -> Self {
            match output {
                Poll::Ready(o) => Poll::Ready(o.map(Ok)),
                Poll::Pending => Poll::Pending,
            }
        }

        fn from_residual(residual: Self::Residual) -> Self {
            match residual {
                Err(e) => Poll::Ready(Some(Err(e))),
                // `Ok(Infallible)` is uninhabited.
                Ok(_) => unreachable!(),
            }
        }

        fn branch(self) -> ControlFlow<Self::Residual, Self::Output> {
            match self {
                Poll::Pending => Continue(Poll::Pending),
                Poll::Ready(None) => Continue(Poll::Ready(None)),
                Poll::Ready(Some(Ok(c))) => Continue(Poll::Ready(Some(c))),
                Poll::Ready(Some(Err(e))) => Break(Err(e)),
            }
        }
    }
}