rayon/iter/mod.rs

1//! Traits for writing parallel programs using an iterator-style interface
2//!
3//! You will rarely need to interact with this module directly unless you have
4//! need to name one of the iterator types.
5//!
6//! Parallel iterators make it easy to write iterator-like chains that
7//! execute in parallel: typically all you have to do is convert the
8//! first `.iter()` (or `iter_mut()`, `into_iter()`, etc) method into
9//! `par_iter()` (or `par_iter_mut()`, `into_par_iter()`, etc). For
10//! example, to compute the sum of the squares of a sequence of
11//! integers, one might write:
12//!
13//! ```rust
14//! use rayon::prelude::*;
15//! fn sum_of_squares(input: &[i32]) -> i32 {
16//!     input.par_iter()
17//!          .map(|i| i * i)
18//!          .sum()
19//! }
20//! ```
21//!
22//! Or, to increment all the integers in a slice, you could write:
23//!
24//! ```rust
25//! use rayon::prelude::*;
26//! fn increment_all(input: &mut [i32]) {
27//!     input.par_iter_mut()
28//!          .for_each(|p| *p += 1);
29//! }
30//! ```
31//!
32//! To use parallel iterators, first import the traits by adding
33//! something like `use rayon::prelude::*` to your module. You can
34//! then call `par_iter`, `par_iter_mut`, or `into_par_iter` to get a
35//! parallel iterator. Like a [regular iterator][], parallel
36//! iterators work by first constructing a computation and then
37//! executing it.
38//!
39//! In addition to `par_iter()` and friends, some types offer other
40//! ways to create (or consume) parallel iterators:
41//!
42//! - Slices (`&[T]`, `&mut [T]`) offer methods like `par_split` and
43//!   `par_windows`, as well as various parallel sorting
44//!   operations. See [the `ParallelSlice` trait] for the full list.
45//! - Strings (`&str`) offer methods like `par_split` and `par_lines`.
46//!   See [the `ParallelString` trait] for the full list.
//! - Various collections offer [`par_extend`], which grows a
//!   collection given a parallel iterator; a short sketch follows below.
//!   (If you don't have a collection to extend, you can use [`collect()`]
//!   to create a new one from scratch.)
51//!
52//! [the `ParallelSlice` trait]: ../slice/trait.ParallelSlice.html
53//! [the `ParallelString` trait]: ../str/trait.ParallelString.html
54//! [`par_extend`]: trait.ParallelExtend.html
55//! [`collect()`]: trait.ParallelIterator.html#method.collect
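//!
//! For instance, here is a minimal sketch of growing a `Vec` with
//! [`par_extend`] (other collections implementing `ParallelExtend` work the
//! same way):
//!
//! ```rust
//! use rayon::prelude::*;
//!
//! let mut v = vec![0, 1];
//! // Append the items of a parallel iterator to an existing collection.
//! v.par_extend(2..5);
//! assert_eq!(v, [0, 1, 2, 3, 4]);
//! ```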
56//!
57//! To see the full range of methods available on parallel iterators,
58//! check out the [`ParallelIterator`] and [`IndexedParallelIterator`]
59//! traits.
60//!
61//! If you'd like to build a custom parallel iterator, or to write your own
62//! combinator, then check out the [split] function and the [plumbing] module.
63//!
64//! [regular iterator]: https://doc.rust-lang.org/std/iter/trait.Iterator.html
65//! [`ParallelIterator`]: trait.ParallelIterator.html
66//! [`IndexedParallelIterator`]: trait.IndexedParallelIterator.html
67//! [split]: fn.split.html
68//! [plumbing]: plumbing/index.html
69//!
70//! Note: Several of the `ParallelIterator` methods rely on a `Try` trait which
71//! has been deliberately obscured from the public API.  This trait is intended
72//! to mirror the unstable `std::ops::Try` with implementations for `Option` and
73//! `Result`, where `Some`/`Ok` values will let those iterators continue, but
74//! `None`/`Err` values will exit early.
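//!
//! As a rough illustration, both of the stable implementations can be used
//! with `try_for_each`:
//!
//! ```rust
//! use rayon::prelude::*;
//!
//! // With `Option`, every `Some(())` lets the iteration continue...
//! let all_ok: Option<()> = (0..10).into_par_iter().try_for_each(|_| Some(()));
//! assert_eq!(all_ok, Some(()));
//!
//! // ...while with `Result`, the first `Err` stops it early.
//! let failed: Result<(), i32> = (0..10)
//!     .into_par_iter()
//!     .try_for_each(|x| if x == 5 { Err(x) } else { Ok(()) });
//! assert_eq!(failed, Err(5));
//! ```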
75//!
76//! A note about object safety: It is currently _not_ possible to wrap
77//! a `ParallelIterator` (or any trait that depends on it) using a
78//! `Box<dyn ParallelIterator>` or other kind of dynamic allocation,
79//! because `ParallelIterator` is **not object-safe**.
80//! (This keeps the implementation simpler and allows extra optimizations.)
81
82use self::plumbing::*;
83use self::private::Try;
84pub use either::Either;
85use std::cmp::{self, Ordering};
86use std::iter::{Product, Sum};
87use std::ops::{Fn, RangeBounds};
88
89pub mod plumbing;
90
91#[cfg(test)]
92mod test;
93
94// There is a method to the madness here:
95//
96// - These modules are private but expose certain types to the end-user
97//   (e.g., `enumerate::Enumerate`) -- specifically, the types that appear in the
98//   public API surface of the `ParallelIterator` traits.
99// - In **this** module, those public types are always used unprefixed, which forces
100//   us to add a `pub use` and helps identify if we missed anything.
101// - In contrast, items that appear **only** in the body of a method,
102//   e.g. `find::find()`, are always used **prefixed**, so that they
103//   can be readily distinguished.
104
105mod chain;
106mod chunks;
107mod cloned;
108mod collect;
109mod copied;
110mod empty;
111mod enumerate;
112mod extend;
113mod filter;
114mod filter_map;
115mod find;
116mod find_first_last;
117mod flat_map;
118mod flat_map_iter;
119mod flatten;
120mod flatten_iter;
121mod fold;
122mod fold_chunks;
123mod fold_chunks_with;
124mod for_each;
125mod from_par_iter;
126mod inspect;
127mod interleave;
128mod interleave_shortest;
129mod intersperse;
130mod len;
131mod map;
132mod map_with;
133mod multizip;
134mod noop;
135mod once;
136mod panic_fuse;
137mod par_bridge;
138mod positions;
139mod product;
140mod reduce;
141mod repeat;
142mod rev;
143mod skip;
144mod skip_any;
145mod skip_any_while;
146mod splitter;
147mod step_by;
148mod sum;
149mod take;
150mod take_any;
151mod take_any_while;
152mod try_fold;
153mod try_reduce;
154mod try_reduce_with;
155mod unzip;
156mod update;
157mod while_some;
158mod zip;
159mod zip_eq;
160
161pub use self::{
162    chain::Chain,
163    chunks::Chunks,
164    cloned::Cloned,
165    copied::Copied,
166    empty::{empty, Empty},
167    enumerate::Enumerate,
168    filter::Filter,
169    filter_map::FilterMap,
170    flat_map::FlatMap,
171    flat_map_iter::FlatMapIter,
172    flatten::Flatten,
173    flatten_iter::FlattenIter,
174    fold::{Fold, FoldWith},
175    fold_chunks::FoldChunks,
176    fold_chunks_with::FoldChunksWith,
177    inspect::Inspect,
178    interleave::Interleave,
179    interleave_shortest::InterleaveShortest,
180    intersperse::Intersperse,
181    len::{MaxLen, MinLen},
182    map::Map,
183    map_with::{MapInit, MapWith},
184    multizip::MultiZip,
185    once::{once, Once},
186    panic_fuse::PanicFuse,
187    par_bridge::{IterBridge, ParallelBridge},
188    positions::Positions,
189    repeat::{repeat, repeatn, Repeat, RepeatN},
190    rev::Rev,
191    skip::Skip,
192    skip_any::SkipAny,
193    skip_any_while::SkipAnyWhile,
194    splitter::{split, Split},
195    step_by::StepBy,
196    take::Take,
197    take_any::TakeAny,
198    take_any_while::TakeAnyWhile,
199    try_fold::{TryFold, TryFoldWith},
200    update::Update,
201    while_some::WhileSome,
202    zip::Zip,
203    zip_eq::ZipEq,
204};
205
206/// `IntoParallelIterator` implements the conversion to a [`ParallelIterator`].
207///
/// By implementing `IntoParallelIterator` for a type, you define how it will be
/// converted into a parallel iterator. This is a parallel version of the standard
210/// library's [`std::iter::IntoIterator`] trait.
211///
212/// [`ParallelIterator`]: trait.ParallelIterator.html
213/// [`std::iter::IntoIterator`]: https://doc.rust-lang.org/std/iter/trait.IntoIterator.html
214pub trait IntoParallelIterator {
215    /// The parallel iterator type that will be created.
216    type Iter: ParallelIterator<Item = Self::Item>;
217
218    /// The type of item that the parallel iterator will produce.
219    type Item: Send;
220
221    /// Converts `self` into a parallel iterator.
222    ///
223    /// # Examples
224    ///
225    /// ```
226    /// use rayon::prelude::*;
227    ///
228    /// println!("counting in parallel:");
229    /// (0..100).into_par_iter()
230    ///     .for_each(|i| println!("{}", i));
231    /// ```
232    ///
233    /// This conversion is often implicit for arguments to methods like [`zip`].
234    ///
235    /// ```
236    /// use rayon::prelude::*;
237    ///
238    /// let v: Vec<_> = (0..5).into_par_iter().zip(5..10).collect();
239    /// assert_eq!(v, [(0, 5), (1, 6), (2, 7), (3, 8), (4, 9)]);
240    /// ```
241    ///
242    /// [`zip`]: trait.IndexedParallelIterator.html#method.zip
243    fn into_par_iter(self) -> Self::Iter;
244}
245
246/// `IntoParallelRefIterator` implements the conversion to a
247/// [`ParallelIterator`], providing shared references to the data.
248///
249/// This is a parallel version of the `iter()` method
250/// defined by various collections.
251///
252/// This trait is automatically implemented
253/// `for I where &I: IntoParallelIterator`. In most cases, users
254/// will want to implement [`IntoParallelIterator`] rather than implement
255/// this trait directly.
256///
257/// [`ParallelIterator`]: trait.ParallelIterator.html
258/// [`IntoParallelIterator`]: trait.IntoParallelIterator.html
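///
/// For example, here is a hypothetical newtype that opts in by implementing
/// `IntoParallelIterator` for its reference type; the blanket impl below then
/// provides `par_iter()` for free (an illustrative sketch only):
///
/// ```
/// use rayon::prelude::*;
///
/// struct Grid(Vec<f32>);
///
/// impl<'data> IntoParallelIterator for &'data Grid {
///     type Iter = rayon::slice::Iter<'data, f32>;
///     type Item = &'data f32;
///
///     fn into_par_iter(self) -> Self::Iter {
///         self.0.par_iter()
///     }
/// }
///
/// let grid = Grid(vec![1.0, 2.0, 3.0]);
/// assert_eq!(grid.par_iter().sum::<f32>(), 6.0);
/// ```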
259pub trait IntoParallelRefIterator<'data> {
260    /// The type of the parallel iterator that will be returned.
261    type Iter: ParallelIterator<Item = Self::Item>;
262
263    /// The type of item that the parallel iterator will produce.
264    /// This will typically be an `&'data T` reference type.
265    type Item: Send + 'data;
266
267    /// Converts `self` into a parallel iterator.
268    ///
269    /// # Examples
270    ///
271    /// ```
272    /// use rayon::prelude::*;
273    ///
274    /// let v: Vec<_> = (0..100).collect();
275    /// assert_eq!(v.par_iter().sum::<i32>(), 100 * 99 / 2);
276    ///
277    /// // `v.par_iter()` is shorthand for `(&v).into_par_iter()`,
278    /// // producing the exact same references.
279    /// assert!(v.par_iter().zip(&v)
280    ///          .all(|(a, b)| std::ptr::eq(a, b)));
281    /// ```
282    fn par_iter(&'data self) -> Self::Iter;
283}
284
285impl<'data, I: 'data + ?Sized> IntoParallelRefIterator<'data> for I
286where
287    &'data I: IntoParallelIterator,
288{
289    type Iter = <&'data I as IntoParallelIterator>::Iter;
290    type Item = <&'data I as IntoParallelIterator>::Item;
291
292    fn par_iter(&'data self) -> Self::Iter {
293        self.into_par_iter()
294    }
295}
296
297/// `IntoParallelRefMutIterator` implements the conversion to a
298/// [`ParallelIterator`], providing mutable references to the data.
299///
300/// This is a parallel version of the `iter_mut()` method
301/// defined by various collections.
302///
303/// This trait is automatically implemented
304/// `for I where &mut I: IntoParallelIterator`. In most cases, users
305/// will want to implement [`IntoParallelIterator`] rather than implement
306/// this trait directly.
307///
308/// [`ParallelIterator`]: trait.ParallelIterator.html
309/// [`IntoParallelIterator`]: trait.IntoParallelIterator.html
310pub trait IntoParallelRefMutIterator<'data> {
311    /// The type of iterator that will be created.
312    type Iter: ParallelIterator<Item = Self::Item>;
313
314    /// The type of item that will be produced; this is typically an
315    /// `&'data mut T` reference.
316    type Item: Send + 'data;
317
318    /// Creates the parallel iterator from `self`.
319    ///
320    /// # Examples
321    ///
322    /// ```
323    /// use rayon::prelude::*;
324    ///
325    /// let mut v = vec![0usize; 5];
326    /// v.par_iter_mut().enumerate().for_each(|(i, x)| *x = i);
327    /// assert_eq!(v, [0, 1, 2, 3, 4]);
328    /// ```
329    fn par_iter_mut(&'data mut self) -> Self::Iter;
330}
331
332impl<'data, I: 'data + ?Sized> IntoParallelRefMutIterator<'data> for I
333where
334    &'data mut I: IntoParallelIterator,
335{
336    type Iter = <&'data mut I as IntoParallelIterator>::Iter;
337    type Item = <&'data mut I as IntoParallelIterator>::Item;
338
339    fn par_iter_mut(&'data mut self) -> Self::Iter {
340        self.into_par_iter()
341    }
342}
343
344/// Parallel version of the standard iterator trait.
345///
346/// The combinators on this trait are available on **all** parallel
347/// iterators.  Additional methods can be found on the
348/// [`IndexedParallelIterator`] trait: those methods are only
349/// available for parallel iterators where the number of items is
350/// known in advance (so, e.g., after invoking `filter`, those methods
351/// become unavailable).
352///
353/// For examples of using parallel iterators, see [the docs on the
354/// `iter` module][iter].
355///
356/// [iter]: index.html
357/// [`IndexedParallelIterator`]: trait.IndexedParallelIterator.html
358pub trait ParallelIterator: Sized + Send {
359    /// The type of item that this parallel iterator produces.
360    /// For example, if you use the [`for_each`] method, this is the type of
361    /// item that your closure will be invoked with.
362    ///
363    /// [`for_each`]: #method.for_each
364    type Item: Send;
365
366    /// Executes `OP` on each item produced by the iterator, in parallel.
367    ///
368    /// # Examples
369    ///
370    /// ```
371    /// use rayon::prelude::*;
372    ///
373    /// (0..100).into_par_iter().for_each(|x| println!("{:?}", x));
374    /// ```
375    fn for_each<OP>(self, op: OP)
376    where
377        OP: Fn(Self::Item) + Sync + Send,
378    {
379        for_each::for_each(self, &op)
380    }
381
382    /// Executes `OP` on the given `init` value with each item produced by
383    /// the iterator, in parallel.
384    ///
385    /// The `init` value will be cloned only as needed to be paired with
386    /// the group of items in each rayon job.  It does not require the type
387    /// to be `Sync`.
388    ///
389    /// # Examples
390    ///
391    /// ```
392    /// use std::sync::mpsc::channel;
393    /// use rayon::prelude::*;
394    ///
395    /// let (sender, receiver) = channel();
396    ///
397    /// (0..5).into_par_iter().for_each_with(sender, |s, x| s.send(x).unwrap());
398    ///
399    /// let mut res: Vec<_> = receiver.iter().collect();
400    ///
401    /// res.sort();
402    ///
403    /// assert_eq!(&res[..], &[0, 1, 2, 3, 4])
404    /// ```
405    fn for_each_with<OP, T>(self, init: T, op: OP)
406    where
407        OP: Fn(&mut T, Self::Item) + Sync + Send,
408        T: Send + Clone,
409    {
410        self.map_with(init, op).collect()
411    }
412
413    /// Executes `OP` on a value returned by `init` with each item produced by
414    /// the iterator, in parallel.
415    ///
416    /// The `init` function will be called only as needed for a value to be
417    /// paired with the group of items in each rayon job.  There is no
418    /// constraint on that returned type at all!
419    ///
420    /// # Examples
421    ///
422    /// ```
423    /// use rand::Rng;
424    /// use rayon::prelude::*;
425    ///
426    /// let mut v = vec![0u8; 1_000_000];
427    ///
428    /// v.par_chunks_mut(1000)
429    ///     .for_each_init(
430    ///         || rand::thread_rng(),
431    ///         |rng, chunk| rng.fill(chunk),
432    ///     );
433    ///
434    /// // There's a remote chance that this will fail...
435    /// for i in 0u8..=255 {
436    ///     assert!(v.contains(&i));
437    /// }
438    /// ```
439    fn for_each_init<OP, INIT, T>(self, init: INIT, op: OP)
440    where
441        OP: Fn(&mut T, Self::Item) + Sync + Send,
442        INIT: Fn() -> T + Sync + Send,
443    {
444        self.map_init(init, op).collect()
445    }
446
447    /// Executes a fallible `OP` on each item produced by the iterator, in parallel.
448    ///
449    /// If the `OP` returns `Result::Err` or `Option::None`, we will attempt to
450    /// stop processing the rest of the items in the iterator as soon as
451    /// possible, and we will return that terminating value.  Otherwise, we will
452    /// return an empty `Result::Ok(())` or `Option::Some(())`.  If there are
453    /// multiple errors in parallel, it is not specified which will be returned.
454    ///
455    /// # Examples
456    ///
457    /// ```
458    /// use rayon::prelude::*;
459    /// use std::io::{self, Write};
460    ///
461    /// // This will stop iteration early if there's any write error, like
462    /// // having piped output get closed on the other end.
463    /// (0..100).into_par_iter()
464    ///     .try_for_each(|x| writeln!(io::stdout(), "{:?}", x))
465    ///     .expect("expected no write errors");
466    /// ```
467    fn try_for_each<OP, R>(self, op: OP) -> R
468    where
469        OP: Fn(Self::Item) -> R + Sync + Send,
470        R: Try<Output = ()> + Send,
471    {
472        fn ok<R: Try<Output = ()>>(_: (), _: ()) -> R {
473            R::from_output(())
474        }
475
476        self.map(op).try_reduce(<()>::default, ok)
477    }
478
479    /// Executes a fallible `OP` on the given `init` value with each item
480    /// produced by the iterator, in parallel.
481    ///
482    /// This combines the `init` semantics of [`for_each_with()`] and the
483    /// failure semantics of [`try_for_each()`].
484    ///
485    /// [`for_each_with()`]: #method.for_each_with
486    /// [`try_for_each()`]: #method.try_for_each
487    ///
488    /// # Examples
489    ///
490    /// ```
491    /// use std::sync::mpsc::channel;
492    /// use rayon::prelude::*;
493    ///
494    /// let (sender, receiver) = channel();
495    ///
496    /// (0..5).into_par_iter()
497    ///     .try_for_each_with(sender, |s, x| s.send(x))
498    ///     .expect("expected no send errors");
499    ///
500    /// let mut res: Vec<_> = receiver.iter().collect();
501    ///
502    /// res.sort();
503    ///
504    /// assert_eq!(&res[..], &[0, 1, 2, 3, 4])
505    /// ```
506    fn try_for_each_with<OP, T, R>(self, init: T, op: OP) -> R
507    where
508        OP: Fn(&mut T, Self::Item) -> R + Sync + Send,
509        T: Send + Clone,
510        R: Try<Output = ()> + Send,
511    {
512        fn ok<R: Try<Output = ()>>(_: (), _: ()) -> R {
513            R::from_output(())
514        }
515
516        self.map_with(init, op).try_reduce(<()>::default, ok)
517    }
518
519    /// Executes a fallible `OP` on a value returned by `init` with each item
520    /// produced by the iterator, in parallel.
521    ///
522    /// This combines the `init` semantics of [`for_each_init()`] and the
523    /// failure semantics of [`try_for_each()`].
524    ///
525    /// [`for_each_init()`]: #method.for_each_init
526    /// [`try_for_each()`]: #method.try_for_each
527    ///
528    /// # Examples
529    ///
530    /// ```
531    /// use rand::Rng;
532    /// use rayon::prelude::*;
533    ///
534    /// let mut v = vec![0u8; 1_000_000];
535    ///
536    /// v.par_chunks_mut(1000)
537    ///     .try_for_each_init(
538    ///         || rand::thread_rng(),
539    ///         |rng, chunk| rng.try_fill(chunk),
540    ///     )
541    ///     .expect("expected no rand errors");
542    ///
543    /// // There's a remote chance that this will fail...
544    /// for i in 0u8..=255 {
545    ///     assert!(v.contains(&i));
546    /// }
547    /// ```
548    fn try_for_each_init<OP, INIT, T, R>(self, init: INIT, op: OP) -> R
549    where
550        OP: Fn(&mut T, Self::Item) -> R + Sync + Send,
551        INIT: Fn() -> T + Sync + Send,
552        R: Try<Output = ()> + Send,
553    {
554        fn ok<R: Try<Output = ()>>(_: (), _: ()) -> R {
555            R::from_output(())
556        }
557
558        self.map_init(init, op).try_reduce(<()>::default, ok)
559    }
560
561    /// Counts the number of items in this parallel iterator.
562    ///
563    /// # Examples
564    ///
565    /// ```
566    /// use rayon::prelude::*;
567    ///
568    /// let count = (0..100).into_par_iter().count();
569    ///
570    /// assert_eq!(count, 100);
571    /// ```
572    fn count(self) -> usize {
573        fn one<T>(_: T) -> usize {
574            1
575        }
576
577        self.map(one).sum()
578    }
579
580    /// Applies `map_op` to each item of this iterator, producing a new
581    /// iterator with the results.
582    ///
583    /// # Examples
584    ///
585    /// ```
586    /// use rayon::prelude::*;
587    ///
588    /// let mut par_iter = (0..5).into_par_iter().map(|x| x * 2);
589    ///
590    /// let doubles: Vec<_> = par_iter.collect();
591    ///
592    /// assert_eq!(&doubles[..], &[0, 2, 4, 6, 8]);
593    /// ```
594    fn map<F, R>(self, map_op: F) -> Map<Self, F>
595    where
596        F: Fn(Self::Item) -> R + Sync + Send,
597        R: Send,
598    {
599        Map::new(self, map_op)
600    }
601
602    /// Applies `map_op` to the given `init` value with each item of this
603    /// iterator, producing a new iterator with the results.
604    ///
605    /// The `init` value will be cloned only as needed to be paired with
606    /// the group of items in each rayon job.  It does not require the type
607    /// to be `Sync`.
608    ///
609    /// # Examples
610    ///
611    /// ```
612    /// use std::sync::mpsc::channel;
613    /// use rayon::prelude::*;
614    ///
615    /// let (sender, receiver) = channel();
616    ///
617    /// let a: Vec<_> = (0..5)
618    ///                 .into_par_iter()            // iterating over i32
619    ///                 .map_with(sender, |s, x| {
620    ///                     s.send(x).unwrap();     // sending i32 values through the channel
621    ///                     x                       // returning i32
622    ///                 })
623    ///                 .collect();                 // collecting the returned values into a vector
624    ///
625    /// let mut b: Vec<_> = receiver.iter()         // iterating over the values in the channel
626    ///                             .collect();     // and collecting them
627    /// b.sort();
628    ///
629    /// assert_eq!(a, b);
630    /// ```
631    fn map_with<F, T, R>(self, init: T, map_op: F) -> MapWith<Self, T, F>
632    where
633        F: Fn(&mut T, Self::Item) -> R + Sync + Send,
634        T: Send + Clone,
635        R: Send,
636    {
637        MapWith::new(self, init, map_op)
638    }
639
640    /// Applies `map_op` to a value returned by `init` with each item of this
641    /// iterator, producing a new iterator with the results.
642    ///
643    /// The `init` function will be called only as needed for a value to be
644    /// paired with the group of items in each rayon job.  There is no
645    /// constraint on that returned type at all!
646    ///
647    /// # Examples
648    ///
649    /// ```
650    /// use rand::Rng;
651    /// use rayon::prelude::*;
652    ///
653    /// let a: Vec<_> = (1i32..1_000_000)
654    ///     .into_par_iter()
655    ///     .map_init(
656    ///         || rand::thread_rng(),  // get the thread-local RNG
657    ///         |rng, x| if rng.gen() { // randomly negate items
658    ///             -x
659    ///         } else {
660    ///             x
661    ///         },
662    ///     ).collect();
663    ///
664    /// // There's a remote chance that this will fail...
665    /// assert!(a.iter().any(|&x| x < 0));
666    /// assert!(a.iter().any(|&x| x > 0));
667    /// ```
668    fn map_init<F, INIT, T, R>(self, init: INIT, map_op: F) -> MapInit<Self, INIT, F>
669    where
670        F: Fn(&mut T, Self::Item) -> R + Sync + Send,
671        INIT: Fn() -> T + Sync + Send,
672        R: Send,
673    {
674        MapInit::new(self, init, map_op)
675    }
676
677    /// Creates an iterator which clones all of its elements.  This may be
678    /// useful when you have an iterator over `&T`, but you need `T`, and
679    /// that type implements `Clone`. See also [`copied()`].
680    ///
681    /// [`copied()`]: #method.copied
682    ///
683    /// # Examples
684    ///
685    /// ```
686    /// use rayon::prelude::*;
687    ///
688    /// let a = [1, 2, 3];
689    ///
690    /// let v_cloned: Vec<_> = a.par_iter().cloned().collect();
691    ///
692    /// // cloned is the same as .map(|&x| x), for integers
693    /// let v_map: Vec<_> = a.par_iter().map(|&x| x).collect();
694    ///
695    /// assert_eq!(v_cloned, vec![1, 2, 3]);
696    /// assert_eq!(v_map, vec![1, 2, 3]);
697    /// ```
698    fn cloned<'a, T>(self) -> Cloned<Self>
699    where
700        T: 'a + Clone + Send,
701        Self: ParallelIterator<Item = &'a T>,
702    {
703        Cloned::new(self)
704    }
705
706    /// Creates an iterator which copies all of its elements.  This may be
707    /// useful when you have an iterator over `&T`, but you need `T`, and
708    /// that type implements `Copy`. See also [`cloned()`].
709    ///
710    /// [`cloned()`]: #method.cloned
711    ///
712    /// # Examples
713    ///
714    /// ```
715    /// use rayon::prelude::*;
716    ///
717    /// let a = [1, 2, 3];
718    ///
719    /// let v_copied: Vec<_> = a.par_iter().copied().collect();
720    ///
721    /// // copied is the same as .map(|&x| x), for integers
722    /// let v_map: Vec<_> = a.par_iter().map(|&x| x).collect();
723    ///
724    /// assert_eq!(v_copied, vec![1, 2, 3]);
725    /// assert_eq!(v_map, vec![1, 2, 3]);
726    /// ```
727    fn copied<'a, T>(self) -> Copied<Self>
728    where
729        T: 'a + Copy + Send,
730        Self: ParallelIterator<Item = &'a T>,
731    {
732        Copied::new(self)
733    }
734
735    /// Applies `inspect_op` to a reference to each item of this iterator,
736    /// producing a new iterator passing through the original items.  This is
737    /// often useful for debugging to see what's happening in iterator stages.
738    ///
739    /// # Examples
740    ///
741    /// ```
742    /// use rayon::prelude::*;
743    ///
744    /// let a = [1, 4, 2, 3];
745    ///
746    /// // this iterator sequence is complex.
747    /// let sum = a.par_iter()
748    ///             .cloned()
749    ///             .filter(|&x| x % 2 == 0)
750    ///             .reduce(|| 0, |sum, i| sum + i);
751    ///
752    /// println!("{}", sum);
753    ///
754    /// // let's add some inspect() calls to investigate what's happening
755    /// let sum = a.par_iter()
756    ///             .cloned()
757    ///             .inspect(|x| println!("about to filter: {}", x))
758    ///             .filter(|&x| x % 2 == 0)
759    ///             .inspect(|x| println!("made it through filter: {}", x))
760    ///             .reduce(|| 0, |sum, i| sum + i);
761    ///
762    /// println!("{}", sum);
763    /// ```
764    fn inspect<OP>(self, inspect_op: OP) -> Inspect<Self, OP>
765    where
766        OP: Fn(&Self::Item) + Sync + Send,
767    {
768        Inspect::new(self, inspect_op)
769    }
770
771    /// Mutates each item of this iterator before yielding it.
772    ///
773    /// # Examples
774    ///
775    /// ```
776    /// use rayon::prelude::*;
777    ///
778    /// let par_iter = (0..5).into_par_iter().update(|x| {*x *= 2;});
779    ///
780    /// let doubles: Vec<_> = par_iter.collect();
781    ///
782    /// assert_eq!(&doubles[..], &[0, 2, 4, 6, 8]);
783    /// ```
784    fn update<F>(self, update_op: F) -> Update<Self, F>
785    where
786        F: Fn(&mut Self::Item) + Sync + Send,
787    {
788        Update::new(self, update_op)
789    }
790
791    /// Applies `filter_op` to each item of this iterator, producing a new
792    /// iterator with only the items that gave `true` results.
793    ///
794    /// # Examples
795    ///
796    /// ```
797    /// use rayon::prelude::*;
798    ///
799    /// let mut par_iter = (0..10).into_par_iter().filter(|x| x % 2 == 0);
800    ///
801    /// let even_numbers: Vec<_> = par_iter.collect();
802    ///
803    /// assert_eq!(&even_numbers[..], &[0, 2, 4, 6, 8]);
804    /// ```
805    fn filter<P>(self, filter_op: P) -> Filter<Self, P>
806    where
807        P: Fn(&Self::Item) -> bool + Sync + Send,
808    {
809        Filter::new(self, filter_op)
810    }
811
812    /// Applies `filter_op` to each item of this iterator to get an `Option`,
813    /// producing a new iterator with only the items from `Some` results.
814    ///
815    /// # Examples
816    ///
817    /// ```
818    /// use rayon::prelude::*;
819    ///
820    /// let mut par_iter = (0..10).into_par_iter()
821    ///                         .filter_map(|x| {
822    ///                             if x % 2 == 0 { Some(x * 3) }
823    ///                             else { None }
824    ///                         });
825    ///
826    /// let even_numbers: Vec<_> = par_iter.collect();
827    ///
828    /// assert_eq!(&even_numbers[..], &[0, 6, 12, 18, 24]);
829    /// ```
830    fn filter_map<P, R>(self, filter_op: P) -> FilterMap<Self, P>
831    where
832        P: Fn(Self::Item) -> Option<R> + Sync + Send,
833        R: Send,
834    {
835        FilterMap::new(self, filter_op)
836    }
837
838    /// Applies `map_op` to each item of this iterator to get nested parallel iterators,
839    /// producing a new parallel iterator that flattens these back into one.
840    ///
841    /// See also [`flat_map_iter`](#method.flat_map_iter).
842    ///
843    /// # Examples
844    ///
845    /// ```
846    /// use rayon::prelude::*;
847    ///
848    /// let a = [[1, 2], [3, 4], [5, 6], [7, 8]];
849    ///
850    /// let par_iter = a.par_iter().cloned().flat_map(|a| a.to_vec());
851    ///
852    /// let vec: Vec<_> = par_iter.collect();
853    ///
854    /// assert_eq!(&vec[..], &[1, 2, 3, 4, 5, 6, 7, 8]);
855    /// ```
856    fn flat_map<F, PI>(self, map_op: F) -> FlatMap<Self, F>
857    where
858        F: Fn(Self::Item) -> PI + Sync + Send,
859        PI: IntoParallelIterator,
860    {
861        FlatMap::new(self, map_op)
862    }
863
864    /// Applies `map_op` to each item of this iterator to get nested serial iterators,
865    /// producing a new parallel iterator that flattens these back into one.
866    ///
867    /// # `flat_map_iter` versus `flat_map`
868    ///
869    /// These two methods are similar but behave slightly differently. With [`flat_map`],
870    /// each of the nested iterators must be a parallel iterator, and they will be further
871    /// split up with nested parallelism. With `flat_map_iter`, each nested iterator is a
872    /// sequential `Iterator`, and we only parallelize _between_ them, while the items
873    /// produced by each nested iterator are processed sequentially.
874    ///
875    /// When choosing between these methods, consider whether nested parallelism suits the
876    /// potential iterators at hand. If there's little computation involved, or its length
877    /// is much less than the outer parallel iterator, then it may perform better to avoid
878    /// the overhead of parallelism, just flattening sequentially with `flat_map_iter`.
879    /// If there is a lot of computation, potentially outweighing the outer parallel
880    /// iterator, then the nested parallelism of `flat_map` may be worthwhile.
881    ///
882    /// [`flat_map`]: #method.flat_map
883    ///
884    /// # Examples
885    ///
886    /// ```
887    /// use rayon::prelude::*;
888    /// use std::cell::RefCell;
889    ///
890    /// let a = [[1, 2], [3, 4], [5, 6], [7, 8]];
891    ///
892    /// let par_iter = a.par_iter().flat_map_iter(|a| {
893    ///     // The serial iterator doesn't have to be thread-safe, just its items.
894    ///     let cell_iter = RefCell::new(a.iter().cloned());
895    ///     std::iter::from_fn(move || cell_iter.borrow_mut().next())
896    /// });
897    ///
898    /// let vec: Vec<_> = par_iter.collect();
899    ///
900    /// assert_eq!(&vec[..], &[1, 2, 3, 4, 5, 6, 7, 8]);
901    /// ```
902    fn flat_map_iter<F, SI>(self, map_op: F) -> FlatMapIter<Self, F>
903    where
904        F: Fn(Self::Item) -> SI + Sync + Send,
905        SI: IntoIterator,
906        SI::Item: Send,
907    {
908        FlatMapIter::new(self, map_op)
909    }
910
911    /// An adaptor that flattens parallel-iterable `Item`s into one large iterator.
912    ///
913    /// See also [`flatten_iter`](#method.flatten_iter).
914    ///
915    /// # Examples
916    ///
917    /// ```
918    /// use rayon::prelude::*;
919    ///
920    /// let x: Vec<Vec<_>> = vec![vec![1, 2], vec![3, 4]];
921    /// let y: Vec<_> = x.into_par_iter().flatten().collect();
922    ///
923    /// assert_eq!(y, vec![1, 2, 3, 4]);
924    /// ```
925    fn flatten(self) -> Flatten<Self>
926    where
927        Self::Item: IntoParallelIterator,
928    {
929        Flatten::new(self)
930    }
931
932    /// An adaptor that flattens serial-iterable `Item`s into one large iterator.
933    ///
934    /// See also [`flatten`](#method.flatten) and the analogous comparison of
935    /// [`flat_map_iter` versus `flat_map`](#flat_map_iter-versus-flat_map).
936    ///
937    /// # Examples
938    ///
939    /// ```
940    /// use rayon::prelude::*;
941    ///
942    /// let x: Vec<Vec<_>> = vec![vec![1, 2], vec![3, 4]];
943    /// let iters: Vec<_> = x.into_iter().map(Vec::into_iter).collect();
944    /// let y: Vec<_> = iters.into_par_iter().flatten_iter().collect();
945    ///
946    /// assert_eq!(y, vec![1, 2, 3, 4]);
947    /// ```
948    fn flatten_iter(self) -> FlattenIter<Self>
949    where
950        Self::Item: IntoIterator,
951        <Self::Item as IntoIterator>::Item: Send,
952    {
953        FlattenIter::new(self)
954    }
955
956    /// Reduces the items in the iterator into one item using `op`.
    /// The argument `identity` should be a closure that can produce an
    /// "identity" value, which may be inserted into the sequence as
959    /// needed to create opportunities for parallel execution. So, for
960    /// example, if you are doing a summation, then `identity()` ought
961    /// to produce something that represents the zero for your type
962    /// (but consider just calling `sum()` in that case).
963    ///
964    /// # Examples
965    ///
966    /// ```
967    /// // Iterate over a sequence of pairs `(x0, y0), ..., (xN, yN)`
968    /// // and use reduce to compute one pair `(x0 + ... + xN, y0 + ... + yN)`
969    /// // where the first/second elements are summed separately.
970    /// use rayon::prelude::*;
971    /// let sums = [(0, 1), (5, 6), (16, 2), (8, 9)]
972    ///            .par_iter()        // iterating over &(i32, i32)
973    ///            .cloned()          // iterating over (i32, i32)
974    ///            .reduce(|| (0, 0), // the "identity" is 0 in both columns
975    ///                    |a, b| (a.0 + b.0, a.1 + b.1));
976    /// assert_eq!(sums, (0 + 5 + 16 + 8, 1 + 6 + 2 + 9));
977    /// ```
978    ///
979    /// **Note:** unlike a sequential `fold` operation, the order in
980    /// which `op` will be applied to reduce the result is not fully
981    /// specified. So `op` should be [associative] or else the results
982    /// will be non-deterministic. And of course `identity()` should
983    /// produce a true identity.
984    ///
985    /// [associative]: https://en.wikipedia.org/wiki/Associative_property
986    fn reduce<OP, ID>(self, identity: ID, op: OP) -> Self::Item
987    where
988        OP: Fn(Self::Item, Self::Item) -> Self::Item + Sync + Send,
989        ID: Fn() -> Self::Item + Sync + Send,
990    {
991        reduce::reduce(self, identity, op)
992    }
993
994    /// Reduces the items in the iterator into one item using `op`.
995    /// If the iterator is empty, `None` is returned; otherwise,
996    /// `Some` is returned.
997    ///
998    /// This version of `reduce` is simple but somewhat less
999    /// efficient. If possible, it is better to call `reduce()`, which
1000    /// requires an identity element.
1001    ///
1002    /// # Examples
1003    ///
1004    /// ```
1005    /// use rayon::prelude::*;
1006    /// let sums = [(0, 1), (5, 6), (16, 2), (8, 9)]
1007    ///            .par_iter()        // iterating over &(i32, i32)
1008    ///            .cloned()          // iterating over (i32, i32)
1009    ///            .reduce_with(|a, b| (a.0 + b.0, a.1 + b.1))
1010    ///            .unwrap();
1011    /// assert_eq!(sums, (0 + 5 + 16 + 8, 1 + 6 + 2 + 9));
1012    /// ```
1013    ///
1014    /// **Note:** unlike a sequential `fold` operation, the order in
1015    /// which `op` will be applied to reduce the result is not fully
1016    /// specified. So `op` should be [associative] or else the results
1017    /// will be non-deterministic.
1018    ///
1019    /// [associative]: https://en.wikipedia.org/wiki/Associative_property
1020    fn reduce_with<OP>(self, op: OP) -> Option<Self::Item>
1021    where
1022        OP: Fn(Self::Item, Self::Item) -> Self::Item + Sync + Send,
1023    {
1024        fn opt_fold<T>(op: impl Fn(T, T) -> T) -> impl Fn(Option<T>, T) -> Option<T> {
1025            move |opt_a, b| match opt_a {
1026                Some(a) => Some(op(a, b)),
1027                None => Some(b),
1028            }
1029        }
1030
1031        fn opt_reduce<T>(op: impl Fn(T, T) -> T) -> impl Fn(Option<T>, Option<T>) -> Option<T> {
1032            move |opt_a, opt_b| match (opt_a, opt_b) {
1033                (Some(a), Some(b)) => Some(op(a, b)),
1034                (Some(v), None) | (None, Some(v)) => Some(v),
1035                (None, None) => None,
1036            }
1037        }
1038
1039        self.fold(<_>::default, opt_fold(&op))
1040            .reduce(<_>::default, opt_reduce(&op))
1041    }
1042
1043    /// Reduces the items in the iterator into one item using a fallible `op`.
1044    /// The `identity` argument is used the same way as in [`reduce()`].
1045    ///
1046    /// [`reduce()`]: #method.reduce
1047    ///
1048    /// If a `Result::Err` or `Option::None` item is found, or if `op` reduces
1049    /// to one, we will attempt to stop processing the rest of the items in the
1050    /// iterator as soon as possible, and we will return that terminating value.
1051    /// Otherwise, we will return the final reduced `Result::Ok(T)` or
1052    /// `Option::Some(T)`.  If there are multiple errors in parallel, it is not
1053    /// specified which will be returned.
1054    ///
1055    /// # Examples
1056    ///
1057    /// ```
1058    /// use rayon::prelude::*;
1059    ///
1060    /// // Compute the sum of squares, being careful about overflow.
1061    /// fn sum_squares<I: IntoParallelIterator<Item = i32>>(iter: I) -> Option<i32> {
1062    ///     iter.into_par_iter()
1063    ///         .map(|i| i.checked_mul(i))            // square each item,
1064    ///         .try_reduce(|| 0, i32::checked_add)   // and add them up!
1065    /// }
1066    /// assert_eq!(sum_squares(0..5), Some(0 + 1 + 4 + 9 + 16));
1067    ///
1068    /// // The sum might overflow
1069    /// assert_eq!(sum_squares(0..10_000), None);
1070    ///
1071    /// // Or the squares might overflow before it even reaches `try_reduce`
1072    /// assert_eq!(sum_squares(1_000_000..1_000_001), None);
1073    /// ```
1074    fn try_reduce<T, OP, ID>(self, identity: ID, op: OP) -> Self::Item
1075    where
1076        OP: Fn(T, T) -> Self::Item + Sync + Send,
1077        ID: Fn() -> T + Sync + Send,
1078        Self::Item: Try<Output = T>,
1079    {
1080        try_reduce::try_reduce(self, identity, op)
1081    }
1082
1083    /// Reduces the items in the iterator into one item using a fallible `op`.
1084    ///
1085    /// Like [`reduce_with()`], if the iterator is empty, `None` is returned;
1086    /// otherwise, `Some` is returned.  Beyond that, it behaves like
1087    /// [`try_reduce()`] for handling `Err`/`None`.
1088    ///
1089    /// [`reduce_with()`]: #method.reduce_with
1090    /// [`try_reduce()`]: #method.try_reduce
1091    ///
1092    /// For instance, with `Option` items, the return value may be:
1093    /// - `None`, the iterator was empty
1094    /// - `Some(None)`, we stopped after encountering `None`.
1095    /// - `Some(Some(x))`, the entire iterator reduced to `x`.
1096    ///
1097    /// With `Result` items, the nesting is more obvious:
1098    /// - `None`, the iterator was empty
1099    /// - `Some(Err(e))`, we stopped after encountering an error `e`.
1100    /// - `Some(Ok(x))`, the entire iterator reduced to `x`.
1101    ///
1102    /// # Examples
1103    ///
1104    /// ```
1105    /// use rayon::prelude::*;
1106    ///
1107    /// let files = ["/dev/null", "/does/not/exist"];
1108    ///
1109    /// // Find the biggest file
1110    /// files.into_par_iter()
1111    ///     .map(|path| std::fs::metadata(path).map(|m| (path, m.len())))
1112    ///     .try_reduce_with(|a, b| {
1113    ///         Ok(if a.1 >= b.1 { a } else { b })
1114    ///     })
1115    ///     .expect("Some value, since the iterator is not empty")
1116    ///     .expect_err("not found");
1117    /// ```
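    ///
    /// And a small sketch with `Option` items, matching the cases listed above:
    ///
    /// ```
    /// use rayon::prelude::*;
    ///
    /// // The total overflows `u32`, so some `checked_add` returns `None`
    /// // and the whole reduction yields `Some(None)`.
    /// let big = [u32::MAX, 1, 2, 3];
    /// let sum = big.par_iter().copied().map(Some).try_reduce_with(u32::checked_add);
    /// assert_eq!(sum, Some(None));
    ///
    /// // An empty iterator reduces to `None` overall.
    /// let empty: [u32; 0] = [];
    /// let sum = empty.par_iter().copied().map(Some).try_reduce_with(u32::checked_add);
    /// assert_eq!(sum, None);
    /// ```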
1118    fn try_reduce_with<T, OP>(self, op: OP) -> Option<Self::Item>
1119    where
1120        OP: Fn(T, T) -> Self::Item + Sync + Send,
1121        Self::Item: Try<Output = T>,
1122    {
1123        try_reduce_with::try_reduce_with(self, op)
1124    }
1125
1126    /// Parallel fold is similar to sequential fold except that the
1127    /// sequence of items may be subdivided before it is
1128    /// folded. Consider a list of numbers like `22 3 77 89 46`. If
    /// you used sequential fold to add them (`fold(0, |a,b| a+b)`),
1130    /// you would wind up first adding 0 + 22, then 22 + 3, then 25 +
1131    /// 77, and so forth. The **parallel fold** works similarly except
1132    /// that it first breaks up your list into sublists, and hence
1133    /// instead of yielding up a single sum at the end, it yields up
1134    /// multiple sums. The number of results is nondeterministic, as
1135    /// is the point where the breaks occur.
1136    ///
1137    /// So if we did the same parallel fold (`fold(0, |a,b| a+b)`) on
1138    /// our example list, we might wind up with a sequence of two numbers,
1139    /// like so:
1140    ///
1141    /// ```notrust
1142    /// 22 3 77 89 46
1143    ///       |     |
1144    ///     102   135
1145    /// ```
1146    ///
1147    /// Or perhaps these three numbers:
1148    ///
1149    /// ```notrust
1150    /// 22 3 77 89 46
1151    ///       |  |  |
1152    ///     102 89 46
1153    /// ```
1154    ///
1155    /// In general, Rayon will attempt to find good breaking points
1156    /// that keep all of your cores busy.
1157    ///
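    /// As a toy sketch (you would normally just call `sum` here), collecting the
    /// per-group results makes it visible that there may be more than one:
    ///
    /// ```
    /// use rayon::prelude::*;
    ///
    /// // Each parallel job folds its own sub-list, producing one partial sum
    /// // per job; how many jobs there are is up to Rayon.
    /// let partial_sums: Vec<u32> = (0..100u32).into_par_iter()
    ///     .fold(|| 0_u32, |a, b| a + b)
    ///     .collect();
    ///
    /// assert_eq!(partial_sums.iter().sum::<u32>(), (0..100).sum());
    /// ```
    ///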
1158    /// ### Fold versus reduce
1159    ///
1160    /// The `fold()` and `reduce()` methods each take an identity element
1161    /// and a combining function, but they operate rather differently.
1162    ///
1163    /// `reduce()` requires that the identity function has the same
1164    /// type as the things you are iterating over, and it fully
1165    /// reduces the list of items into a single item. So, for example,
1166    /// imagine we are iterating over a list of bytes `bytes: [128_u8,
1167    /// 64_u8, 64_u8]`. If we used `bytes.reduce(|| 0_u8, |a: u8, b:
1168    /// u8| a + b)`, we would get an overflow. This is because `0`,
1169    /// `a`, and `b` here are all bytes, just like the numbers in the
1170    /// list (I wrote the types explicitly above, but those are the
1171    /// only types you can use). To avoid the overflow, we would need
1172    /// to do something like `bytes.map(|b| b as u32).reduce(|| 0, |a,
1173    /// b| a + b)`, in which case our result would be `256`.
1174    ///
1175    /// In contrast, with `fold()`, the identity function does not
1176    /// have to have the same type as the things you are iterating
1177    /// over, and you potentially get back many results. So, if we
1178    /// continue with the `bytes` example from the previous paragraph,
1179    /// we could do `bytes.fold(|| 0_u32, |a, b| a + (b as u32))` to
1180    /// convert our bytes into `u32`. And of course we might not get
1181    /// back a single sum.
1182    ///
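    /// Concretely, a minimal sketch of that bytes example:
    ///
    /// ```
    /// use rayon::prelude::*;
    ///
    /// let bytes = [128_u8, 64_u8, 64_u8];
    ///
    /// // Folding into `u32` accumulators sidesteps the `u8` overflow; the
    /// // final `sum` then combines however many partial results there are.
    /// let total: u32 = bytes.par_iter()
    ///     .fold(|| 0_u32, |acc, &b| acc + b as u32)
    ///     .sum();
    ///
    /// assert_eq!(total, 256);
    /// ```
    ///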
1183    /// There is a more subtle distinction as well, though it's
1184    /// actually implied by the above points. When you use `reduce()`,
1185    /// your reduction function is sometimes called with values that
1186    /// were never part of your original parallel iterator (for
1187    /// example, both the left and right might be a partial sum). With
1188    /// `fold()`, in contrast, the left value in the fold function is
1189    /// always the accumulator, and the right value is always from
1190    /// your original sequence.
1191    ///
1192    /// ### Fold vs Map/Reduce
1193    ///
1194    /// Fold makes sense if you have some operation where it is
1195    /// cheaper to create groups of elements at a time. For example,
1196    /// imagine collecting characters into a string. If you were going
1197    /// to use map/reduce, you might try this:
1198    ///
1199    /// ```
1200    /// use rayon::prelude::*;
1201    ///
1202    /// let s =
1203    ///     ['a', 'b', 'c', 'd', 'e']
1204    ///     .par_iter()
1205    ///     .map(|c: &char| format!("{}", c))
1206    ///     .reduce(|| String::new(),
1207    ///             |mut a: String, b: String| { a.push_str(&b); a });
1208    ///
1209    /// assert_eq!(s, "abcde");
1210    /// ```
1211    ///
1212    /// Because reduce produces the same type of element as its input,
1213    /// you have to first map each character into a string, and then
1214    /// you can reduce them. This means we create one string per
1215    /// element in our iterator -- not so great. Using `fold`, we can
1216    /// do this instead:
1217    ///
1218    /// ```
1219    /// use rayon::prelude::*;
1220    ///
1221    /// let s =
1222    ///     ['a', 'b', 'c', 'd', 'e']
1223    ///     .par_iter()
1224    ///     .fold(|| String::new(),
1225    ///             |mut s: String, c: &char| { s.push(*c); s })
1226    ///     .reduce(|| String::new(),
1227    ///             |mut a: String, b: String| { a.push_str(&b); a });
1228    ///
1229    /// assert_eq!(s, "abcde");
1230    /// ```
1231    ///
1232    /// Now `fold` will process groups of our characters at a time,
1233    /// and we only make one string per group. We should wind up with
1234    /// some small-ish number of strings roughly proportional to the
1235    /// number of CPUs you have (it will ultimately depend on how busy
1236    /// your processors are). Note that we still need to do a reduce
1237    /// afterwards to combine those groups of strings into a single
1238    /// string.
1239    ///
1240    /// You could use a similar trick to save partial results (e.g., a
1241    /// cache) or something similar.
1242    ///
1243    /// ### Combining fold with other operations
1244    ///
1245    /// You can combine `fold` with `reduce` if you want to produce a
1246    /// single value. This is then roughly equivalent to a map/reduce
1247    /// combination in effect:
1248    ///
1249    /// ```
1250    /// use rayon::prelude::*;
1251    ///
1252    /// let bytes = 0..22_u8;
1253    /// let sum = bytes.into_par_iter()
1254    ///                .fold(|| 0_u32, |a: u32, b: u8| a + (b as u32))
1255    ///                .sum::<u32>();
1256    ///
1257    /// assert_eq!(sum, (0..22).sum()); // compare to sequential
1258    /// ```
1259    fn fold<T, ID, F>(self, identity: ID, fold_op: F) -> Fold<Self, ID, F>
1260    where
1261        F: Fn(T, Self::Item) -> T + Sync + Send,
1262        ID: Fn() -> T + Sync + Send,
1263        T: Send,
1264    {
1265        Fold::new(self, identity, fold_op)
1266    }
1267
1268    /// Applies `fold_op` to the given `init` value with each item of this
1269    /// iterator, finally producing the value for further use.
1270    ///
1271    /// This works essentially like `fold(|| init.clone(), fold_op)`, except
1272    /// it doesn't require the `init` type to be `Sync`, nor any other form
1273    /// of added synchronization.
1274    ///
1275    /// # Examples
1276    ///
1277    /// ```
1278    /// use rayon::prelude::*;
1279    ///
1280    /// let bytes = 0..22_u8;
1281    /// let sum = bytes.into_par_iter()
1282    ///                .fold_with(0_u32, |a: u32, b: u8| a + (b as u32))
1283    ///                .sum::<u32>();
1284    ///
1285    /// assert_eq!(sum, (0..22).sum()); // compare to sequential
1286    /// ```
1287    fn fold_with<F, T>(self, init: T, fold_op: F) -> FoldWith<Self, T, F>
1288    where
1289        F: Fn(T, Self::Item) -> T + Sync + Send,
1290        T: Send + Clone,
1291    {
1292        FoldWith::new(self, init, fold_op)
1293    }
1294
1295    /// Performs a fallible parallel fold.
1296    ///
1297    /// This is a variation of [`fold()`] for operations which can fail with
1298    /// `Option::None` or `Result::Err`.  The first such failure stops
1299    /// processing the local set of items, without affecting other folds in the
1300    /// iterator's subdivisions.
1301    ///
1302    /// Often, `try_fold()` will be followed by [`try_reduce()`]
1303    /// for a final reduction and global short-circuiting effect.
1304    ///
1305    /// [`fold()`]: #method.fold
1306    /// [`try_reduce()`]: #method.try_reduce
1307    ///
1308    /// # Examples
1309    ///
1310    /// ```
1311    /// use rayon::prelude::*;
1312    ///
1313    /// let bytes = 0..22_u8;
1314    /// let sum = bytes.into_par_iter()
1315    ///                .try_fold(|| 0_u32, |a: u32, b: u8| a.checked_add(b as u32))
1316    ///                .try_reduce(|| 0, u32::checked_add);
1317    ///
1318    /// assert_eq!(sum, Some((0..22).sum())); // compare to sequential
1319    /// ```
1320    fn try_fold<T, R, ID, F>(self, identity: ID, fold_op: F) -> TryFold<Self, R, ID, F>
1321    where
1322        F: Fn(T, Self::Item) -> R + Sync + Send,
1323        ID: Fn() -> T + Sync + Send,
1324        R: Try<Output = T> + Send,
1325    {
1326        TryFold::new(self, identity, fold_op)
1327    }
1328
1329    /// Performs a fallible parallel fold with a cloneable `init` value.
1330    ///
1331    /// This combines the `init` semantics of [`fold_with()`] and the failure
1332    /// semantics of [`try_fold()`].
1333    ///
1334    /// [`fold_with()`]: #method.fold_with
1335    /// [`try_fold()`]: #method.try_fold
1336    ///
1337    /// ```
1338    /// use rayon::prelude::*;
1339    ///
1340    /// let bytes = 0..22_u8;
1341    /// let sum = bytes.into_par_iter()
1342    ///                .try_fold_with(0_u32, |a: u32, b: u8| a.checked_add(b as u32))
1343    ///                .try_reduce(|| 0, u32::checked_add);
1344    ///
1345    /// assert_eq!(sum, Some((0..22).sum())); // compare to sequential
1346    /// ```
1347    fn try_fold_with<F, T, R>(self, init: T, fold_op: F) -> TryFoldWith<Self, R, F>
1348    where
1349        F: Fn(T, Self::Item) -> R + Sync + Send,
1350        R: Try<Output = T> + Send,
1351        T: Clone + Send,
1352    {
1353        TryFoldWith::new(self, init, fold_op)
1354    }
1355
1356    /// Sums up the items in the iterator.
1357    ///
    /// Note that the order in which the items will be reduced is not specified,
1359    /// so if the `+` operator is not truly [associative] \(as is the
1360    /// case for floating point numbers), then the results are not
1361    /// fully deterministic.
1362    ///
1363    /// [associative]: https://en.wikipedia.org/wiki/Associative_property
1364    ///
1365    /// Basically equivalent to `self.reduce(|| 0, |a, b| a + b)`,
1366    /// except that the type of `0` and the `+` operation may vary
1367    /// depending on the type of value being produced.
1368    ///
1369    /// # Examples
1370    ///
1371    /// ```
1372    /// use rayon::prelude::*;
1373    ///
1374    /// let a = [1, 5, 7];
1375    ///
1376    /// let sum: i32 = a.par_iter().sum();
1377    ///
1378    /// assert_eq!(sum, 13);
1379    /// ```
1380    fn sum<S>(self) -> S
1381    where
1382        S: Send + Sum<Self::Item> + Sum<S>,
1383    {
1384        sum::sum(self)
1385    }
1386
1387    /// Multiplies all the items in the iterator.
1388    ///
    /// Note that the order in which the items will be reduced is not specified,
1390    /// so if the `*` operator is not truly [associative] \(as is the
1391    /// case for floating point numbers), then the results are not
1392    /// fully deterministic.
1393    ///
1394    /// [associative]: https://en.wikipedia.org/wiki/Associative_property
1395    ///
1396    /// Basically equivalent to `self.reduce(|| 1, |a, b| a * b)`,
1397    /// except that the type of `1` and the `*` operation may vary
1398    /// depending on the type of value being produced.
1399    ///
1400    /// # Examples
1401    ///
1402    /// ```
1403    /// use rayon::prelude::*;
1404    ///
1405    /// fn factorial(n: u32) -> u32 {
1406    ///    (1..n+1).into_par_iter().product()
1407    /// }
1408    ///
1409    /// assert_eq!(factorial(0), 1);
1410    /// assert_eq!(factorial(1), 1);
1411    /// assert_eq!(factorial(5), 120);
1412    /// ```
1413    fn product<P>(self) -> P
1414    where
1415        P: Send + Product<Self::Item> + Product<P>,
1416    {
1417        product::product(self)
1418    }
1419
1420    /// Computes the minimum of all the items in the iterator. If the
1421    /// iterator is empty, `None` is returned; otherwise, `Some(min)`
1422    /// is returned.
1423    ///
1424    /// Note that the order in which the items will be reduced is not
1425    /// specified, so if the `Ord` impl is not truly associative, then
1426    /// the results are not deterministic.
1427    ///
1428    /// Basically equivalent to `self.reduce_with(|a, b| cmp::min(a, b))`.
1429    ///
1430    /// # Examples
1431    ///
1432    /// ```
1433    /// use rayon::prelude::*;
1434    ///
1435    /// let a = [45, 74, 32];
1436    ///
1437    /// assert_eq!(a.par_iter().min(), Some(&32));
1438    ///
1439    /// let b: [i32; 0] = [];
1440    ///
1441    /// assert_eq!(b.par_iter().min(), None);
1442    /// ```
1443    fn min(self) -> Option<Self::Item>
1444    where
1445        Self::Item: Ord,
1446    {
1447        self.reduce_with(cmp::min)
1448    }
1449
1450    /// Computes the minimum of all the items in the iterator with respect to
1451    /// the given comparison function. If the iterator is empty, `None` is
1452    /// returned; otherwise, `Some(min)` is returned.
1453    ///
1454    /// Note that the order in which the items will be reduced is not
1455    /// specified, so if the comparison function is not associative, then
1456    /// the results are not deterministic.
1457    ///
1458    /// # Examples
1459    ///
1460    /// ```
1461    /// use rayon::prelude::*;
1462    ///
1463    /// let a = [-3_i32, 77, 53, 240, -1];
1464    ///
1465    /// assert_eq!(a.par_iter().min_by(|x, y| x.cmp(y)), Some(&-3));
1466    /// ```
1467    fn min_by<F>(self, f: F) -> Option<Self::Item>
1468    where
1469        F: Sync + Send + Fn(&Self::Item, &Self::Item) -> Ordering,
1470    {
1471        fn min<T>(f: impl Fn(&T, &T) -> Ordering) -> impl Fn(T, T) -> T {
1472            move |a, b| match f(&a, &b) {
1473                Ordering::Greater => b,
1474                _ => a,
1475            }
1476        }
1477
1478        self.reduce_with(min(f))
1479    }
1480
1481    /// Computes the item that yields the minimum value for the given
1482    /// function. If the iterator is empty, `None` is returned;
1483    /// otherwise, `Some(item)` is returned.
1484    ///
1485    /// Note that the order in which the items will be reduced is not
1486    /// specified, so if the `Ord` impl is not truly associative, then
1487    /// the results are not deterministic.
1488    ///
1489    /// # Examples
1490    ///
1491    /// ```
1492    /// use rayon::prelude::*;
1493    ///
1494    /// let a = [-3_i32, 34, 2, 5, -10, -3, -23];
1495    ///
1496    /// assert_eq!(a.par_iter().min_by_key(|x| x.abs()), Some(&2));
1497    /// ```
1498    fn min_by_key<K, F>(self, f: F) -> Option<Self::Item>
1499    where
1500        K: Ord + Send,
1501        F: Sync + Send + Fn(&Self::Item) -> K,
1502    {
1503        fn key<T, K>(f: impl Fn(&T) -> K) -> impl Fn(T) -> (K, T) {
1504            move |x| (f(&x), x)
1505        }
1506
1507        fn min_key<T, K: Ord>(a: (K, T), b: (K, T)) -> (K, T) {
1508            match (a.0).cmp(&b.0) {
1509                Ordering::Greater => b,
1510                _ => a,
1511            }
1512        }
1513
1514        let (_, x) = self.map(key(f)).reduce_with(min_key)?;
1515        Some(x)
1516    }
1517
1518    /// Computes the maximum of all the items in the iterator. If the
1519    /// iterator is empty, `None` is returned; otherwise, `Some(max)`
1520    /// is returned.
1521    ///
1522    /// Note that the order in which the items will be reduced is not
1523    /// specified, so if the `Ord` impl is not truly associative, then
1524    /// the results are not deterministic.
1525    ///
1526    /// Basically equivalent to `self.reduce_with(|a, b| cmp::max(a, b))`.
1527    ///
1528    /// # Examples
1529    ///
1530    /// ```
1531    /// use rayon::prelude::*;
1532    ///
1533    /// let a = [45, 74, 32];
1534    ///
1535    /// assert_eq!(a.par_iter().max(), Some(&74));
1536    ///
1537    /// let b: [i32; 0] = [];
1538    ///
1539    /// assert_eq!(b.par_iter().max(), None);
1540    /// ```
1541    fn max(self) -> Option<Self::Item>
1542    where
1543        Self::Item: Ord,
1544    {
1545        self.reduce_with(cmp::max)
1546    }
1547
1548    /// Computes the maximum of all the items in the iterator with respect to
1549    /// the given comparison function. If the iterator is empty, `None` is
1550    /// returned; otherwise, `Some(max)` is returned.
1551    ///
1552    /// Note that the order in which the items will be reduced is not
1553    /// specified, so if the comparison function is not associative, then
1554    /// the results are not deterministic.
1555    ///
1556    /// # Examples
1557    ///
1558    /// ```
1559    /// use rayon::prelude::*;
1560    ///
1561    /// let a = [-3_i32, 77, 53, 240, -1];
1562    ///
1563    /// assert_eq!(a.par_iter().max_by(|x, y| x.abs().cmp(&y.abs())), Some(&240));
1564    /// ```
1565    fn max_by<F>(self, f: F) -> Option<Self::Item>
1566    where
1567        F: Sync + Send + Fn(&Self::Item, &Self::Item) -> Ordering,
1568    {
1569        fn max<T>(f: impl Fn(&T, &T) -> Ordering) -> impl Fn(T, T) -> T {
1570            move |a, b| match f(&a, &b) {
1571                Ordering::Greater => a,
1572                _ => b,
1573            }
1574        }
1575
1576        self.reduce_with(max(f))
1577    }
1578
1579    /// Computes the item that yields the maximum value for the given
1580    /// function. If the iterator is empty, `None` is returned;
1581    /// otherwise, `Some(item)` is returned.
1582    ///
1583    /// Note that the order in which the items will be reduced is not
1584    /// specified, so if the `Ord` impl is not truly associative, then
1585    /// the results are not deterministic.
1586    ///
1587    /// # Examples
1588    ///
1589    /// ```
1590    /// use rayon::prelude::*;
1591    ///
1592    /// let a = [-3_i32, 34, 2, 5, -10, -3, -23];
1593    ///
1594    /// assert_eq!(a.par_iter().max_by_key(|x| x.abs()), Some(&34));
1595    /// ```
1596    fn max_by_key<K, F>(self, f: F) -> Option<Self::Item>
1597    where
1598        K: Ord + Send,
1599        F: Sync + Send + Fn(&Self::Item) -> K,
1600    {
1601        fn key<T, K>(f: impl Fn(&T) -> K) -> impl Fn(T) -> (K, T) {
1602            move |x| (f(&x), x)
1603        }
1604
1605        fn max_key<T, K: Ord>(a: (K, T), b: (K, T)) -> (K, T) {
1606            match (a.0).cmp(&b.0) {
1607                Ordering::Greater => a,
1608                _ => b,
1609            }
1610        }
1611
1612        let (_, x) = self.map(key(f)).reduce_with(max_key)?;
1613        Some(x)
1614    }
1615
1616    /// Takes two iterators and creates a new iterator over both.
1617    ///
1618    /// # Examples
1619    ///
1620    /// ```
1621    /// use rayon::prelude::*;
1622    ///
1623    /// let a = [0, 1, 2];
1624    /// let b = [9, 8, 7];
1625    ///
1626    /// let par_iter = a.par_iter().chain(b.par_iter());
1627    ///
1628    /// let chained: Vec<_> = par_iter.cloned().collect();
1629    ///
1630    /// assert_eq!(&chained[..], &[0, 1, 2, 9, 8, 7]);
1631    /// ```
1632    fn chain<C>(self, chain: C) -> Chain<Self, C::Iter>
1633    where
1634        C: IntoParallelIterator<Item = Self::Item>,
1635    {
1636        Chain::new(self, chain.into_par_iter())
1637    }
1638
1639    /// Searches for **some** item in the parallel iterator that
1640    /// matches the given predicate and returns it. This operation
1641    /// is similar to [`find` on sequential iterators][find] but
1642    /// the item returned may not be the **first** one in the parallel
1643    /// sequence which matches, since we search the entire sequence in parallel.
1644    ///
1645    /// Once a match is found, we will attempt to stop processing
1646    /// the rest of the items in the iterator as soon as possible
1647    /// (just as `find` stops iterating once a match is found).
1648    ///
1649    /// [find]: https://doc.rust-lang.org/std/iter/trait.Iterator.html#method.find
1650    ///
1651    /// # Examples
1652    ///
1653    /// ```
1654    /// use rayon::prelude::*;
1655    ///
1656    /// let a = [1, 2, 3, 3];
1657    ///
1658    /// assert_eq!(a.par_iter().find_any(|&&x| x == 3), Some(&3));
1659    ///
1660    /// assert_eq!(a.par_iter().find_any(|&&x| x == 100), None);
1661    /// ```
1662    fn find_any<P>(self, predicate: P) -> Option<Self::Item>
1663    where
1664        P: Fn(&Self::Item) -> bool + Sync + Send,
1665    {
1666        find::find(self, predicate)
1667    }
1668
1669    /// Searches for the sequentially **first** item in the parallel iterator
1670    /// that matches the given predicate and returns it.
1671    ///
1672    /// Once a match is found, all attempts to the right of the match
1673    /// will be stopped, while attempts to the left must continue in case
1674    /// an earlier match is found.
1675    ///
1676    /// Note that not all parallel iterators have a useful order, much like
1677    /// sequential `HashMap` iteration, so "first" may be nebulous.  If you
1678    /// just want the first match discovered anywhere in the iterator,
1679    /// `find_any` is a better choice.
1680    ///
1681    /// # Examples
1682    ///
1683    /// ```
1684    /// use rayon::prelude::*;
1685    ///
1686    /// let a = [1, 2, 3, 3];
1687    ///
1688    /// assert_eq!(a.par_iter().find_first(|&&x| x == 3), Some(&3));
1689    ///
1690    /// assert_eq!(a.par_iter().find_first(|&&x| x == 100), None);
1691    /// ```
1692    fn find_first<P>(self, predicate: P) -> Option<Self::Item>
1693    where
1694        P: Fn(&Self::Item) -> bool + Sync + Send,
1695    {
1696        find_first_last::find_first(self, predicate)
1697    }
1698
1699    /// Searches for the sequentially **last** item in the parallel iterator
1700    /// that matches the given predicate and returns it.
1701    ///
1702    /// Once a match is found, all attempts to the left of the match
1703    /// will be stopped, while attempts to the right must continue in case
1704    /// a later match is found.
1705    ///
1706    /// Note that not all parallel iterators have a useful order, much like
1707    /// sequential `HashMap` iteration, so "last" may be nebulous.  When the
1708    /// order doesn't actually matter to you, `find_any` is a better choice.
1709    ///
1710    /// # Examples
1711    ///
1712    /// ```
1713    /// use rayon::prelude::*;
1714    ///
1715    /// let a = [1, 2, 3, 3];
1716    ///
1717    /// assert_eq!(a.par_iter().find_last(|&&x| x == 3), Some(&3));
1718    ///
1719    /// assert_eq!(a.par_iter().find_last(|&&x| x == 100), None);
1720    /// ```
1721    fn find_last<P>(self, predicate: P) -> Option<Self::Item>
1722    where
1723        P: Fn(&Self::Item) -> bool + Sync + Send,
1724    {
1725        find_first_last::find_last(self, predicate)
1726    }
1727
1728    /// Applies the given predicate to the items in the parallel iterator
1729    /// and returns **any** non-None result of the map operation.
1730    ///
1731    /// Once a non-None value is produced from the map operation, we will
1732    /// attempt to stop processing the rest of the items in the iterator
1733    /// as soon as possible.
1734    ///
1735    /// Note that this method only returns **some** non-None result of the map
1736    /// operation. The value returned
1737    /// may not be the **first** non-None value produced in the parallel
1738    /// sequence, since the entire sequence is mapped over in parallel.
1739    ///
1740    /// # Examples
1741    ///
1742    /// ```
1743    /// use rayon::prelude::*;
1744    ///
1745    /// let c = ["lol", "NaN", "5", "5"];
1746    ///
1747    /// let found_number = c.par_iter().find_map_any(|s| s.parse().ok());
1748    ///
1749    /// assert_eq!(found_number, Some(5));
1750    /// ```
1751    fn find_map_any<P, R>(self, predicate: P) -> Option<R>
1752    where
1753        P: Fn(Self::Item) -> Option<R> + Sync + Send,
1754        R: Send,
1755    {
1756        fn yes<T>(_: &T) -> bool {
1757            true
1758        }
1759        self.filter_map(predicate).find_any(yes)
1760    }
1761
1762    /// Applies the given predicate to the items in the parallel iterator and
1763    /// returns the sequentially **first** non-None result of the map operation.
1764    ///
1765    /// Once a non-None value is produced from the map operation, all attempts
1766    /// to the right of the match will be stopped, while attempts to the left
1767    /// must continue in case an earlier match is found.
1768    ///
1769    /// Note that not all parallel iterators have a useful order, much like
1770    /// sequential `HashMap` iteration, so "first" may be nebulous. If you
1771    /// just want the first non-None value discovered anywhere in the iterator,
1772    /// `find_map_any` is a better choice.
1773    ///
1774    /// # Examples
1775    ///
1776    /// ```
1777    /// use rayon::prelude::*;
1778    ///
1779    /// let c = ["lol", "NaN", "2", "5"];
1780    ///
1781    /// let first_number = c.par_iter().find_map_first(|s| s.parse().ok());
1782    ///
1783    /// assert_eq!(first_number, Some(2));
1784    /// ```
1785    fn find_map_first<P, R>(self, predicate: P) -> Option<R>
1786    where
1787        P: Fn(Self::Item) -> Option<R> + Sync + Send,
1788        R: Send,
1789    {
1790        fn yes<T>(_: &T) -> bool {
1791            true
1792        }
1793        self.filter_map(predicate).find_first(yes)
1794    }
1795
1796    /// Applies the given predicate to the items in the parallel iterator and
1797    /// returns the sequentially **last** non-None result of the map operation.
1798    ///
1799    /// Once a non-None value is produced from the map operation, all attempts
1800    /// to the left of the match will be stopped, while attempts to the right
1801    /// must continue in case a later match is found.
1802    ///
1803    /// Note that not all parallel iterators have a useful order, much like
1804    /// sequential `HashMap` iteration, so "last" may be nebulous. If you
1805    /// just want any non-None value discovered anywhere in the iterator,
1806    /// `find_map_any` is a better choice.
1807    ///
1808    /// # Examples
1809    ///
1810    /// ```
1811    /// use rayon::prelude::*;
1812    ///
1813    /// let c = ["lol", "NaN", "2", "5"];
1814    ///
1815    /// let last_number = c.par_iter().find_map_last(|s| s.parse().ok());
1816    ///
1817    /// assert_eq!(last_number, Some(5));
1818    /// ```
1819    fn find_map_last<P, R>(self, predicate: P) -> Option<R>
1820    where
1821        P: Fn(Self::Item) -> Option<R> + Sync + Send,
1822        R: Send,
1823    {
1824        fn yes<T>(_: &T) -> bool {
1825            true
1826        }
1827        self.filter_map(predicate).find_last(yes)
1828    }
1829
1830    #[doc(hidden)]
1831    #[deprecated(note = "parallel `find` does not search in order -- use `find_any`, \
1832                         `find_first`, or `find_last`")]
1833    fn find<P>(self, predicate: P) -> Option<Self::Item>
1834    where
1835        P: Fn(&Self::Item) -> bool + Sync + Send,
1836    {
1837        self.find_any(predicate)
1838    }
1839
1840    /// Searches for **some** item in the parallel iterator that
1841    /// matches the given predicate, and if so returns true.  Once
1842    /// a match is found, we'll attempt to stop processing the rest
1843    /// of the items.  Proving that there's no match (and returning false)
1844    /// does require visiting every item.
1845    ///
1846    /// # Examples
1847    ///
1848    /// ```
1849    /// use rayon::prelude::*;
1850    ///
1851    /// let a = [0, 12, 3, 4, 0, 23, 0];
1852    ///
1853    /// let is_valid = a.par_iter().any(|&x| x > 10);
1854    ///
1855    /// assert!(is_valid);
1856    /// ```
1857    fn any<P>(self, predicate: P) -> bool
1858    where
1859        P: Fn(Self::Item) -> bool + Sync + Send,
1860    {
1861        self.map(predicate).find_any(bool::clone).is_some()
1862    }
1863
1864    /// Tests that every item in the parallel iterator matches the given
1865    /// predicate, and if so returns true.  If a counter-example is found,
1866    /// we'll attempt to stop processing more items, then return false.
1867    ///
1868    /// # Examples
1869    ///
1870    /// ```
1871    /// use rayon::prelude::*;
1872    ///
1873    /// let a = [0, 12, 3, 4, 0, 23, 0];
1874    ///
1875    /// let is_valid = a.par_iter().all(|&x| x > 10);
1876    ///
1877    /// assert!(!is_valid);
1878    /// ```
1879    fn all<P>(self, predicate: P) -> bool
1880    where
1881        P: Fn(Self::Item) -> bool + Sync + Send,
1882    {
1883        #[inline]
1884        fn is_false(x: &bool) -> bool {
1885            !x
1886        }
1887
1888        self.map(predicate).find_any(is_false).is_none()
1889    }
1890
1891    /// Creates an iterator over the `Some` items of this iterator, halting
1892    /// as soon as any `None` is found.
1893    ///
1894    /// # Examples
1895    ///
1896    /// ```
1897    /// use rayon::prelude::*;
1898    /// use std::sync::atomic::{AtomicUsize, Ordering};
1899    ///
1900    /// let counter = AtomicUsize::new(0);
1901    /// let value = (0_i32..2048)
1902    ///     .into_par_iter()
1903    ///     .map(|x| {
1904    ///              counter.fetch_add(1, Ordering::SeqCst);
1905    ///              if x < 1024 { Some(x) } else { None }
1906    ///          })
1907    ///     .while_some()
1908    ///     .max();
1909    ///
1910    /// assert!(value < Some(1024));
1911    /// assert!(counter.load(Ordering::SeqCst) < 2048); // should not have visited every single one
1912    /// ```
1913    fn while_some<T>(self) -> WhileSome<Self>
1914    where
1915        Self: ParallelIterator<Item = Option<T>>,
1916        T: Send,
1917    {
1918        WhileSome::new(self)
1919    }
1920
1921    /// Wraps an iterator with a fuse in case of panics, to halt all threads
1922    /// as soon as possible.
1923    ///
1924    /// Panics within parallel iterators are always propagated to the caller,
1925    /// but they don't always halt the rest of the iterator right away, due to
1926    /// the internal semantics of [`join`]. This adaptor makes a greater effort
1927    /// to stop processing other items sooner, at the cost of additional
1928    /// synchronization overhead, which may also inhibit some optimizations.
1929    ///
1930    /// [`join`]: ../fn.join.html#panics
1931    ///
1932    /// # Examples
1933    ///
1934    /// If this code didn't use `panic_fuse()`, it would continue processing
1935    /// many more items in other threads (with long sleep delays) before the
1936    /// panic is finally propagated.
1937    ///
1938    /// ```should_panic
1939    /// use rayon::prelude::*;
1940    /// use std::{thread, time};
1941    ///
1942    /// (0..1_000_000)
1943    ///     .into_par_iter()
1944    ///     .panic_fuse()
1945    ///     .for_each(|i| {
1946    ///         // simulate some work
1947    ///         thread::sleep(time::Duration::from_secs(1));
1948    ///         assert!(i > 0); // oops!
1949    ///     });
1950    /// ```
1951    fn panic_fuse(self) -> PanicFuse<Self> {
1952        PanicFuse::new(self)
1953    }
1954
1955    /// Creates a fresh collection containing all the elements produced
1956    /// by this parallel iterator.
1957    ///
1958    /// You may prefer [`collect_into_vec()`] implemented on
1959    /// [`IndexedParallelIterator`], if your underlying iterator also implements
1960    /// it. [`collect_into_vec()`] allocates efficiently with precise knowledge
1961    /// of how many elements the iterator contains, and even allows you to reuse
1962    /// an existing vector's backing store rather than allocating a fresh vector.
1963    ///
1964    /// [`IndexedParallelIterator`]: trait.IndexedParallelIterator.html
1965    /// [`collect_into_vec()`]:
1966    ///     trait.IndexedParallelIterator.html#method.collect_into_vec
1967    ///
1968    /// # Examples
1969    ///
1970    /// ```
1971    /// use rayon::prelude::*;
1972    ///
1973    /// let sync_vec: Vec<_> = (0..100).into_iter().collect();
1974    ///
1975    /// let async_vec: Vec<_> = (0..100).into_par_iter().collect();
1976    ///
1977    /// assert_eq!(sync_vec, async_vec);
1978    /// ```
1979    ///
1980    /// You can collect a pair of collections like [`unzip`](#method.unzip)
1981    /// for paired items:
1982    ///
1983    /// ```
1984    /// use rayon::prelude::*;
1985    ///
1986    /// let a = [(0, 1), (1, 2), (2, 3), (3, 4)];
1987    /// let (first, second): (Vec<_>, Vec<_>) = a.into_par_iter().collect();
1988    ///
1989    /// assert_eq!(first, [0, 1, 2, 3]);
1990    /// assert_eq!(second, [1, 2, 3, 4]);
1991    /// ```
1992    ///
1993    /// Or like [`partition_map`](#method.partition_map) for `Either` items:
1994    ///
1995    /// ```
1996    /// use rayon::prelude::*;
1997    /// use rayon::iter::Either;
1998    ///
1999    /// let (left, right): (Vec<_>, Vec<_>) = (0..8).into_par_iter().map(|x| {
2000    ///     if x % 2 == 0 {
2001    ///         Either::Left(x * 4)
2002    ///     } else {
2003    ///         Either::Right(x * 3)
2004    ///     }
2005    /// }).collect();
2006    ///
2007    /// assert_eq!(left, [0, 8, 16, 24]);
2008    /// assert_eq!(right, [3, 9, 15, 21]);
2009    /// ```
2010    ///
2011    /// You can even collect an arbitrarily-nested combination of pairs and `Either`:
2012    ///
2013    /// ```
2014    /// use rayon::prelude::*;
2015    /// use rayon::iter::Either;
2016    ///
2017    /// let (first, (left, right)): (Vec<_>, (Vec<_>, Vec<_>))
2018    ///     = (0..8).into_par_iter().map(|x| {
2019    ///         if x % 2 == 0 {
2020    ///             (x, Either::Left(x * 4))
2021    ///         } else {
2022    ///             (-x, Either::Right(x * 3))
2023    ///         }
2024    ///     }).collect();
2025    ///
2026    /// assert_eq!(first, [0, -1, 2, -3, 4, -5, 6, -7]);
2027    /// assert_eq!(left, [0, 8, 16, 24]);
2028    /// assert_eq!(right, [3, 9, 15, 21]);
2029    /// ```
2030    ///
2031    /// All of that can _also_ be combined with short-circuiting collection of
2032    /// `Result` or `Option` types:
2033    ///
2034    /// ```
2035    /// use rayon::prelude::*;
2036    /// use rayon::iter::Either;
2037    ///
2038    /// let result: Result<(Vec<_>, (Vec<_>, Vec<_>)), _>
2039    ///     = (0..8).into_par_iter().map(|x| {
2040    ///         if x > 5 {
2041    ///             Err(x)
2042    ///         } else if x % 2 == 0 {
2043    ///             Ok((x, Either::Left(x * 4)))
2044    ///         } else {
2045    ///             Ok((-x, Either::Right(x * 3)))
2046    ///         }
2047    ///     }).collect();
2048    ///
2049    /// let error = result.unwrap_err();
2050    /// assert!(error == 6 || error == 7);
2051    /// ```
2052    fn collect<C>(self) -> C
2053    where
2054        C: FromParallelIterator<Self::Item>,
2055    {
2056        C::from_par_iter(self)
2057    }
2058
2059    /// Unzips the items of a parallel iterator into a pair of arbitrary
2060    /// `ParallelExtend` containers.
2061    ///
2062    /// You may prefer to use `unzip_into_vecs()`, which allocates more
2063    /// efficiently with precise knowledge of how many elements the
2064    /// iterator contains, and even allows you to reuse existing
2065    /// vectors' backing stores rather than allocating fresh vectors.
2066    ///
2067    /// # Examples
2068    ///
2069    /// ```
2070    /// use rayon::prelude::*;
2071    ///
2072    /// let a = [(0, 1), (1, 2), (2, 3), (3, 4)];
2073    ///
2074    /// let (left, right): (Vec<_>, Vec<_>) = a.par_iter().cloned().unzip();
2075    ///
2076    /// assert_eq!(left, [0, 1, 2, 3]);
2077    /// assert_eq!(right, [1, 2, 3, 4]);
2078    /// ```
2079    ///
2080    /// Nested pairs can be unzipped too.
2081    ///
2082    /// ```
2083    /// use rayon::prelude::*;
2084    ///
2085    /// let (values, (squares, cubes)): (Vec<_>, (Vec<_>, Vec<_>)) = (0..4).into_par_iter()
2086    ///     .map(|i| (i, (i * i, i * i * i)))
2087    ///     .unzip();
2088    ///
2089    /// assert_eq!(values, [0, 1, 2, 3]);
2090    /// assert_eq!(squares, [0, 1, 4, 9]);
2091    /// assert_eq!(cubes, [0, 1, 8, 27]);
2092    /// ```
2093    fn unzip<A, B, FromA, FromB>(self) -> (FromA, FromB)
2094    where
2095        Self: ParallelIterator<Item = (A, B)>,
2096        FromA: Default + Send + ParallelExtend<A>,
2097        FromB: Default + Send + ParallelExtend<B>,
2098        A: Send,
2099        B: Send,
2100    {
2101        unzip::unzip(self)
2102    }
2103
2104    /// Partitions the items of a parallel iterator into a pair of arbitrary
2105    /// `ParallelExtend` containers.  Items for which the `predicate` returns
2106    /// true go into the first container, and the rest go into the second.
2107    ///
2108    /// Note: unlike the standard `Iterator::partition`, this allows distinct
2109    /// collection types for the left and right items.  This is more flexible,
2110    /// but may require new type annotations when converting sequential code
2111    /// that used type inference assuming the two were the same.
2112    ///
2113    /// # Examples
2114    ///
2115    /// ```
2116    /// use rayon::prelude::*;
2117    ///
2118    /// let (left, right): (Vec<_>, Vec<_>) = (0..8).into_par_iter().partition(|x| x % 2 == 0);
2119    ///
2120    /// assert_eq!(left, [0, 2, 4, 6]);
2121    /// assert_eq!(right, [1, 3, 5, 7]);
2122    /// ```
2123    fn partition<A, B, P>(self, predicate: P) -> (A, B)
2124    where
2125        A: Default + Send + ParallelExtend<Self::Item>,
2126        B: Default + Send + ParallelExtend<Self::Item>,
2127        P: Fn(&Self::Item) -> bool + Sync + Send,
2128    {
2129        unzip::partition(self, predicate)
2130    }
2131
2132    /// Partitions and maps the items of a parallel iterator into a pair of
2133    /// arbitrary `ParallelExtend` containers.  `Either::Left` items go into
2134    /// the first container, and `Either::Right` items go into the second.
2135    ///
2136    /// # Examples
2137    ///
2138    /// ```
2139    /// use rayon::prelude::*;
2140    /// use rayon::iter::Either;
2141    ///
2142    /// let (left, right): (Vec<_>, Vec<_>) = (0..8).into_par_iter()
2143    ///     .partition_map(|x| {
2144    ///         if x % 2 == 0 {
2145    ///             Either::Left(x * 4)
2146    ///         } else {
2147    ///             Either::Right(x * 3)
2148    ///         }
2149    ///     });
2150    ///
2151    /// assert_eq!(left, [0, 8, 16, 24]);
2152    /// assert_eq!(right, [3, 9, 15, 21]);
2153    /// ```
2154    ///
2155    /// Nested `Either` enums can be split as well.
2156    ///
2157    /// ```
2158    /// use rayon::prelude::*;
2159    /// use rayon::iter::Either::*;
2160    ///
2161    /// let ((fizzbuzz, fizz), (buzz, other)): ((Vec<_>, Vec<_>), (Vec<_>, Vec<_>)) = (1..20)
2162    ///     .into_par_iter()
2163    ///     .partition_map(|x| match (x % 3, x % 5) {
2164    ///         (0, 0) => Left(Left(x)),
2165    ///         (0, _) => Left(Right(x)),
2166    ///         (_, 0) => Right(Left(x)),
2167    ///         (_, _) => Right(Right(x)),
2168    ///     });
2169    ///
2170    /// assert_eq!(fizzbuzz, [15]);
2171    /// assert_eq!(fizz, [3, 6, 9, 12, 18]);
2172    /// assert_eq!(buzz, [5, 10]);
2173    /// assert_eq!(other, [1, 2, 4, 7, 8, 11, 13, 14, 16, 17, 19]);
2174    /// ```
2175    fn partition_map<A, B, P, L, R>(self, predicate: P) -> (A, B)
2176    where
2177        A: Default + Send + ParallelExtend<L>,
2178        B: Default + Send + ParallelExtend<R>,
2179        P: Fn(Self::Item) -> Either<L, R> + Sync + Send,
2180        L: Send,
2181        R: Send,
2182    {
2183        unzip::partition_map(self, predicate)
2184    }
2185
2186    /// Intersperses clones of an element between items of this iterator.
2187    ///
2188    /// # Examples
2189    ///
2190    /// ```
2191    /// use rayon::prelude::*;
2192    ///
2193    /// let x = vec![1, 2, 3];
2194    /// let r: Vec<_> = x.into_par_iter().intersperse(-1).collect();
2195    ///
2196    /// assert_eq!(r, vec![1, -1, 2, -1, 3]);
2197    /// ```
2198    fn intersperse(self, element: Self::Item) -> Intersperse<Self>
2199    where
2200        Self::Item: Clone,
2201    {
2202        Intersperse::new(self, element)
2203    }
2204
2205    /// Creates an iterator that yields `n` elements from *anywhere* in the original iterator.
2206    ///
2207    /// This is similar to [`IndexedParallelIterator::take`] without being
2208    /// constrained to the "first" `n` of the original iterator order. The
2209    /// taken items will still maintain their relative order where that is
2210    /// visible in `collect`, `reduce`, and similar outputs.
2211    ///
2212    /// # Examples
2213    ///
2214    /// ```
2215    /// use rayon::prelude::*;
2216    ///
2217    /// let result: Vec<_> = (0..100)
2218    ///     .into_par_iter()
2219    ///     .filter(|&x| x % 2 == 0)
2220    ///     .take_any(5)
2221    ///     .collect();
2222    ///
2223    /// assert_eq!(result.len(), 5);
2224    /// assert!(result.windows(2).all(|w| w[0] < w[1]));
2225    /// ```
2226    fn take_any(self, n: usize) -> TakeAny<Self> {
2227        TakeAny::new(self, n)
2228    }
2229
2230    /// Creates an iterator that skips `n` elements from *anywhere* in the original iterator.
2231    ///
2232    /// This is similar to [`IndexedParallelIterator::skip`] without being
2233    /// constrained to the "first" `n` of the original iterator order. The
2234    /// remaining items will still maintain their relative order where that is
2235    /// visible in `collect`, `reduce`, and similar outputs.
2236    ///
2237    /// # Examples
2238    ///
2239    /// ```
2240    /// use rayon::prelude::*;
2241    ///
2242    /// let result: Vec<_> = (0..100)
2243    ///     .into_par_iter()
2244    ///     .filter(|&x| x % 2 == 0)
2245    ///     .skip_any(5)
2246    ///     .collect();
2247    ///
2248    /// assert_eq!(result.len(), 45);
2249    /// assert!(result.windows(2).all(|w| w[0] < w[1]));
2250    /// ```
2251    fn skip_any(self, n: usize) -> SkipAny<Self> {
2252        SkipAny::new(self, n)
2253    }
2254
2255    /// Creates an iterator that takes elements from *anywhere* in the original iterator
2256    /// until the given `predicate` returns `false`.
2257    ///
2258    /// The `predicate` may be anything -- e.g. it could be checking a fact about the item, a
2259    /// global condition unrelated to the item itself, or some combination thereof.
2260    ///
2261    /// If parallel calls to the `predicate` race and give different results, then the
2262    /// `true` results will still take those particular items, while respecting the `false`
2263    /// result from elsewhere to skip any further items.
2264    ///
2265    /// This is similar to [`Iterator::take_while`] without being constrained to the original
2266    /// iterator order. The taken items will still maintain their relative order where that is
2267    /// visible in `collect`, `reduce`, and similar outputs.
2268    ///
2269    /// # Examples
2270    ///
2271    /// ```
2272    /// use rayon::prelude::*;
2273    ///
2274    /// let result: Vec<_> = (0..100)
2275    ///     .into_par_iter()
2276    ///     .take_any_while(|x| *x < 50)
2277    ///     .collect();
2278    ///
2279    /// assert!(result.len() <= 50);
2280    /// assert!(result.windows(2).all(|w| w[0] < w[1]));
2281    /// ```
2282    ///
2283    /// ```
2284    /// use rayon::prelude::*;
2285    /// use std::sync::atomic::AtomicUsize;
2286    /// use std::sync::atomic::Ordering::Relaxed;
2287    ///
2288    /// // Collect any group of items that sum <= 1000
2289    /// let quota = AtomicUsize::new(1000);
2290    /// let result: Vec<_> = (0_usize..100)
2291    ///     .into_par_iter()
2292    ///     .take_any_while(|&x| {
2293    ///         quota.fetch_update(Relaxed, Relaxed, |q| q.checked_sub(x))
2294    ///             .is_ok()
2295    ///     })
2296    ///     .collect();
2297    ///
2298    /// let sum = result.iter().sum::<usize>();
2299    /// assert!(matches!(sum, 902..=1000));
2300    /// ```
2301    fn take_any_while<P>(self, predicate: P) -> TakeAnyWhile<Self, P>
2302    where
2303        P: Fn(&Self::Item) -> bool + Sync + Send,
2304    {
2305        TakeAnyWhile::new(self, predicate)
2306    }
2307
2308    /// Creates an iterator that skips elements from *anywhere* in the original iterator
2309    /// until the given `predicate` returns `false`.
2310    ///
2311    /// The `predicate` may be anything -- e.g. it could be checking a fact about the item, a
2312    /// global condition unrelated to the item itself, or some combination thereof.
2313    ///
2314    /// If parallel calls to the `predicate` race and give different results, then the
2315    /// `true` results will still skip those particular items, while respecting the `false`
2316    /// result from elsewhere to keep any further items.
2317    ///
2318    /// This is similar to [`Iterator::skip_while`] without being constrained to the original
2319    /// iterator order. The remaining items will still maintain their relative order where that is
2320    /// visible in `collect`, `reduce`, and similar outputs.
2321    ///
2322    /// # Examples
2323    ///
2324    /// ```
2325    /// use rayon::prelude::*;
2326    ///
2327    /// let result: Vec<_> = (0..100)
2328    ///     .into_par_iter()
2329    ///     .skip_any_while(|x| *x < 50)
2330    ///     .collect();
2331    ///
2332    /// assert!(result.len() >= 50);
2333    /// assert!(result.windows(2).all(|w| w[0] < w[1]));
2334    /// ```
2335    fn skip_any_while<P>(self, predicate: P) -> SkipAnyWhile<Self, P>
2336    where
2337        P: Fn(&Self::Item) -> bool + Sync + Send,
2338    {
2339        SkipAnyWhile::new(self, predicate)
2340    }
2341
2342    /// Internal method used to define the behavior of this parallel
2343    /// iterator. You should not need to call this directly.
2344    ///
2345    /// This method causes the iterator `self` to start producing
2346    /// items and to feed them to the consumer `consumer` one by one.
2347    /// It may split the consumer before doing so to create the
2348    /// opportunity to produce in parallel.
2349    ///
2350    /// See the [README] for more details on the internals of parallel
2351    /// iterators.
2352    ///
2353    /// [README]: https://github.com/rayon-rs/rayon/blob/master/src/iter/plumbing/README.md
2354    fn drive_unindexed<C>(self, consumer: C) -> C::Result
2355    where
2356        C: UnindexedConsumer<Self::Item>;
2357
2358    /// Internal method used to define the behavior of this parallel
2359    /// iterator. You should not need to call this directly.
2360    ///
2361    /// Returns the number of items produced by this iterator, if known
2362    /// statically. This can be used by consumers to trigger special fast
2363    /// paths. Therefore, if `Some(_)` is returned, this iterator must only
2364    /// use the (indexed) `Consumer` methods when driving a consumer, such
2365    /// as `split_at()`. Calling `UnindexedConsumer::split_off_left()` or
2366    /// other `UnindexedConsumer` methods -- or returning an inaccurate
2367    /// value -- may result in panics.
2368    ///
2369    /// This method is currently used to optimize `collect` for want
2370    /// of true Rust specialization; it may be removed when
2371    /// specialization is stable.
2372    fn opt_len(&self) -> Option<usize> {
2373        None
2374    }
2375}
2376
2377impl<T: ParallelIterator> IntoParallelIterator for T {
2378    type Iter = T;
2379    type Item = T::Item;
2380
2381    fn into_par_iter(self) -> T {
2382        self
2383    }
2384}
2385
2386/// An iterator that supports "random access" to its data, meaning
2387/// that you can split it at arbitrary indices and draw data from
2388/// those points.
2389///
2390/// **Note:** Not implemented for `u64`, `i64`, `u128`, or `i128` ranges
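///
/// For instance, slices and ranges produce indexed parallel iterators, so they
/// support `len()` and order-sensitive adaptors such as `zip()` and `enumerate()`:
///
/// ```
/// use rayon::prelude::*;
///
/// let v = vec![10, 20, 30];
/// assert_eq!(v.par_iter().len(), 3);
///
/// let pairs: Vec<_> = v.par_iter().zip(0..3).collect();
/// assert_eq!(pairs, [(&10, 0), (&20, 1), (&30, 2)]);
/// ```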
2391// Waiting for `ExactSizeIterator::is_empty` to be stabilized. See rust-lang/rust#35428
2392#[allow(clippy::len_without_is_empty)]
2393pub trait IndexedParallelIterator: ParallelIterator {
2394    /// Collects the results of the iterator into the specified
2395    /// vector. The vector is always truncated before execution
2396    /// begins. If possible, reusing the vector across calls can lead
2397    /// to better performance since it reuses the same backing buffer.
2398    ///
2399    /// # Examples
2400    ///
2401    /// ```
2402    /// use rayon::prelude::*;
2403    ///
2404    /// // any prior data will be truncated
2405    /// let mut vec = vec![-1, -2, -3];
2406    ///
2407    /// (0..5).into_par_iter()
2408    ///     .collect_into_vec(&mut vec);
2409    ///
2410    /// assert_eq!(vec, [0, 1, 2, 3, 4]);
2411    /// ```
2412    fn collect_into_vec(self, target: &mut Vec<Self::Item>) {
2413        collect::collect_into_vec(self, target);
2414    }
2415
2416    /// Unzips the results of the iterator into the specified
2417    /// vectors. The vectors are always truncated before execution
2418    /// begins. If possible, reusing the vectors across calls can lead
2419    /// to better performance since they reuse the same backing buffer.
2420    ///
2421    /// # Examples
2422    ///
2423    /// ```
2424    /// use rayon::prelude::*;
2425    ///
2426    /// // any prior data will be truncated
2427    /// let mut left = vec![42; 10];
2428    /// let mut right = vec![-1; 10];
2429    ///
2430    /// (10..15).into_par_iter()
2431    ///     .enumerate()
2432    ///     .unzip_into_vecs(&mut left, &mut right);
2433    ///
2434    /// assert_eq!(left, [0, 1, 2, 3, 4]);
2435    /// assert_eq!(right, [10, 11, 12, 13, 14]);
2436    /// ```
2437    fn unzip_into_vecs<A, B>(self, left: &mut Vec<A>, right: &mut Vec<B>)
2438    where
2439        Self: IndexedParallelIterator<Item = (A, B)>,
2440        A: Send,
2441        B: Send,
2442    {
2443        collect::unzip_into_vecs(self, left, right);
2444    }
2445
2446    /// Iterates over tuples `(A, B)`, where the items `A` are from
2447    /// this iterator and `B` are from the iterator given as argument.
2448    /// Like the `zip` method on ordinary iterators, if the two
2449    /// iterators are of unequal length, you only get the items they
2450    /// have in common.
2451    ///
2452    /// # Examples
2453    ///
2454    /// ```
2455    /// use rayon::prelude::*;
2456    ///
2457    /// let result: Vec<_> = (1..4)
2458    ///     .into_par_iter()
2459    ///     .zip(vec!['a', 'b', 'c'])
2460    ///     .collect();
2461    ///
2462    /// assert_eq!(result, [(1, 'a'), (2, 'b'), (3, 'c')]);
2463    /// ```
2464    fn zip<Z>(self, zip_op: Z) -> Zip<Self, Z::Iter>
2465    where
2466        Z: IntoParallelIterator,
2467        Z::Iter: IndexedParallelIterator,
2468    {
2469        Zip::new(self, zip_op.into_par_iter())
2470    }
2471
2472    /// The same as `zip`, but requires that both iterators have the same length.
2473    ///
2474    /// # Panics
2475    /// Will panic if `self` and `zip_op` are not the same length.
2476    ///
2477    /// ```should_panic
2478    /// use rayon::prelude::*;
2479    ///
2480    /// let one = [1u8];
2481    /// let two = [2u8, 2];
2482    /// let one_iter = one.par_iter();
2483    /// let two_iter = two.par_iter();
2484    ///
2485    /// // this will panic
2486    /// let zipped: Vec<(&u8, &u8)> = one_iter.zip_eq(two_iter).collect();
2487    ///
2488    /// // we should never get here
2489    /// assert_eq!(1, zipped.len());
2490    /// ```
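    ///
    /// With iterators of equal length, this behaves just like `zip()`:
    ///
    /// ```
    /// use rayon::prelude::*;
    ///
    /// let a = [1, 2, 3];
    /// let b = [4, 5, 6];
    ///
    /// let zipped: Vec<(&i32, &i32)> = a.par_iter().zip_eq(b.par_iter()).collect();
    /// assert_eq!(zipped, [(&1, &4), (&2, &5), (&3, &6)]);
    /// ```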
2491    #[track_caller]
2492    fn zip_eq<Z>(self, zip_op: Z) -> ZipEq<Self, Z::Iter>
2493    where
2494        Z: IntoParallelIterator,
2495        Z::Iter: IndexedParallelIterator,
2496    {
2497        let zip_op_iter = zip_op.into_par_iter();
2498        assert_eq!(
2499            self.len(),
2500            zip_op_iter.len(),
2501            "iterators must have the same length"
2502        );
2503        ZipEq::new(self, zip_op_iter)
2504    }
2505
2506    /// Interleaves elements of this iterator and the other given
2507    /// iterator. Alternately yields elements from this iterator and
2508    /// the given iterator, until both are exhausted. If one iterator
2509    /// is exhausted before the other, the remaining elements are provided
2510    /// by the other.
2511    ///
2512    /// # Examples
2513    ///
2514    /// ```
2515    /// use rayon::prelude::*;
2516    /// let (x, y) = (vec![1, 2], vec![3, 4, 5, 6]);
2517    /// let r: Vec<i32> = x.into_par_iter().interleave(y).collect();
2518    /// assert_eq!(r, vec![1, 3, 2, 4, 5, 6]);
2519    /// ```
2520    fn interleave<I>(self, other: I) -> Interleave<Self, I::Iter>
2521    where
2522        I: IntoParallelIterator<Item = Self::Item>,
2523        I::Iter: IndexedParallelIterator<Item = Self::Item>,
2524    {
2525        Interleave::new(self, other.into_par_iter())
2526    }
2527
2528    /// Interleaves elements of this iterator and the other given
2529    /// iterator, until one is exhausted.
2530    ///
2531    /// # Examples
2532    ///
2533    /// ```
2534    /// use rayon::prelude::*;
2535    /// let (x, y) = (vec![1, 2, 3, 4], vec![5, 6]);
2536    /// let r: Vec<i32> = x.into_par_iter().interleave_shortest(y).collect();
2537    /// assert_eq!(r, vec![1, 5, 2, 6, 3]);
2538    /// ```
2539    fn interleave_shortest<I>(self, other: I) -> InterleaveShortest<Self, I::Iter>
2540    where
2541        I: IntoParallelIterator<Item = Self::Item>,
2542        I::Iter: IndexedParallelIterator<Item = Self::Item>,
2543    {
2544        InterleaveShortest::new(self, other.into_par_iter())
2545    }
2546
2547    /// Splits an iterator up into fixed-size chunks.
2548    ///
2549    /// Returns an iterator that produces `Vec`s of the given number of elements.
2550    /// If the number of elements in the iterator is not divisible by `chunk_size`,
2551    /// the last chunk may be shorter than `chunk_size`.
2552    ///
2553    /// See also [`par_chunks()`] and [`par_chunks_mut()`] for similar behavior on
2554    /// slices, without having to allocate intermediate `Vec`s for the chunks.
2555    ///
2556    /// [`par_chunks()`]: ../slice/trait.ParallelSlice.html#method.par_chunks
2557    /// [`par_chunks_mut()`]: ../slice/trait.ParallelSliceMut.html#method.par_chunks_mut
2558    ///
    /// **Panics** if `chunk_size` is 0.
    ///
2559    /// # Examples
2560    ///
2561    /// ```
2562    /// use rayon::prelude::*;
2563    /// let a = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
2564    /// let r: Vec<Vec<i32>> = a.into_par_iter().chunks(3).collect();
2565    /// assert_eq!(r, vec![vec![1,2,3], vec![4,5,6], vec![7,8,9], vec![10]]);
2566    /// ```
2567    #[track_caller]
2568    fn chunks(self, chunk_size: usize) -> Chunks<Self> {
2569        assert!(chunk_size != 0, "chunk_size must not be zero");
2570        Chunks::new(self, chunk_size)
2571    }
2572
2573    /// Splits an iterator into fixed-size chunks, performing a sequential [`fold()`] on
2574    /// each chunk.
2575    ///
2576    /// Returns an iterator that produces a folded result for each chunk of items
2577    /// produced by this iterator.
2578    ///
2579    /// This works essentially like:
2580    ///
2581    /// ```text
2582    /// iter.chunks(chunk_size)
2583    ///     .map(|chunk|
2584    ///         chunk.into_iter()
2585    ///             .fold(identity, fold_op)
2586    ///     )
2587    /// ```
2588    ///
2589    /// except there is no per-chunk allocation overhead.
2590    ///
2591    /// [`fold()`]: std::iter::Iterator#method.fold
2592    ///
2593    /// **Panics** if `chunk_size` is 0.
2594    ///
2595    /// # Examples
2596    ///
2597    /// ```
2598    /// use rayon::prelude::*;
2599    /// let nums = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
2600    /// let chunk_sums = nums.into_par_iter().fold_chunks(2, || 0, |a, n| a + n).collect::<Vec<_>>();
2601    /// assert_eq!(chunk_sums, vec![3, 7, 11, 15, 19]);
2602    /// ```
2603    #[track_caller]
2604    fn fold_chunks<T, ID, F>(
2605        self,
2606        chunk_size: usize,
2607        identity: ID,
2608        fold_op: F,
2609    ) -> FoldChunks<Self, ID, F>
2610    where
2611        ID: Fn() -> T + Send + Sync,
2612        F: Fn(T, Self::Item) -> T + Send + Sync,
2613        T: Send,
2614    {
2615        assert!(chunk_size != 0, "chunk_size must not be zero");
2616        FoldChunks::new(self, chunk_size, identity, fold_op)
2617    }
2618
2619    /// Splits an iterator into fixed-size chunks, performing a sequential [`fold()`] on
2620    /// each chunk.
2621    ///
2622    /// Returns an iterator that produces a folded result for each chunk of items
2623    /// produced by this iterator.
2624    ///
2625    /// This works essentially like `fold_chunks(chunk_size, || init.clone(), fold_op)`,
2626    /// except it doesn't require the `init` type to be `Sync`, nor any other form of
2627    /// added synchronization.
2628    ///
2629    /// [`fold()`]: std::iter::Iterator#method.fold
2630    ///
2631    /// **Panics** if `chunk_size` is 0.
2632    ///
2633    /// # Examples
2634    ///
2635    /// ```
2636    /// use rayon::prelude::*;
2637    /// let nums = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
2638    /// let chunk_sums = nums.into_par_iter().fold_chunks_with(2, 0, |a, n| a + n).collect::<Vec<_>>();
2639    /// assert_eq!(chunk_sums, vec![3, 7, 11, 15, 19]);
2640    /// ```
2641    #[track_caller]
2642    fn fold_chunks_with<T, F>(
2643        self,
2644        chunk_size: usize,
2645        init: T,
2646        fold_op: F,
2647    ) -> FoldChunksWith<Self, T, F>
2648    where
2649        T: Send + Clone,
2650        F: Fn(T, Self::Item) -> T + Send + Sync,
2651    {
2652        assert!(chunk_size != 0, "chunk_size must not be zero");
2653        FoldChunksWith::new(self, chunk_size, init, fold_op)
2654    }
2655
2656    /// Lexicographically compares the elements of this `ParallelIterator` with those of
2657    /// another.
2658    ///
2659    /// # Examples
2660    ///
2661    /// ```
2662    /// use rayon::prelude::*;
2663    /// use std::cmp::Ordering::*;
2664    ///
2665    /// let x = vec![1, 2, 3];
2666    /// assert_eq!(x.par_iter().cmp(&vec![1, 3, 0]), Less);
2667    /// assert_eq!(x.par_iter().cmp(&vec![1, 2, 3]), Equal);
2668    /// assert_eq!(x.par_iter().cmp(&vec![1, 2]), Greater);
2669    /// ```
2670    fn cmp<I>(self, other: I) -> Ordering
2671    where
2672        I: IntoParallelIterator<Item = Self::Item>,
2673        I::Iter: IndexedParallelIterator,
2674        Self::Item: Ord,
2675    {
2676        #[inline]
2677        fn ordering<T: Ord>((x, y): (T, T)) -> Ordering {
2678            Ord::cmp(&x, &y)
2679        }
2680
2681        #[inline]
2682        fn inequal(&ord: &Ordering) -> bool {
2683            ord != Ordering::Equal
2684        }
2685
2686        let other = other.into_par_iter();
2687        let ord_len = self.len().cmp(&other.len());
2688        self.zip(other)
2689            .map(ordering)
2690            .find_first(inequal)
2691            .unwrap_or(ord_len)
2692    }
2693
2694    /// Lexicographically compares the elements of this `ParallelIterator` with those of
2695    /// another.
2696    ///
2697    /// # Examples
2698    ///
2699    /// ```
2700    /// use rayon::prelude::*;
2701    /// use std::cmp::Ordering::*;
2702    /// use std::f64::NAN;
2703    ///
2704    /// let x = vec![1.0, 2.0, 3.0];
2705    /// assert_eq!(x.par_iter().partial_cmp(&vec![1.0, 3.0, 0.0]), Some(Less));
2706    /// assert_eq!(x.par_iter().partial_cmp(&vec![1.0, 2.0, 3.0]), Some(Equal));
2707    /// assert_eq!(x.par_iter().partial_cmp(&vec![1.0, 2.0]), Some(Greater));
2708    /// assert_eq!(x.par_iter().partial_cmp(&vec![1.0, NAN]), None);
2709    /// ```
2710    fn partial_cmp<I>(self, other: I) -> Option<Ordering>
2711    where
2712        I: IntoParallelIterator,
2713        I::Iter: IndexedParallelIterator,
2714        Self::Item: PartialOrd<I::Item>,
2715    {
2716        #[inline]
2717        fn ordering<T: PartialOrd<U>, U>((x, y): (T, U)) -> Option<Ordering> {
2718            PartialOrd::partial_cmp(&x, &y)
2719        }
2720
2721        #[inline]
2722        fn inequal(&ord: &Option<Ordering>) -> bool {
2723            ord != Some(Ordering::Equal)
2724        }
2725
2726        let other = other.into_par_iter();
2727        let ord_len = self.len().cmp(&other.len());
2728        self.zip(other)
2729            .map(ordering)
2730            .find_first(inequal)
2731            .unwrap_or(Some(ord_len))
2732    }
2733
2734    /// Determines if the elements of this `ParallelIterator`
2735    /// are equal to those of another.
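    ///
    /// # Examples
    ///
    /// Both the lengths and the elements must match:
    ///
    /// ```
    /// use rayon::prelude::*;
    ///
    /// let x = vec![1, 2, 3];
    ///
    /// assert!(x.par_iter().eq(&vec![1, 2, 3]));
    /// assert!(!x.par_iter().eq(&vec![1, 2, 4]));
    /// assert!(!x.par_iter().eq(&vec![1, 2]));
    /// ```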
2736    fn eq<I>(self, other: I) -> bool
2737    where
2738        I: IntoParallelIterator,
2739        I::Iter: IndexedParallelIterator,
2740        Self::Item: PartialEq<I::Item>,
2741    {
2742        #[inline]
2743        fn eq<T: PartialEq<U>, U>((x, y): (T, U)) -> bool {
2744            PartialEq::eq(&x, &y)
2745        }
2746
2747        let other = other.into_par_iter();
2748        self.len() == other.len() && self.zip(other).all(eq)
2749    }
2750
2751    /// Determines if the elements of this `ParallelIterator`
2752    /// are unequal to those of another.
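    ///
    /// # Examples
    ///
    /// This is simply the negation of [`eq()`](#method.eq):
    ///
    /// ```
    /// use rayon::prelude::*;
    ///
    /// let x = vec![1, 2, 3];
    ///
    /// assert!(x.par_iter().ne(&vec![1, 2, 4]));
    /// assert!(!x.par_iter().ne(&vec![1, 2, 3]));
    /// ```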
2753    fn ne<I>(self, other: I) -> bool
2754    where
2755        I: IntoParallelIterator,
2756        I::Iter: IndexedParallelIterator,
2757        Self::Item: PartialEq<I::Item>,
2758    {
2759        !self.eq(other)
2760    }
2761
2762    /// Determines if the elements of this `ParallelIterator`
2763    /// are lexicographically less than those of another.
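    ///
    /// # Examples
    ///
    /// The comparison is lexicographic, as with `partial_cmp()`:
    ///
    /// ```
    /// use rayon::prelude::*;
    ///
    /// let x = vec![1, 2, 3];
    ///
    /// assert!(x.par_iter().lt(&vec![1, 3, 0]));
    /// assert!(!x.par_iter().lt(&vec![1, 2, 3]));
    /// ```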
2764    fn lt<I>(self, other: I) -> bool
2765    where
2766        I: IntoParallelIterator,
2767        I::Iter: IndexedParallelIterator,
2768        Self::Item: PartialOrd<I::Item>,
2769    {
2770        self.partial_cmp(other) == Some(Ordering::Less)
2771    }
2772
2773    /// Determines if the elements of this `ParallelIterator`
2774    /// are lexicographically less than or equal to those of another.
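    ///
    /// # Examples
    ///
    /// An equal or lexicographically smaller sequence satisfies `le`:
    ///
    /// ```
    /// use rayon::prelude::*;
    ///
    /// let x = vec![1, 2, 3];
    ///
    /// assert!(x.par_iter().le(&vec![1, 2, 3]));
    /// assert!(x.par_iter().le(&vec![1, 3, 0]));
    /// assert!(!x.par_iter().le(&vec![1, 2]));
    /// ```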
2775    fn le<I>(self, other: I) -> bool
2776    where
2777        I: IntoParallelIterator,
2778        I::Iter: IndexedParallelIterator,
2779        Self::Item: PartialOrd<I::Item>,
2780    {
2781        let ord = self.partial_cmp(other);
2782        ord == Some(Ordering::Equal) || ord == Some(Ordering::Less)
2783    }
2784
2785    /// Determines if the elements of this `ParallelIterator`
2786    /// are lexicographically greater than those of another.
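    ///
    /// # Examples
    ///
    /// A longer iterator with an equal prefix compares greater:
    ///
    /// ```
    /// use rayon::prelude::*;
    ///
    /// let x = vec![1, 2, 3];
    ///
    /// assert!(x.par_iter().gt(&vec![1, 2]));
    /// assert!(!x.par_iter().gt(&vec![1, 2, 3]));
    /// ```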
2787    fn gt<I>(self, other: I) -> bool
2788    where
2789        I: IntoParallelIterator,
2790        I::Iter: IndexedParallelIterator,
2791        Self::Item: PartialOrd<I::Item>,
2792    {
2793        self.partial_cmp(other) == Some(Ordering::Greater)
2794    }
2795
2796    /// Determines if the elements of this `ParallelIterator`
2797    /// are lexicographically greater than or equal to those of another.
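    ///
    /// # Examples
    ///
    /// An equal or lexicographically greater sequence satisfies `ge`:
    ///
    /// ```
    /// use rayon::prelude::*;
    ///
    /// let x = vec![1, 2, 3];
    ///
    /// assert!(x.par_iter().ge(&vec![1, 2, 3]));
    /// assert!(x.par_iter().ge(&vec![1, 2]));
    /// assert!(!x.par_iter().ge(&vec![1, 3, 0]));
    /// ```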
2798    fn ge<I>(self, other: I) -> bool
2799    where
2800        I: IntoParallelIterator,
2801        I::Iter: IndexedParallelIterator,
2802        Self::Item: PartialOrd<I::Item>,
2803    {
2804        let ord = self.partial_cmp(other);
2805        ord == Some(Ordering::Equal) || ord == Some(Ordering::Greater)
2806    }
2807
2808    /// Yields an index along with each item.
2809    ///
2810    /// # Examples
2811    ///
2812    /// ```
2813    /// use rayon::prelude::*;
2814    ///
2815    /// let chars = vec!['a', 'b', 'c'];
2816    /// let result: Vec<_> = chars
2817    ///     .into_par_iter()
2818    ///     .enumerate()
2819    ///     .collect();
2820    ///
2821    /// assert_eq!(result, [(0, 'a'), (1, 'b'), (2, 'c')]);
2822    /// ```
2823    fn enumerate(self) -> Enumerate<Self> {
2824        Enumerate::new(self)
2825    }
2826
2827    /// Creates an iterator that steps by the given amount.
2828    ///
2829    /// # Examples
2830    ///
2831    /// ```
2832    /// use rayon::prelude::*;
2833    ///
2834    /// let range = 3..10;
2835    /// let result: Vec<i32> = range
2836    ///     .into_par_iter()
2837    ///     .step_by(3)
2838    ///     .collect();
2839    ///
2840    /// assert_eq!(result, [3, 6, 9]);
2841    /// ```
2842    fn step_by(self, step: usize) -> StepBy<Self> {
2843        StepBy::new(self, step)
2844    }
2845
2846    /// Creates an iterator that skips the first `n` elements.
2847    ///
2848    /// # Examples
2849    ///
2850    /// ```
2851    /// use rayon::prelude::*;
2852    ///
2853    /// let result: Vec<_> = (0..100)
2854    ///     .into_par_iter()
2855    ///     .skip(95)
2856    ///     .collect();
2857    ///
2858    /// assert_eq!(result, [95, 96, 97, 98, 99]);
2859    /// ```
2860    fn skip(self, n: usize) -> Skip<Self> {
2861        Skip::new(self, n)
2862    }
2863
2864    /// Creates an iterator that yields the first `n` elements.
2865    ///
2866    /// # Examples
2867    ///
2868    /// ```
2869    /// use rayon::prelude::*;
2870    ///
2871    /// let result: Vec<_> = (0..100)
2872    ///     .into_par_iter()
2873    ///     .take(5)
2874    ///     .collect();
2875    ///
2876    /// assert_eq!(result, [0, 1, 2, 3, 4]);
2877    /// ```
2878    fn take(self, n: usize) -> Take<Self> {
2879        Take::new(self, n)
2880    }
2881
2882    /// Searches for **some** item in the parallel iterator that
2883    /// matches the given predicate, and returns its index.  Like
2884    /// `ParallelIterator::find_any`, the parallel search will not
2885    /// necessarily find the **first** match, and once a match is
2886    /// found we'll attempt to stop processing any more.
2887    ///
2888    /// # Examples
2889    ///
2890    /// ```
2891    /// use rayon::prelude::*;
2892    ///
2893    /// let a = [1, 2, 3, 3];
2894    ///
2895    /// let i = a.par_iter().position_any(|&x| x == 3).expect("found");
2896    /// assert!(i == 2 || i == 3);
2897    ///
2898    /// assert_eq!(a.par_iter().position_any(|&x| x == 100), None);
2899    /// ```
2900    fn position_any<P>(self, predicate: P) -> Option<usize>
2901    where
2902        P: Fn(Self::Item) -> bool + Sync + Send,
2903    {
2904        #[inline]
2905        fn check(&(_, p): &(usize, bool)) -> bool {
2906            p
2907        }
2908
2909        let (i, _) = self.map(predicate).enumerate().find_any(check)?;
2910        Some(i)
2911    }
2912
2913    /// Searches for the sequentially **first** item in the parallel iterator
2914    /// that matches the given predicate, and returns its index.
2915    ///
2916    /// Like `ParallelIterator::find_first`, once a match is found,
2917    /// all attempts to the right of the match will be stopped, while
2918    /// attempts to the left must continue in case an earlier match
2919    /// is found.
2920    ///
2921    /// Note that not all parallel iterators have a useful order, much like
2922    /// sequential `HashMap` iteration, so "first" may be nebulous.  If you
2923    /// just want the first match discovered anywhere in the iterator,
2924    /// `position_any` is a better choice.
2925    ///
2926    /// # Examples
2927    ///
2928    /// ```
2929    /// use rayon::prelude::*;
2930    ///
2931    /// let a = [1, 2, 3, 3];
2932    ///
2933    /// assert_eq!(a.par_iter().position_first(|&x| x == 3), Some(2));
2934    ///
2935    /// assert_eq!(a.par_iter().position_first(|&x| x == 100), None);
2936    /// ```
2937    fn position_first<P>(self, predicate: P) -> Option<usize>
2938    where
2939        P: Fn(Self::Item) -> bool + Sync + Send,
2940    {
2941        #[inline]
2942        fn check(&(_, p): &(usize, bool)) -> bool {
2943            p
2944        }
2945
2946        let (i, _) = self.map(predicate).enumerate().find_first(check)?;
2947        Some(i)
2948    }
2949
2950    /// Searches for the sequentially **last** item in the parallel iterator
2951    /// that matches the given predicate, and returns its index.
2952    ///
2953    /// Like `ParallelIterator::find_last`, once a match is found,
2954    /// all attempts to the left of the match will be stopped, while
2955    /// attempts to the right must continue in case a later match
2956    /// is found.
2957    ///
2958    /// Note that not all parallel iterators have a useful order, much like
2959    /// sequential `HashMap` iteration, so "last" may be nebulous.  When the
2960    /// order doesn't actually matter to you, `position_any` is a better
2961    /// choice.
2962    ///
2963    /// # Examples
2964    ///
2965    /// ```
2966    /// use rayon::prelude::*;
2967    ///
2968    /// let a = [1, 2, 3, 3];
2969    ///
2970    /// assert_eq!(a.par_iter().position_last(|&x| x == 3), Some(3));
2971    ///
2972    /// assert_eq!(a.par_iter().position_last(|&x| x == 100), None);
2973    /// ```
2974    fn position_last<P>(self, predicate: P) -> Option<usize>
2975    where
2976        P: Fn(Self::Item) -> bool + Sync + Send,
2977    {
2978        #[inline]
2979        fn check(&(_, p): &(usize, bool)) -> bool {
2980            p
2981        }
2982
2983        let (i, _) = self.map(predicate).enumerate().find_last(check)?;
2984        Some(i)
2985    }
2986
2987    #[doc(hidden)]
2988    #[deprecated(
2989        note = "parallel `position` does not search in order -- use `position_any`, \
2990                `position_first`, or `position_last`"
2991    )]
2992    fn position<P>(self, predicate: P) -> Option<usize>
2993    where
2994        P: Fn(Self::Item) -> bool + Sync + Send,
2995    {
2996        self.position_any(predicate)
2997    }
2998
2999    /// Searches for items in the parallel iterator that match the given
3000    /// predicate, and returns their indices.
3001    ///
3002    /// # Examples
3003    ///
3004    /// ```
3005    /// use rayon::prelude::*;
3006    ///
3007    /// let primes = vec![2, 3, 5, 7, 11, 13, 17, 19, 23, 29];
3008    ///
3009    /// // Find the positions of primes congruent to 1 modulo 6
3010    /// let p1mod6: Vec<_> = primes.par_iter().positions(|&p| p % 6 == 1).collect();
3011    /// assert_eq!(p1mod6, [3, 5, 7]); // primes 7, 13, and 19
3012    ///
3013    /// // Find the positions of primes congruent to 5 modulo 6
3014    /// let p5mod6: Vec<_> = primes.par_iter().positions(|&p| p % 6 == 5).collect();
3015    /// assert_eq!(p5mod6, [2, 4, 6, 8, 9]); // primes 5, 11, 17, 23, and 29
3016    /// ```
3017    fn positions<P>(self, predicate: P) -> Positions<Self, P>
3018    where
3019        P: Fn(Self::Item) -> bool + Sync + Send,
3020    {
3021        Positions::new(self, predicate)
3022    }
3023
3024    /// Produces a new iterator with the elements of this iterator in
3025    /// reverse order.
3026    ///
3027    /// # Examples
3028    ///
3029    /// ```
3030    /// use rayon::prelude::*;
3031    ///
3032    /// let result: Vec<_> = (0..5)
3033    ///     .into_par_iter()
3034    ///     .rev()
3035    ///     .collect();
3036    ///
3037    /// assert_eq!(result, [4, 3, 2, 1, 0]);
3038    /// ```
3039    fn rev(self) -> Rev<Self> {
3040        Rev::new(self)
3041    }
3042
3043    /// Sets the minimum length of iterators desired to process in each
3044    /// rayon job.  Rayon will not split any smaller than this length, but
3045    /// of course an iterator could already be smaller to begin with.
3046    ///
3047    /// Producers like `zip` and `interleave` will use the greater of the two
3048    /// minimums.
3049    /// Chained iterators and iterators inside `flat_map` may each use
3050    /// their own minimum length.
3051    ///
3052    /// # Examples
3053    ///
3054    /// ```
3055    /// use rayon::prelude::*;
3056    ///
3057    /// let min = (0..1_000_000)
3058    ///     .into_par_iter()
3059    ///     .with_min_len(1234)
3060    ///     .fold(|| 0, |acc, _| acc + 1) // count how many are in this segment
3061    ///     .min().unwrap();
3062    ///
3063    /// assert!(min >= 1234);
3064    /// ```
3065    fn with_min_len(self, min: usize) -> MinLen<Self> {
3066        MinLen::new(self, min)
3067    }
3068
3069    /// Sets the maximum length of iterators desired to process in each
3070    /// rayon job.  Rayon will try to split at least below this length,
3071    /// unless that would put it below the length from `with_min_len()`.
3072    /// For example, given min=10 and max=15, a length of 16 will not be
3073    /// split any further.
3074    ///
3075    /// Producers like `zip` and `interleave` will use the lesser of the two
3076    /// maximums.
3077    /// Chained iterators and iterators inside `flat_map` may each use
3078    /// their own maximum length.
3079    ///
3080    /// # Examples
3081    ///
3082    /// ```
3083    /// use rayon::prelude::*;
3084    ///
3085    /// let max = (0..1_000_000)
3086    ///     .into_par_iter()
3087    ///     .with_max_len(1234)
3088    ///     .fold(|| 0, |acc, _| acc + 1) // count how many are in this segment
3089    ///     .max().unwrap();
3090    ///
3091    /// assert!(max <= 1234);
3092    /// ```
3093    fn with_max_len(self, max: usize) -> MaxLen<Self> {
3094        MaxLen::new(self, max)
3095    }
3096
3097    /// Produces an exact count of how many items this iterator will
3098    /// produce, presuming no panic occurs.
3099    ///
3100    /// # Examples
3101    ///
3102    /// ```
3103    /// use rayon::prelude::*;
3104    ///
3105    /// let par_iter = (0..100).into_par_iter().zip(vec![0; 10]);
3106    /// assert_eq!(par_iter.len(), 10);
3107    ///
3108    /// let vec: Vec<_> = par_iter.collect();
3109    /// assert_eq!(vec.len(), 10);
3110    /// ```
3111    fn len(&self) -> usize;
3112
3113    /// Internal method used to define the behavior of this parallel
3114    /// iterator. You should not need to call this directly.
3115    ///
3116    /// This method causes the iterator `self` to start producing
3117    /// items and to feed them to the consumer `consumer` one by one.
3118    /// It may split the consumer before doing so to create the
3119    /// opportunity to produce in parallel. If a split does happen, it
3120    /// will inform the consumer of the index where the split should
3121    /// occur (unlike `ParallelIterator::drive_unindexed()`).
3122    ///
3123    /// See the [README] for more details on the internals of parallel
3124    /// iterators.
3125    ///
3126    /// [README]: https://github.com/rayon-rs/rayon/blob/master/src/iter/plumbing/README.md
3127    fn drive<C: Consumer<Self::Item>>(self, consumer: C) -> C::Result;
3128
3129    /// Internal method used to define the behavior of this parallel
3130    /// iterator. You should not need to call this directly.
3131    ///
3132    /// This method converts the iterator into a producer P and then
3133    /// invokes `callback.callback()` with P. Note that the type of
3134    /// this producer is not defined as part of the API, since
3135    /// `callback` must be defined generically for all producers. This
3136    /// allows the producer type to contain references; it also means
3137    /// that parallel iterators can adjust that type without causing a
3138    /// breaking change.
3139    ///
3140    /// See the [README] for more details on the internals of parallel
3141    /// iterators.
3142    ///
3143    /// [README]: https://github.com/rayon-rs/rayon/blob/master/src/iter/plumbing/README.md
3144    fn with_producer<CB: ProducerCallback<Self::Item>>(self, callback: CB) -> CB::Output;
3145}
3146
3147/// `FromParallelIterator` implements the creation of a collection
3148/// from a [`ParallelIterator`]. By implementing
3149/// `FromParallelIterator` for a given type, you define how it will be
3150/// created from an iterator.
3151///
3152/// `FromParallelIterator` is used through [`ParallelIterator`]'s [`collect()`] method.
3153///
3154/// [`ParallelIterator`]: trait.ParallelIterator.html
3155/// [`collect()`]: trait.ParallelIterator.html#method.collect
3156///
3157/// # Examples
3158///
3159/// Implementing `FromParallelIterator` for your type:
3160///
3161/// ```
3162/// use rayon::prelude::*;
3163/// use std::mem;
3164///
3165/// struct BlackHole {
3166///     mass: usize,
3167/// }
3168///
3169/// impl<T: Send> FromParallelIterator<T> for BlackHole {
3170///     fn from_par_iter<I>(par_iter: I) -> Self
3171///         where I: IntoParallelIterator<Item = T>
3172///     {
3173///         let par_iter = par_iter.into_par_iter();
3174///         BlackHole {
3175///             mass: par_iter.count() * mem::size_of::<T>(),
3176///         }
3177///     }
3178/// }
3179///
3180/// let bh: BlackHole = (0i32..1000).into_par_iter().collect();
3181/// assert_eq!(bh.mass, 4000);
3182/// ```
3183pub trait FromParallelIterator<T>
3184where
3185    T: Send,
3186{
3187    /// Creates an instance of the collection from the parallel iterator `par_iter`.
3188    ///
3189    /// If your collection is not naturally parallel, the easiest (and
3190    /// fastest) way to do this is often to collect `par_iter` into a
3191    /// [`LinkedList`] or other intermediate data structure and then
3192    /// sequentially extend your collection. However, a more 'native'
3193    /// technique is to use the [`par_iter.fold`] or
3194    /// [`par_iter.fold_with`] methods to create the collection.
3195    /// Alternatively, if your collection is 'natively' parallel, you
3196    /// can use [`par_iter.for_each`] to process each element in turn.
3197    ///
3198    /// [`LinkedList`]: https://doc.rust-lang.org/std/collections/struct.LinkedList.html
3199    /// [`par_iter.fold`]: trait.ParallelIterator.html#method.fold
3200    /// [`par_iter.fold_with`]: trait.ParallelIterator.html#method.fold_with
3201    /// [`par_iter.for_each`]: trait.ParallelIterator.html#method.for_each
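    ///
    /// As a minimal sketch of the `LinkedList` technique, here is a
    /// hypothetical `MyVec` wrapper around `Vec<T>` (the type exists only
    /// for illustration):
    ///
    /// ```
    /// use rayon::prelude::*;
    /// use std::collections::LinkedList;
    ///
    /// struct MyVec<T>(Vec<T>);
    ///
    /// impl<T: Send> FromParallelIterator<T> for MyVec<T> {
    ///     fn from_par_iter<I>(par_iter: I) -> Self
    ///         where I: IntoParallelIterator<Item = T>
    ///     {
    ///         // Build per-thread vectors in parallel, gather them into a
    ///         // `LinkedList`, then extend the final vector sequentially.
    ///         let list: LinkedList<Vec<T>> = par_iter
    ///             .into_par_iter()
    ///             .fold(Vec::new, |mut vec, item| {
    ///                 vec.push(item);
    ///                 vec
    ///             })
    ///             .collect();
    ///
    ///         let mut vec = Vec::new();
    ///         for mut chunk in list {
    ///             vec.append(&mut chunk);
    ///         }
    ///         MyVec(vec)
    ///     }
    /// }
    ///
    /// let v: MyVec<i32> = (0..100).into_par_iter().collect();
    /// assert_eq!(v.0.len(), 100);
    /// assert_eq!(v.0.iter().sum::<i32>(), 4950);
    /// ```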
3202    fn from_par_iter<I>(par_iter: I) -> Self
3203    where
3204        I: IntoParallelIterator<Item = T>;
3205}
3206
3207/// `ParallelExtend` extends an existing collection with items from a [`ParallelIterator`].
3208///
3209/// [`ParallelIterator`]: trait.ParallelIterator.html
3210///
3211/// # Examples
3212///
3213/// Implementing `ParallelExtend` for your type:
3214///
3215/// ```
3216/// use rayon::prelude::*;
3217/// use std::mem;
3218///
3219/// struct BlackHole {
3220///     mass: usize,
3221/// }
3222///
3223/// impl<T: Send> ParallelExtend<T> for BlackHole {
3224///     fn par_extend<I>(&mut self, par_iter: I)
3225///         where I: IntoParallelIterator<Item = T>
3226///     {
3227///         let par_iter = par_iter.into_par_iter();
3228///         self.mass += par_iter.count() * mem::size_of::<T>();
3229///     }
3230/// }
3231///
3232/// let mut bh = BlackHole { mass: 0 };
3233/// bh.par_extend(0i32..1000);
3234/// assert_eq!(bh.mass, 4000);
3235/// bh.par_extend(0i64..10);
3236/// assert_eq!(bh.mass, 4080);
3237/// ```
3238pub trait ParallelExtend<T>
3239where
3240    T: Send,
3241{
3242    /// Extends an instance of the collection with the elements drawn
3243    /// from the parallel iterator `par_iter`.
3244    ///
3245    /// # Examples
3246    ///
3247    /// ```
3248    /// use rayon::prelude::*;
3249    ///
3250    /// let mut vec = vec![];
3251    /// vec.par_extend(0..5);
3252    /// vec.par_extend((0..5).into_par_iter().map(|i| i * i));
3253    /// assert_eq!(vec, [0, 1, 2, 3, 4, 0, 1, 4, 9, 16]);
3254    /// ```
3255    fn par_extend<I>(&mut self, par_iter: I)
3256    where
3257        I: IntoParallelIterator<Item = T>;
3258}
3259
3260/// `ParallelDrainFull` creates a parallel iterator that moves all items
3261/// from a collection while retaining the original capacity.
3262///
3263/// Types which are indexable typically implement [`ParallelDrainRange`]
3264/// instead, where you can drain fully with `par_drain(..)`.
3265///
3266/// [`ParallelDrainRange`]: trait.ParallelDrainRange.html
3267pub trait ParallelDrainFull {
3268    /// The draining parallel iterator type that will be created.
3269    type Iter: ParallelIterator<Item = Self::Item>;
3270
3271    /// The type of item that the parallel iterator will produce.
3272    /// This is usually the same as `IntoParallelIterator::Item`.
3273    type Item: Send;
3274
3275    /// Returns a draining parallel iterator over an entire collection.
3276    ///
3277    /// When the iterator is dropped, all items are removed, even if the
3278    /// iterator was not fully consumed. If the iterator is leaked, for example
3279    /// using `std::mem::forget`, it is unspecified how many items are removed.
3280    ///
3281    /// # Examples
3282    ///
3283    /// ```
3284    /// use rayon::prelude::*;
3285    /// use std::collections::{BinaryHeap, HashSet};
3286    ///
3287    /// let squares: HashSet<i32> = (0..10).map(|x| x * x).collect();
3288    ///
3289    /// let mut heap: BinaryHeap<_> = squares.iter().copied().collect();
3290    /// assert_eq!(
3291    ///     // heaps are drained in arbitrary order
3292    ///     heap.par_drain()
3293    ///         .inspect(|x| assert!(squares.contains(x)))
3294    ///         .count(),
3295    ///     squares.len(),
3296    /// );
3297    /// assert!(heap.is_empty());
3298    /// assert!(heap.capacity() >= squares.len());
3299    /// ```
3300    fn par_drain(self) -> Self::Iter;
3301}
3302
3303/// `ParallelDrainRange` creates a parallel iterator that moves a range of items
3304/// from a collection while retaining the original capacity.
3305///
3306/// Types which are not indexable may implement [`ParallelDrainFull`] instead.
3307///
3308/// [`ParallelDrainFull`]: trait.ParallelDrainFull.html
3309pub trait ParallelDrainRange<Idx = usize> {
3310    /// The draining parallel iterator type that will be created.
3311    type Iter: ParallelIterator<Item = Self::Item>;
3312
3313    /// The type of item that the parallel iterator will produce.
3314    /// This is usually the same as `IntoParallelIterator::Item`.
3315    type Item: Send;
3316
3317    /// Returns a draining parallel iterator over a range of the collection.
3318    ///
3319    /// When the iterator is dropped, all items in the range are removed, even
3320    /// if the iterator was not fully consumed. If the iterator is leaked, for
3321    /// example using `std::mem::forget`, it is unspecified how many items are
3322    /// removed.
3323    ///
3324    /// # Examples
3325    ///
3326    /// ```
3327    /// use rayon::prelude::*;
3328    ///
3329    /// let squares: Vec<i32> = (0..10).map(|x| x * x).collect();
3330    ///
3331    /// println!("RangeFull");
3332    /// let mut vec = squares.clone();
3333    /// assert!(vec.par_drain(..)
3334    ///            .eq(squares.par_iter().copied()));
3335    /// assert!(vec.is_empty());
3336    /// assert!(vec.capacity() >= squares.len());
3337    ///
3338    /// println!("RangeFrom");
3339    /// let mut vec = squares.clone();
3340    /// assert!(vec.par_drain(5..)
3341    ///            .eq(squares[5..].par_iter().copied()));
3342    /// assert_eq!(&vec[..], &squares[..5]);
3343    /// assert!(vec.capacity() >= squares.len());
3344    ///
3345    /// println!("RangeTo");
3346    /// let mut vec = squares.clone();
3347    /// assert!(vec.par_drain(..5)
3348    ///            .eq(squares[..5].par_iter().copied()));
3349    /// assert_eq!(&vec[..], &squares[5..]);
3350    /// assert!(vec.capacity() >= squares.len());
3351    ///
3352    /// println!("RangeToInclusive");
3353    /// let mut vec = squares.clone();
3354    /// assert!(vec.par_drain(..=5)
3355    ///            .eq(squares[..=5].par_iter().copied()));
3356    /// assert_eq!(&vec[..], &squares[6..]);
3357    /// assert!(vec.capacity() >= squares.len());
3358    ///
3359    /// println!("Range");
3360    /// let mut vec = squares.clone();
3361    /// assert!(vec.par_drain(3..7)
3362    ///            .eq(squares[3..7].par_iter().copied()));
3363    /// assert_eq!(&vec[..3], &squares[..3]);
3364    /// assert_eq!(&vec[3..], &squares[7..]);
3365    /// assert!(vec.capacity() >= squares.len());
3366    ///
3367    /// println!("RangeInclusive");
3368    /// let mut vec = squares.clone();
3369    /// assert!(vec.par_drain(3..=7)
3370    ///            .eq(squares[3..=7].par_iter().copied()));
3371    /// assert_eq!(&vec[..3], &squares[..3]);
3372    /// assert_eq!(&vec[3..], &squares[8..]);
3373    /// assert!(vec.capacity() >= squares.len());
3374    /// ```
3375    fn par_drain<R: RangeBounds<Idx>>(self, range: R) -> Self::Iter;
3376}
3377
3378/// We hide the `Try` trait in a private module, as it's only meant to be a
3379/// stable clone of the standard library's `Try` trait, which is still unstable.
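///
/// The early-exit behavior it enables is observable through public methods
/// such as `try_for_each` (a small, purely illustrative example):
///
/// ```
/// use rayon::prelude::*;
///
/// // `Ok` values let the computation continue; an `Err` makes it exit early.
/// let res: Result<(), i32> = (0..1_000).into_par_iter().try_for_each(|x| {
///     if x == 42 { Err(x) } else { Ok(()) }
/// });
/// assert_eq!(res, Err(42));
/// ```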
3380mod private {
3381    use std::convert::Infallible;
3382    use std::ops::ControlFlow::{self, Break, Continue};
3383    use std::task::Poll;
3384
3385    /// Clone of `std::ops::Try`.
3386    ///
3387    /// Implementing this trait is not permitted outside of `rayon`.
3388    pub trait Try {
3389        private_decl! {}
3390
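        /// The value produced when the computation continues (e.g. the `Ok`/`Some` payload).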
3391        type Output;
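        /// The value carried out when the computation exits early (e.g. the `Err`/`None` case).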
3392        type Residual;
3393
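        /// Wraps a continue value back into `Self` (e.g. `Ok(output)`).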
3394        fn from_output(output: Self::Output) -> Self;
3395
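        /// Wraps an early-exit value back into `Self` (e.g. `Err(e)`).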
3396        fn from_residual(residual: Self::Residual) -> Self;
3397
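        /// Splits `self` into either `Continue(output)` or `Break(residual)`.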
3398        fn branch(self) -> ControlFlow<Self::Residual, Self::Output>;
3399    }
3400
3401    impl<B, C> Try for ControlFlow<B, C> {
3402        private_impl! {}
3403
3404        type Output = C;
3405        type Residual = ControlFlow<B, Infallible>;
3406
3407        fn from_output(output: Self::Output) -> Self {
3408            Continue(output)
3409        }
3410
3411        fn from_residual(residual: Self::Residual) -> Self {
3412            match residual {
3413                Break(b) => Break(b),
3414                Continue(_) => unreachable!(),
3415            }
3416        }
3417
3418        fn branch(self) -> ControlFlow<Self::Residual, Self::Output> {
3419            match self {
3420                Continue(c) => Continue(c),
3421                Break(b) => Break(Break(b)),
3422            }
3423        }
3424    }
3425
3426    impl<T> Try for Option<T> {
3427        private_impl! {}
3428
3429        type Output = T;
3430        type Residual = Option<Infallible>;
3431
3432        fn from_output(output: Self::Output) -> Self {
3433            Some(output)
3434        }
3435
3436        fn from_residual(residual: Self::Residual) -> Self {
3437            match residual {
3438                None => None,
3439                Some(_) => unreachable!(),
3440            }
3441        }
3442
3443        fn branch(self) -> ControlFlow<Self::Residual, Self::Output> {
3444            match self {
3445                Some(c) => Continue(c),
3446                None => Break(None),
3447            }
3448        }
3449    }
3450
3451    impl<T, E> Try for Result<T, E> {
3452        private_impl! {}
3453
3454        type Output = T;
3455        type Residual = Result<Infallible, E>;
3456
3457        fn from_output(output: Self::Output) -> Self {
3458            Ok(output)
3459        }
3460
3461        fn from_residual(residual: Self::Residual) -> Self {
3462            match residual {
3463                Err(e) => Err(e),
3464                Ok(_) => unreachable!(),
3465            }
3466        }
3467
3468        fn branch(self) -> ControlFlow<Self::Residual, Self::Output> {
3469            match self {
3470                Ok(c) => Continue(c),
3471                Err(e) => Break(Err(e)),
3472            }
3473        }
3474    }
3475
3476    impl<T, E> Try for Poll<Result<T, E>> {
3477        private_impl! {}
3478
3479        type Output = Poll<T>;
3480        type Residual = Result<Infallible, E>;
3481
3482        fn from_output(output: Self::Output) -> Self {
3483            output.map(Ok)
3484        }
3485
3486        fn from_residual(residual: Self::Residual) -> Self {
3487            match residual {
3488                Err(e) => Poll::Ready(Err(e)),
3489                Ok(_) => unreachable!(),
3490            }
3491        }
3492
3493        fn branch(self) -> ControlFlow<Self::Residual, Self::Output> {
3494            match self {
3495                Poll::Pending => Continue(Poll::Pending),
3496                Poll::Ready(Ok(c)) => Continue(Poll::Ready(c)),
3497                Poll::Ready(Err(e)) => Break(Err(e)),
3498            }
3499        }
3500    }
3501
3502    impl<T, E> Try for Poll<Option<Result<T, E>>> {
3503        private_impl! {}
3504
3505        type Output = Poll<Option<T>>;
3506        type Residual = Result<Infallible, E>;
3507
3508        fn from_output(output: Self::Output) -> Self {
3509            match output {
3510                Poll::Ready(o) => Poll::Ready(o.map(Ok)),
3511                Poll::Pending => Poll::Pending,
3512            }
3513        }
3514
3515        fn from_residual(residual: Self::Residual) -> Self {
3516            match residual {
3517                Err(e) => Poll::Ready(Some(Err(e))),
3518                Ok(_) => unreachable!(),
3519            }
3520        }
3521
3522        fn branch(self) -> ControlFlow<Self::Residual, Self::Output> {
3523            match self {
3524                Poll::Pending => Continue(Poll::Pending),
3525                Poll::Ready(None) => Continue(Poll::Ready(None)),
3526                Poll::Ready(Some(Ok(c))) => Continue(Poll::Ready(Some(c))),
3527                Poll::Ready(Some(Err(e))) => Break(Err(e)),
3528            }
3529        }
3530    }
3531}