Trait maybe_rayon::prelude::IndexedParallelIterator
pub trait IndexedParallelIterator: ParallelIterator {
fn len(&self) -> usize;
fn drive<C>(self, consumer: C) -> <C as Consumer<Self::Item>>::Result
where
C: Consumer<Self::Item>;
fn with_producer<CB>(
self,
callback: CB
) -> <CB as ProducerCallback<Self::Item>>::Output
where
CB: ProducerCallback<Self::Item>;
fn collect_into_vec(self, target: &mut Vec<Self::Item, Global>) { ... }
fn unzip_into_vecs<A, B>(
self,
left: &mut Vec<A, Global>,
right: &mut Vec<B, Global>
)
where
Self: IndexedParallelIterator<Item = (A, B)>,
A: Send,
B: Send,
{ ... }
fn zip<Z>(self, zip_op: Z) -> Zip<Self, <Z as IntoParallelIterator>::Iter>
where
Z: IntoParallelIterator,
<Z as IntoParallelIterator>::Iter: IndexedParallelIterator,
{ ... }
fn zip_eq<Z>(
self,
zip_op: Z
) -> ZipEq<Self, <Z as IntoParallelIterator>::Iter>
where
Z: IntoParallelIterator,
<Z as IntoParallelIterator>::Iter: IndexedParallelIterator,
{ ... }
fn interleave<I>(
self,
other: I
) -> Interleave<Self, <I as IntoParallelIterator>::Iter>
where
I: IntoParallelIterator<Item = Self::Item>,
<I as IntoParallelIterator>::Iter: IndexedParallelIterator<Item = Self::Item>,
{ ... }
fn interleave_shortest<I>(
self,
other: I
) -> InterleaveShortest<Self, <I as IntoParallelIterator>::Iter>
where
I: IntoParallelIterator<Item = Self::Item>,
<I as IntoParallelIterator>::Iter: IndexedParallelIterator<Item = Self::Item>,
{ ... }
fn chunks(self, chunk_size: usize) -> Chunks<Self> { ... }
fn fold_chunks<T, ID, F>(
self,
chunk_size: usize,
identity: ID,
fold_op: F
) -> FoldChunks<Self, ID, F>
where
ID: Fn() -> T + Send + Sync,
F: Fn(T, Self::Item) -> T + Send + Sync,
T: Send,
{ ... }
fn fold_chunks_with<T, F>(
self,
chunk_size: usize,
init: T,
fold_op: F
) -> FoldChunksWith<Self, T, F>
where
T: Send + Clone,
F: Fn(T, Self::Item) -> T + Send + Sync,
{ ... }
fn cmp<I>(self, other: I) -> Ordering
where
I: IntoParallelIterator<Item = Self::Item>,
<I as IntoParallelIterator>::Iter: IndexedParallelIterator,
Self::Item: Ord,
{ ... }
fn partial_cmp<I>(self, other: I) -> Option<Ordering>
where
I: IntoParallelIterator,
<I as IntoParallelIterator>::Iter: IndexedParallelIterator,
Self::Item: PartialOrd<<I as IntoParallelIterator>::Item>,
{ ... }
fn eq<I>(self, other: I) -> bool
where
I: IntoParallelIterator,
<I as IntoParallelIterator>::Iter: IndexedParallelIterator,
Self::Item: PartialEq<<I as IntoParallelIterator>::Item>,
{ ... }
fn ne<I>(self, other: I) -> bool
where
I: IntoParallelIterator,
<I as IntoParallelIterator>::Iter: IndexedParallelIterator,
Self::Item: PartialEq<<I as IntoParallelIterator>::Item>,
{ ... }
fn lt<I>(self, other: I) -> bool
where
I: IntoParallelIterator,
<I as IntoParallelIterator>::Iter: IndexedParallelIterator,
Self::Item: PartialOrd<<I as IntoParallelIterator>::Item>,
{ ... }
fn le<I>(self, other: I) -> bool
where
I: IntoParallelIterator,
<I as IntoParallelIterator>::Iter: IndexedParallelIterator,
Self::Item: PartialOrd<<I as IntoParallelIterator>::Item>,
{ ... }
fn gt<I>(self, other: I) -> bool
where
I: IntoParallelIterator,
<I as IntoParallelIterator>::Iter: IndexedParallelIterator,
Self::Item: PartialOrd<<I as IntoParallelIterator>::Item>,
{ ... }
fn ge<I>(self, other: I) -> bool
where
I: IntoParallelIterator,
<I as IntoParallelIterator>::Iter: IndexedParallelIterator,
Self::Item: PartialOrd<<I as IntoParallelIterator>::Item>,
{ ... }
fn enumerate(self) -> Enumerate<Self> { ... }
fn step_by(self, step: usize) -> StepBy<Self> { ... }
fn skip(self, n: usize) -> Skip<Self> { ... }
fn take(self, n: usize) -> Take<Self> { ... }
fn position_any<P>(self, predicate: P) -> Option<usize>
where
P: Fn(Self::Item) -> bool + Sync + Send,
{ ... }
fn position_first<P>(self, predicate: P) -> Option<usize>
where
P: Fn(Self::Item) -> bool + Sync + Send,
{ ... }
fn position_last<P>(self, predicate: P) -> Option<usize>
where
P: Fn(Self::Item) -> bool + Sync + Send,
{ ... }
fn positions<P>(self, predicate: P) -> Positions<Self, P>
where
P: Fn(Self::Item) -> bool + Sync + Send,
{ ... }
fn rev(self) -> Rev<Self> { ... }
fn with_min_len(self, min: usize) -> MinLen<Self> { ... }
fn with_max_len(self, max: usize) -> MaxLen<Self> { ... }
}
An iterator that supports “random access” to its data, meaning that you can split it at arbitrary indices and draw data from those points.
Note: Not implemented for u64, i64, u128, or i128 ranges.
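For instance, ranges of u32 are indexed, so indexed-only adaptors such as enumerate and zip are available on them. A brief illustrative example, not taken from the upstream documentation:
use rayon::prelude::*;
// `enumerate` is only available on indexed parallel iterators
let v: Vec<(usize, u32)> = (10u32..13).into_par_iter().enumerate().collect();
assert_eq!(v, [(0, 10), (1, 11), (2, 12)]);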
Required Methods
fn len(&self) -> usize
Produces an exact count of how many items this iterator will produce, presuming no panic occurs.
Examples
use rayon::prelude::*;
let par_iter = (0..100).into_par_iter().zip(vec![0; 10]);
assert_eq!(par_iter.len(), 10);
let vec: Vec<_> = par_iter.collect();
assert_eq!(vec.len(), 10);
fn drive<C>(self, consumer: C) -> <C as Consumer<Self::Item>>::Result
where
    C: Consumer<Self::Item>,
Internal method used to define the behavior of this parallel iterator. You should not need to call this directly.
This method causes the iterator self to start producing items and to feed them to the consumer consumer one by one. It may split the consumer before doing so to create the opportunity to produce in parallel. If a split does happen, it will inform the consumer of the index where the split should occur (unlike ParallelIterator::drive_unindexed()).
See the README for more details on the internals of parallel iterators.
fn with_producer<CB>(self, callback: CB) -> <CB as ProducerCallback<Self::Item>>::Output
where
    CB: ProducerCallback<Self::Item>,
Internal method used to define the behavior of this parallel iterator. You should not need to call this directly.
This method converts the iterator into a producer P and then invokes callback.callback() with P. Note that the type of this producer is not defined as part of the API, since callback must be defined generically for all producers. This allows the producer type to contain references; it also means that parallel iterators can adjust that type without causing a breaking change.
See the README for more details on the internals of parallel iterators.
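As a rough sketch of how these two required methods are often implemented, an adaptor that simply forwards to an inner indexed iterator can delegate both calls. This is only an illustrative example assuming the real rayon crate and its rayon::iter::plumbing module; Passthrough is a hypothetical type, not part of this crate:
use rayon::iter::plumbing::{Consumer, ProducerCallback, UnindexedConsumer};
use rayon::prelude::*;

// Hypothetical adaptor that forwards every call to an inner indexed iterator.
struct Passthrough<I> {
    inner: I,
}

impl<I: ParallelIterator> ParallelIterator for Passthrough<I> {
    type Item = I::Item;

    fn drive_unindexed<C>(self, consumer: C) -> C::Result
    where
        C: UnindexedConsumer<Self::Item>,
    {
        self.inner.drive_unindexed(consumer)
    }

    fn opt_len(&self) -> Option<usize> {
        self.inner.opt_len()
    }
}

impl<I: IndexedParallelIterator> IndexedParallelIterator for Passthrough<I> {
    fn len(&self) -> usize {
        self.inner.len()
    }

    // `drive` feeds items to an indexed consumer; delegating preserves the inner splitting.
    fn drive<C>(self, consumer: C) -> C::Result
    where
        C: Consumer<Self::Item>,
    {
        self.inner.drive(consumer)
    }

    // `with_producer` hands the callback an (unnamed) producer type from the inner iterator.
    fn with_producer<CB>(self, callback: CB) -> CB::Output
    where
        CB: ProducerCallback<Self::Item>,
    {
        self.inner.with_producer(callback)
    }
}

fn main() {
    // Indexed adaptors like `enumerate` now work on the wrapper.
    let v: Vec<(usize, i32)> = Passthrough { inner: (0..3).into_par_iter() }
        .enumerate()
        .collect();
    assert_eq!(v, [(0, 0), (1, 1), (2, 2)]);
}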
Provided Methods
fn collect_into_vec(self, target: &mut Vec<Self::Item, Global>)
Collects the results of the iterator into the specified vector. The vector is always truncated before execution begins. If possible, reusing the vector across calls can lead to better performance since it reuses the same backing buffer.
Examples
use rayon::prelude::*;
// any prior data will be truncated
let mut vec = vec![-1, -2, -3];
(0..5).into_par_iter()
    .collect_into_vec(&mut vec);
assert_eq!(vec, [0, 1, 2, 3, 4]);
fn unzip_into_vecs<A, B>(self, left: &mut Vec<A, Global>, right: &mut Vec<B, Global>)
where
    Self: IndexedParallelIterator<Item = (A, B)>,
    A: Send,
    B: Send,
Unzips the results of the iterator into the specified vectors. The vectors are always truncated before execution begins. If possible, reusing the vectors across calls can lead to better performance since they reuse the same backing buffer.
Examples
use rayon::prelude::*;
// any prior data will be truncated
let mut left = vec![42; 10];
let mut right = vec![-1; 10];
(10..15).into_par_iter()
    .enumerate()
    .unzip_into_vecs(&mut left, &mut right);
assert_eq!(left, [0, 1, 2, 3, 4]);
assert_eq!(right, [10, 11, 12, 13, 14]);
fn zip<Z>(self, zip_op: Z) -> Zip<Self, <Z as IntoParallelIterator>::Iter>
where
    Z: IntoParallelIterator,
    <Z as IntoParallelIterator>::Iter: IndexedParallelIterator,
Iterates over tuples (A, B), where the items A are from this iterator and B are from the iterator given as argument.
Like the zip method on ordinary iterators, if the two iterators are of unequal length, you only get the items they have in common.
Examples
use rayon::prelude::*;
let result: Vec<_> = (1..4)
    .into_par_iter()
    .zip(vec!['a', 'b', 'c'])
    .collect();
assert_eq!(result, [(1, 'a'), (2, 'b'), (3, 'c')]);
fn zip_eq<Z>(self, zip_op: Z) -> ZipEq<Self, <Z as IntoParallelIterator>::Iter>
where
    Z: IntoParallelIterator,
    <Z as IntoParallelIterator>::Iter: IndexedParallelIterator,
The same as Zip, but requires that both iterators have the same length.
Panics
Will panic if self and zip_op are not the same length.
use rayon::prelude::*;
let one = [1u8];
let two = [2u8, 2];
let one_iter = one.par_iter();
let two_iter = two.par_iter();
// this will panic
let zipped: Vec<(&u8, &u8)> = one_iter.zip_eq(two_iter).collect();
// we should never get here
assert_eq!(1, zipped.len());
fn interleave<I>(self, other: I) -> Interleave<Self, <I as IntoParallelIterator>::Iter>
where
    I: IntoParallelIterator<Item = Self::Item>,
    <I as IntoParallelIterator>::Iter: IndexedParallelIterator<Item = Self::Item>,
Interleaves elements of this iterator and the other given iterator. Alternately yields elements from each until both are exhausted; if one iterator is exhausted before the other, the remaining elements are taken from the other.
Examples
use rayon::prelude::*;
let (x, y) = (vec![1, 2], vec![3, 4, 5, 6]);
let r: Vec<i32> = x.into_par_iter().interleave(y).collect();
assert_eq!(r, vec![1, 3, 2, 4, 5, 6]);
fn interleave_shortest<I>(self, other: I) -> InterleaveShortest<Self, <I as IntoParallelIterator>::Iter>
where
    I: IntoParallelIterator<Item = Self::Item>,
    <I as IntoParallelIterator>::Iter: IndexedParallelIterator<Item = Self::Item>,
Interleaves elements of this iterator and the other given iterator, until one is exhausted.
Examples
use rayon::prelude::*;
let (x, y) = (vec![1, 2, 3, 4], vec![5, 6]);
let r: Vec<i32> = x.into_par_iter().interleave_shortest(y).collect();
assert_eq!(r, vec![1, 5, 2, 6, 3]);
fn chunks(self, chunk_size: usize) -> Chunks<Self>
Splits an iterator up into fixed-size chunks.
Returns an iterator that returns Vecs of the given number of elements. If the number of elements in the iterator is not divisible by chunk_size, the last chunk may be shorter than chunk_size.
See also par_chunks() and par_chunks_mut() for similar behavior on slices, without having to allocate intermediate Vecs for the chunks.
Examples
use rayon::prelude::*;
let a = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
let r: Vec<Vec<i32>> = a.into_par_iter().chunks(3).collect();
assert_eq!(r, vec![vec![1,2,3], vec![4,5,6], vec![7,8,9], vec![10]]);
fn fold_chunks<T, ID, F>(self, chunk_size: usize, identity: ID, fold_op: F) -> FoldChunks<Self, ID, F>
where
    ID: Fn() -> T + Send + Sync,
    F: Fn(T, Self::Item) -> T + Send + Sync,
    T: Send,
Splits an iterator into fixed-size chunks, performing a sequential fold() on each chunk.
Returns an iterator that produces a folded result for each chunk of items produced by this iterator.
This works essentially like:
iter.chunks(chunk_size)
    .map(|chunk|
        chunk.into_iter()
            .fold(identity(), fold_op)
    )
except there is no per-chunk allocation overhead.
Panics if chunk_size is 0.
Examples
use rayon::prelude::*;
let nums = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
let chunk_sums = nums.into_par_iter().fold_chunks(2, || 0, |a, n| a + n).collect::<Vec<_>>();
assert_eq!(chunk_sums, vec![3, 7, 11, 15, 19]);
fn fold_chunks_with<T, F>(self, chunk_size: usize, init: T, fold_op: F) -> FoldChunksWith<Self, T, F>
where
    T: Send + Clone,
    F: Fn(T, Self::Item) -> T + Send + Sync,
Splits an iterator into fixed-size chunks, performing a sequential fold() on each chunk.
Returns an iterator that produces a folded result for each chunk of items produced by this iterator.
This works essentially like fold_chunks(chunk_size, || init.clone(), fold_op), except it doesn't require the init type to be Sync, nor any other form of added synchronization.
Panics if chunk_size is 0.
Examples
use rayon::prelude::*;
let nums = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
let chunk_sums = nums.into_par_iter().fold_chunks_with(2, 0, |a, n| a + n).collect::<Vec<_>>();
assert_eq!(chunk_sums, vec![3, 7, 11, 15, 19]);
fn cmp<I>(self, other: I) -> Ordering
where
    I: IntoParallelIterator<Item = Self::Item>,
    <I as IntoParallelIterator>::Iter: IndexedParallelIterator,
    Self::Item: Ord,
Lexicographically compares the elements of this ParallelIterator with those of another.
Examples
use rayon::prelude::*;
use std::cmp::Ordering::*;
let x = vec![1, 2, 3];
assert_eq!(x.par_iter().cmp(&vec![1, 3, 0]), Less);
assert_eq!(x.par_iter().cmp(&vec![1, 2, 3]), Equal);
assert_eq!(x.par_iter().cmp(&vec![1, 2]), Greater);
fn partial_cmp<I>(self, other: I) -> Option<Ordering>
where
    I: IntoParallelIterator,
    <I as IntoParallelIterator>::Iter: IndexedParallelIterator,
    Self::Item: PartialOrd<<I as IntoParallelIterator>::Item>,
Lexicographically compares the elements of this ParallelIterator with those of another.
Examples
use rayon::prelude::*;
use std::cmp::Ordering::*;
use std::f64::NAN;
let x = vec![1.0, 2.0, 3.0];
assert_eq!(x.par_iter().partial_cmp(&vec![1.0, 3.0, 0.0]), Some(Less));
assert_eq!(x.par_iter().partial_cmp(&vec![1.0, 2.0, 3.0]), Some(Equal));
assert_eq!(x.par_iter().partial_cmp(&vec![1.0, 2.0]), Some(Greater));
assert_eq!(x.par_iter().partial_cmp(&vec![1.0, NAN]), None);
fn eq<I>(self, other: I) -> bool
where
    I: IntoParallelIterator,
    <I as IntoParallelIterator>::Iter: IndexedParallelIterator,
    Self::Item: PartialEq<<I as IntoParallelIterator>::Item>,
Determines if the elements of this ParallelIterator are equal to those of another.
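A brief illustrative example, not taken from the upstream documentation, in the same style as the cmp example above:
use rayon::prelude::*;
let x = vec![1, 2, 3];
assert!(x.par_iter().eq(&vec![1, 2, 3]));
assert!(!x.par_iter().eq(&vec![1, 2, 4]));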
fn ne<I>(self, other: I) -> bool
where
    I: IntoParallelIterator,
    <I as IntoParallelIterator>::Iter: IndexedParallelIterator,
    Self::Item: PartialEq<<I as IntoParallelIterator>::Item>,
Determines if the elements of this ParallelIterator are unequal to those of another.
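Likewise for ne, a brief illustrative example, not taken from the upstream documentation:
use rayon::prelude::*;
let x = vec![1, 2, 3];
assert!(x.par_iter().ne(&vec![1, 2, 4]));
assert!(!x.par_iter().ne(&vec![1, 2, 3]));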
fn lt<I>(self, other: I) -> bool
where
    I: IntoParallelIterator,
    <I as IntoParallelIterator>::Iter: IndexedParallelIterator,
    Self::Item: PartialOrd<<I as IntoParallelIterator>::Item>,
Determines if the elements of this ParallelIterator are lexicographically less than those of another.
fn le<I>(self, other: I) -> bool
where
    I: IntoParallelIterator,
    <I as IntoParallelIterator>::Iter: IndexedParallelIterator,
    Self::Item: PartialOrd<<I as IntoParallelIterator>::Item>,
Determines if the elements of this ParallelIterator are less than or equal to those of another.
fn gt<I>(self, other: I) -> bool
where
    I: IntoParallelIterator,
    <I as IntoParallelIterator>::Iter: IndexedParallelIterator,
    Self::Item: PartialOrd<<I as IntoParallelIterator>::Item>,
Determines if the elements of this ParallelIterator are lexicographically greater than those of another.
fn ge<I>(self, other: I) -> bool
where
    I: IntoParallelIterator,
    <I as IntoParallelIterator>::Iter: IndexedParallelIterator,
    Self::Item: PartialOrd<<I as IntoParallelIterator>::Item>,
Determines if the elements of this ParallelIterator are greater than or equal to those of another.
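The four ordering predicates above (lt, le, gt, ge) use the same lexicographic ordering as cmp and partial_cmp. A brief combined illustration, not taken from the upstream documentation:
use rayon::prelude::*;
let x = vec![1, 2, 3];
assert!(x.par_iter().lt(&vec![1, 2, 4])); // [1, 2, 3] is lexicographically less than [1, 2, 4]
assert!(x.par_iter().le(&vec![1, 2, 3])); // equal sequences compare as `le`
assert!(x.par_iter().gt(&vec![1, 2]));    // a longer sequence with an equal prefix is greater
assert!(x.par_iter().ge(&vec![1, 2, 3])); // equal sequences compare as `ge`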
fn enumerate(self) -> Enumerate<Self>
Yields an index along with each item.
Examples
use rayon::prelude::*;
let chars = vec!['a', 'b', 'c'];
let result: Vec<_> = chars
    .into_par_iter()
    .enumerate()
    .collect();
assert_eq!(result, [(0, 'a'), (1, 'b'), (2, 'c')]);
fn step_by(self, step: usize) -> StepBy<Self>
Creates an iterator that steps by the given amount.
Examples
use rayon::prelude::*;
let range = 3..10;
let result: Vec<i32> = range
    .into_par_iter()
    .step_by(3)
    .collect();
assert_eq!(result, [3, 6, 9]);
fn skip(self, n: usize) -> Skip<Self>
Creates an iterator that skips the first n elements.
Examples
use rayon::prelude::*;
let result: Vec<_> = (0..100)
    .into_par_iter()
    .skip(95)
    .collect();
assert_eq!(result, [95, 96, 97, 98, 99]);
fn take(self, n: usize) -> Take<Self>
Creates an iterator that yields the first n elements.
Examples
use rayon::prelude::*;
let result: Vec<_> = (0..100)
    .into_par_iter()
    .take(5)
    .collect();
assert_eq!(result, [0, 1, 2, 3, 4]);
fn position_any<P>(self, predicate: P) -> Option<usize>
where
    P: Fn(Self::Item) -> bool + Sync + Send,
Searches for some item in the parallel iterator that matches the given predicate, and returns its index. Like ParallelIterator::find_any, the parallel search will not necessarily find the first match, and once a match is found we'll attempt to stop processing any more.
Examples
use rayon::prelude::*;
let a = [1, 2, 3, 3];
let i = a.par_iter().position_any(|&x| x == 3).expect("found");
assert!(i == 2 || i == 3);
assert_eq!(a.par_iter().position_any(|&x| x == 100), None);
fn position_first<P>(self, predicate: P) -> Option<usize>
where
    P: Fn(Self::Item) -> bool + Sync + Send,
Searches for the sequentially first item in the parallel iterator that matches the given predicate, and returns its index.
Like ParallelIterator::find_first, once a match is found, all attempts to the right of the match will be stopped, while attempts to the left must continue in case an earlier match is found.
Note that not all parallel iterators have a useful order, much like sequential HashMap iteration, so “first” may be nebulous. If you just want the first match discovered anywhere in the iterator, position_any is a better choice.
Examples
use rayon::prelude::*;
let a = [1, 2, 3, 3];
assert_eq!(a.par_iter().position_first(|&x| x == 3), Some(2));
assert_eq!(a.par_iter().position_first(|&x| x == 100), None);
fn position_last<P>(self, predicate: P) -> Option<usize>
where
    P: Fn(Self::Item) -> bool + Sync + Send,
Searches for the sequentially last item in the parallel iterator that matches the given predicate, and returns its index.
Like ParallelIterator::find_last, once a match is found, all attempts to the left of the match will be stopped, while attempts to the right must continue in case a later match is found.
Note that not all parallel iterators have a useful order, much like sequential HashMap iteration, so “last” may be nebulous. When the order doesn’t actually matter to you, position_any is a better choice.
Examples
use rayon::prelude::*;
let a = [1, 2, 3, 3];
assert_eq!(a.par_iter().position_last(|&x| x == 3), Some(3));
assert_eq!(a.par_iter().position_last(|&x| x == 100), None);
fn positions<P>(self, predicate: P) -> Positions<Self, P>
where
    P: Fn(Self::Item) -> bool + Sync + Send,
Searches for items in the parallel iterator that match the given predicate, and returns their indices.
Examples
use rayon::prelude::*;
let primes = vec![2, 3, 5, 7, 11, 13, 17, 19, 23, 29];
// Find the positions of primes congruent to 1 modulo 6
let p1mod6: Vec<_> = primes.par_iter().positions(|&p| p % 6 == 1).collect();
assert_eq!(p1mod6, [3, 5, 7]); // primes 7, 13, and 19
// Find the positions of primes congruent to 5 modulo 6
let p5mod6: Vec<_> = primes.par_iter().positions(|&p| p % 6 == 5).collect();
assert_eq!(p5mod6, [2, 4, 6, 8, 9]); // primes 5, 11, 17, 23, and 29
fn rev(self) -> Rev<Self>
Produces a new iterator with the elements of this iterator in reverse order.
Examples
use rayon::prelude::*;
let result: Vec<_> = (0..5)
    .into_par_iter()
    .rev()
    .collect();
assert_eq!(result, [4, 3, 2, 1, 0]);
fn with_min_len(self, min: usize) -> MinLen<Self>
Sets the minimum length of iterators desired to process in each rayon job. Rayon will not split any smaller than this length, but of course an iterator could already be smaller to begin with.
Producers like zip and interleave will use the greater of the two minimums.
Chained iterators and iterators inside flat_map may each use their own minimum length.
Examples
use rayon::prelude::*;
let min = (0..1_000_000)
    .into_par_iter()
    .with_min_len(1234)
    .fold(|| 0, |acc, _| acc + 1) // count how many are in this segment
    .min().unwrap();
assert!(min >= 1234);
fn with_max_len(self, max: usize) -> MaxLen<Self>
Sets the maximum length of iterators desired to process in each rayon job. Rayon will try to split at least below this length, unless that would put it below the length from with_min_len(). For example, given min=10 and max=15, a length of 16 will not be split any further.
Producers like zip and interleave will use the lesser of the two maximums.
Chained iterators and iterators inside flat_map may each use their own maximum length.
Examples
use rayon::prelude::*;
let max = (0..1_000_000)
    .into_par_iter()
    .with_max_len(1234)
    .fold(|| 0, |acc, _| acc + 1) // count how many are in this segment
    .max().unwrap();
assert!(max <= 1234);