Coverage Report

Created: 2025-10-14 06:57

next uncovered line (L), next uncovered region (R), next uncovered branch (B)
/rust/registry/src/index.crates.io-1949cf8c6b5b557f/rayon-1.11.0/src/iter/mod.rs
Line
Count
Source
1
//! Traits for writing parallel programs using an iterator-style interface
2
//!
3
//! You will rarely need to interact with this module directly unless you have
4
//! need to name one of the iterator types.
5
//!
6
//! Parallel iterators make it easy to write iterator-like chains that
7
//! execute in parallel: typically all you have to do is convert the
8
//! first `.iter()` (or `iter_mut()`, `into_iter()`, etc) method into
9
//! `par_iter()` (or `par_iter_mut()`, `into_par_iter()`, etc). For
10
//! example, to compute the sum of the squares of a sequence of
11
//! integers, one might write:
12
//!
13
//! ```rust
14
//! use rayon::prelude::*;
15
//! fn sum_of_squares(input: &[i32]) -> i32 {
16
//!     input.par_iter()
17
//!          .map(|i| i * i)
18
//!          .sum()
19
//! }
20
//! ```
21
//!
22
//! Or, to increment all the integers in a slice, you could write:
23
//!
24
//! ```rust
25
//! use rayon::prelude::*;
26
//! fn increment_all(input: &mut [i32]) {
27
//!     input.par_iter_mut()
28
//!          .for_each(|p| *p += 1);
29
//! }
30
//! ```
31
//!
32
//! To use parallel iterators, first import the traits by adding
33
//! something like `use rayon::prelude::*` to your module. You can
34
//! then call `par_iter`, `par_iter_mut`, or `into_par_iter` to get a
35
//! parallel iterator. Like a [regular iterator][], parallel
36
//! iterators work by first constructing a computation and then
37
//! executing it.
38
//!
39
//! In addition to `par_iter()` and friends, some types offer other
40
//! ways to create (or consume) parallel iterators:
41
//!
42
//! - Slices (`&[T]`, `&mut [T]`) offer methods like `par_split` and
43
//!   `par_windows`, as well as various parallel sorting
44
//!   operations. See [the `ParallelSlice` trait] for the full list.
45
//! - Strings (`&str`) offer methods like `par_split` and `par_lines`.
46
//!   See [the `ParallelString` trait] for the full list.
47
//! - Various collections offer [`par_extend`], which grows a
48
//!   collection given a parallel iterator. (If you don't have a
49
//!   collection to extend, you can use [`collect()`] to create a new
50
//!   one from scratch.)
51
//!
52
//! [the `ParallelSlice` trait]: crate::slice::ParallelSlice
53
//! [the `ParallelString` trait]: crate::str::ParallelString
54
//! [`par_extend`]: ParallelExtend
55
//! [`collect()`]: ParallelIterator::collect()
56
//!
57
//! To see the full range of methods available on parallel iterators,
58
//! check out the [`ParallelIterator`] and [`IndexedParallelIterator`]
59
//! traits.
60
//!
61
//! If you'd like to build a custom parallel iterator, or to write your own
62
//! combinator, then check out the [split] function and the [plumbing] module.
63
//!
64
//! [regular iterator]: Iterator
65
//! [split]: split()
66
//! [plumbing]: plumbing
67
//!
68
//! Note: Several of the `ParallelIterator` methods rely on a `Try` trait which
69
//! has been deliberately obscured from the public API.  This trait is intended
70
//! to mirror the unstable `std::ops::Try` with implementations for `Option` and
71
//! `Result`, where `Some`/`Ok` values will let those iterators continue, but
72
//! `None`/`Err` values will exit early.
73
//!
74
//! A note about object safety: It is currently _not_ possible to wrap
75
//! a `ParallelIterator` (or any trait that depends on it) using a
76
//! `Box<dyn ParallelIterator>` or other kind of dynamic allocation,
77
//! because `ParallelIterator` is **not object-safe**.
78
//! (This keeps the implementation simpler and allows extra optimizations.)
79
80
use self::plumbing::*;
81
use self::private::Try;
82
pub use either::Either;
83
use std::cmp::Ordering;
84
use std::collections::LinkedList;
85
use std::iter::{Product, Sum};
86
use std::ops::{Fn, RangeBounds};
87
88
pub mod plumbing;
89
90
#[cfg(test)]
91
mod test;
92
93
// There is a method to the madness here:
94
//
95
// - These modules are private but expose certain types to the end-user
96
//   (e.g., `enumerate::Enumerate`) -- specifically, the types that appear in the
97
//   public API surface of the `ParallelIterator` traits.
98
// - In **this** module, those public types are always used unprefixed, which forces
99
//   us to add a `pub use` and helps identify if we missed anything.
100
// - In contrast, items that appear **only** in the body of a method,
101
//   e.g. `find::find()`, are always used **prefixed**, so that they
102
//   can be readily distinguished.
103
104
mod blocks;
105
mod chain;
106
mod chunks;
107
mod cloned;
108
mod collect;
109
mod copied;
110
mod empty;
111
mod enumerate;
112
mod extend;
113
mod filter;
114
mod filter_map;
115
mod find;
116
mod find_first_last;
117
mod flat_map;
118
mod flat_map_iter;
119
mod flatten;
120
mod flatten_iter;
121
mod fold;
122
mod fold_chunks;
123
mod fold_chunks_with;
124
mod for_each;
125
mod from_par_iter;
126
mod inspect;
127
mod interleave;
128
mod interleave_shortest;
129
mod intersperse;
130
mod len;
131
mod map;
132
mod map_with;
133
mod multizip;
134
mod noop;
135
mod once;
136
mod panic_fuse;
137
mod par_bridge;
138
mod positions;
139
mod product;
140
mod reduce;
141
mod repeat;
142
mod rev;
143
mod skip;
144
mod skip_any;
145
mod skip_any_while;
146
mod splitter;
147
mod step_by;
148
mod sum;
149
mod take;
150
mod take_any;
151
mod take_any_while;
152
mod try_fold;
153
mod try_reduce;
154
mod try_reduce_with;
155
mod unzip;
156
mod update;
157
mod walk_tree;
158
mod while_some;
159
mod zip;
160
mod zip_eq;
161
162
pub use self::{
163
    blocks::{ExponentialBlocks, UniformBlocks},
164
    chain::Chain,
165
    chunks::Chunks,
166
    cloned::Cloned,
167
    copied::Copied,
168
    empty::{empty, Empty},
169
    enumerate::Enumerate,
170
    filter::Filter,
171
    filter_map::FilterMap,
172
    flat_map::FlatMap,
173
    flat_map_iter::FlatMapIter,
174
    flatten::Flatten,
175
    flatten_iter::FlattenIter,
176
    fold::{Fold, FoldWith},
177
    fold_chunks::FoldChunks,
178
    fold_chunks_with::FoldChunksWith,
179
    inspect::Inspect,
180
    interleave::Interleave,
181
    interleave_shortest::InterleaveShortest,
182
    intersperse::Intersperse,
183
    len::{MaxLen, MinLen},
184
    map::Map,
185
    map_with::{MapInit, MapWith},
186
    multizip::MultiZip,
187
    once::{once, Once},
188
    panic_fuse::PanicFuse,
189
    par_bridge::{IterBridge, ParallelBridge},
190
    positions::Positions,
191
    repeat::{repeat, repeat_n, Repeat, RepeatN},
192
    rev::Rev,
193
    skip::Skip,
194
    skip_any::SkipAny,
195
    skip_any_while::SkipAnyWhile,
196
    splitter::{split, Split},
197
    step_by::StepBy,
198
    take::Take,
199
    take_any::TakeAny,
200
    take_any_while::TakeAnyWhile,
201
    try_fold::{TryFold, TryFoldWith},
202
    update::Update,
203
    walk_tree::{
204
        walk_tree, walk_tree_postfix, walk_tree_prefix, WalkTree, WalkTreePostfix, WalkTreePrefix,
205
    },
206
    while_some::WhileSome,
207
    zip::Zip,
208
    zip_eq::ZipEq,
209
};
210
211
#[allow(deprecated)]
212
pub use repeat::repeatn;
213
214
/// `IntoParallelIterator` implements the conversion to a [`ParallelIterator`].
215
///
216
/// By implementing `IntoParallelIterator` for a type, you define how it will be
217
/// transformed into an iterator. This is a parallel version of the standard
218
/// library's [`std::iter::IntoIterator`] trait.
219
pub trait IntoParallelIterator {
220
    /// The parallel iterator type that will be created.
221
    type Iter: ParallelIterator<Item = Self::Item>;
222
223
    /// The type of item that the parallel iterator will produce.
224
    type Item: Send;
225
226
    /// Converts `self` into a parallel iterator.
227
    ///
228
    /// # Examples
229
    ///
230
    /// ```
231
    /// use rayon::prelude::*;
232
    ///
233
    /// println!("counting in parallel:");
234
    /// (0..100).into_par_iter()
235
    ///     .for_each(|i| println!("{}", i));
236
    /// ```
237
    ///
238
    /// This conversion is often implicit for arguments to methods like [`zip`].
239
    ///
240
    /// ```
241
    /// use rayon::prelude::*;
242
    ///
243
    /// let v: Vec<_> = (0..5).into_par_iter().zip(5..10).collect();
244
    /// assert_eq!(v, [(0, 5), (1, 6), (2, 7), (3, 8), (4, 9)]);
245
    /// ```
246
    ///
247
    /// [`zip`]: IndexedParallelIterator::zip()
248
    fn into_par_iter(self) -> Self::Iter;
249
}
250
251
/// `IntoParallelRefIterator` implements the conversion to a
252
/// [`ParallelIterator`], providing shared references to the data.
253
///
254
/// This is a parallel version of the `iter()` method
255
/// defined by various collections.
256
///
257
/// This trait is automatically implemented
258
/// `for I where &I: IntoParallelIterator`. In most cases, users
259
/// will want to implement [`IntoParallelIterator`] rather than implement
260
/// this trait directly.
261
pub trait IntoParallelRefIterator<'data> {
262
    /// The type of the parallel iterator that will be returned.
263
    type Iter: ParallelIterator<Item = Self::Item>;
264
265
    /// The type of item that the parallel iterator will produce.
266
    /// This will typically be an `&'data T` reference type.
267
    type Item: Send + 'data;
268
269
    /// Converts `self` into a parallel iterator.
270
    ///
271
    /// # Examples
272
    ///
273
    /// ```
274
    /// use rayon::prelude::*;
275
    ///
276
    /// let v: Vec<_> = (0..100).collect();
277
    /// assert_eq!(v.par_iter().sum::<i32>(), 100 * 99 / 2);
278
    ///
279
    /// // `v.par_iter()` is shorthand for `(&v).into_par_iter()`,
280
    /// // producing the exact same references.
281
    /// assert!(v.par_iter().zip(&v)
282
    ///          .all(|(a, b)| std::ptr::eq(a, b)));
283
    /// ```
284
    fn par_iter(&'data self) -> Self::Iter;
285
}
286
287
impl<'data, I: 'data + ?Sized> IntoParallelRefIterator<'data> for I
288
where
289
    &'data I: IntoParallelIterator,
290
{
291
    type Iter = <&'data I as IntoParallelIterator>::Iter;
292
    type Item = <&'data I as IntoParallelIterator>::Item;
293
294
0
    fn par_iter(&'data self) -> Self::Iter {
295
0
        self.into_par_iter()
296
0
    }
297
}
298
299
/// `IntoParallelRefMutIterator` implements the conversion to a
300
/// [`ParallelIterator`], providing mutable references to the data.
301
///
302
/// This is a parallel version of the `iter_mut()` method
303
/// defined by various collections.
304
///
305
/// This trait is automatically implemented
306
/// `for I where &mut I: IntoParallelIterator`. In most cases, users
307
/// will want to implement [`IntoParallelIterator`] rather than implement
308
/// this trait directly.
309
pub trait IntoParallelRefMutIterator<'data> {
310
    /// The type of iterator that will be created.
311
    type Iter: ParallelIterator<Item = Self::Item>;
312
313
    /// The type of item that will be produced; this is typically an
314
    /// `&'data mut T` reference.
315
    type Item: Send + 'data;
316
317
    /// Creates the parallel iterator from `self`.
318
    ///
319
    /// # Examples
320
    ///
321
    /// ```
322
    /// use rayon::prelude::*;
323
    ///
324
    /// let mut v = vec![0usize; 5];
325
    /// v.par_iter_mut().enumerate().for_each(|(i, x)| *x = i);
326
    /// assert_eq!(v, [0, 1, 2, 3, 4]);
327
    /// ```
328
    fn par_iter_mut(&'data mut self) -> Self::Iter;
329
}
330
331
impl<'data, I: 'data + ?Sized> IntoParallelRefMutIterator<'data> for I
332
where
333
    &'data mut I: IntoParallelIterator,
334
{
335
    type Iter = <&'data mut I as IntoParallelIterator>::Iter;
336
    type Item = <&'data mut I as IntoParallelIterator>::Item;
337
338
0
    fn par_iter_mut(&'data mut self) -> Self::Iter {
339
0
        self.into_par_iter()
340
0
    }
Unexecuted instantiation: <[rav1e::tiling::plane_region::PlaneRegionMut<u16>] as rayon::iter::IntoParallelRefMutIterator>::par_iter_mut
Unexecuted instantiation: <[rav1e::tiling::plane_region::PlaneRegionMut<u8>] as rayon::iter::IntoParallelRefMutIterator>::par_iter_mut
Unexecuted instantiation: <_ as rayon::iter::IntoParallelRefMutIterator>::par_iter_mut
341
}
342
343
/// Parallel version of the standard iterator trait.
344
///
345
/// The combinators on this trait are available on **all** parallel
346
/// iterators.  Additional methods can be found on the
347
/// [`IndexedParallelIterator`] trait: those methods are only
348
/// available for parallel iterators where the number of items is
349
/// known in advance (so, e.g., after invoking `filter`, those methods
350
/// become unavailable).
351
///
352
/// For examples of using parallel iterators, see [the docs on the
353
/// `iter` module][iter].
354
///
355
/// [iter]: self
356
pub trait ParallelIterator: Sized + Send {
357
    /// The type of item that this parallel iterator produces.
358
    /// For example, if you use the [`for_each`] method, this is the type of
359
    /// item that your closure will be invoked with.
360
    ///
361
    /// [`for_each`]: #method.for_each
362
    type Item: Send;
363
364
    /// Executes `OP` on each item produced by the iterator, in parallel.
365
    ///
366
    /// # Examples
367
    ///
368
    /// ```
369
    /// use rayon::prelude::*;
370
    ///
371
    /// (0..100).into_par_iter().for_each(|x| println!("{:?}", x));
372
    /// ```
373
0
    fn for_each<OP>(self, op: OP)
374
0
    where
375
0
        OP: Fn(Self::Item) + Sync + Send,
376
    {
377
0
        for_each::for_each(self, &op)
378
0
    }
Unexecuted instantiation: <rayon::iter::enumerate::Enumerate<rayon::slice::IterMut<rav1e::tiling::plane_region::PlaneRegionMut<u16>>> as rayon::iter::ParallelIterator>::for_each::<rav1e::deblock::deblock_filter_frame<u16>::{closure#0}>
Unexecuted instantiation: <rayon::vec::IntoIter<rav1e::tiling::tiler::TileContextMut<u16>> as rayon::iter::ParallelIterator>::for_each::<rav1e::api::lookahead::compute_motion_vectors<u16>::{closure#0}>
Unexecuted instantiation: <rayon::iter::enumerate::Enumerate<rayon::slice::IterMut<rav1e::tiling::plane_region::PlaneRegionMut<u8>>> as rayon::iter::ParallelIterator>::for_each::<rav1e::deblock::deblock_filter_frame<u8>::{closure#0}>
Unexecuted instantiation: <rayon::vec::IntoIter<rav1e::tiling::tiler::TileContextMut<u8>> as rayon::iter::ParallelIterator>::for_each::<rav1e::api::lookahead::compute_motion_vectors<u8>::{closure#0}>
Unexecuted instantiation: <_ as rayon::iter::ParallelIterator>::for_each::<_>
379
380
    /// Executes `OP` on the given `init` value with each item produced by
381
    /// the iterator, in parallel.
382
    ///
383
    /// The `init` value will be cloned only as needed to be paired with
384
    /// the group of items in each rayon job.  It does not require the type
385
    /// to be `Sync`.
386
    ///
387
    /// # Examples
388
    ///
389
    /// ```
390
    /// use std::sync::mpsc::channel;
391
    /// use rayon::prelude::*;
392
    ///
393
    /// let (sender, receiver) = channel();
394
    ///
395
    /// (0..5).into_par_iter().for_each_with(sender, |s, x| s.send(x).unwrap());
396
    ///
397
    /// let mut res: Vec<_> = receiver.iter().collect();
398
    ///
399
    /// res.sort();
400
    ///
401
    /// assert_eq!(&res[..], &[0, 1, 2, 3, 4])
402
    /// ```
403
0
    fn for_each_with<OP, T>(self, init: T, op: OP)
404
0
    where
405
0
        OP: Fn(&mut T, Self::Item) + Sync + Send,
406
0
        T: Send + Clone,
407
    {
408
0
        self.map_with(init, op).collect()
409
0
    }
410
411
    /// Executes `OP` on a value returned by `init` with each item produced by
412
    /// the iterator, in parallel.
413
    ///
414
    /// The `init` function will be called only as needed for a value to be
415
    /// paired with the group of items in each rayon job.  There is no
416
    /// constraint on that returned type at all!
417
    ///
418
    /// # Examples
419
    ///
420
    /// ```
421
    /// use rand::Rng;
422
    /// use rayon::prelude::*;
423
    ///
424
    /// let mut v = vec![0u8; 1_000_000];
425
    ///
426
    /// v.par_chunks_mut(1000)
427
    ///     .for_each_init(
428
    ///         || rand::rng(),
429
    ///         |rng, chunk| rng.fill(chunk),
430
    ///     );
431
    ///
432
    /// // There's a remote chance that this will fail...
433
    /// for i in 0u8..=255 {
434
    ///     assert!(v.contains(&i));
435
    /// }
436
    /// ```
437
0
    fn for_each_init<OP, INIT, T>(self, init: INIT, op: OP)
438
0
    where
439
0
        OP: Fn(&mut T, Self::Item) + Sync + Send,
440
0
        INIT: Fn() -> T + Sync + Send,
441
    {
442
0
        self.map_init(init, op).collect()
443
0
    }
444
445
    /// Executes a fallible `OP` on each item produced by the iterator, in parallel.
446
    ///
447
    /// If the `OP` returns `Result::Err` or `Option::None`, we will attempt to
448
    /// stop processing the rest of the items in the iterator as soon as
449
    /// possible, and we will return that terminating value.  Otherwise, we will
450
    /// return an empty `Result::Ok(())` or `Option::Some(())`.  If there are
451
    /// multiple errors in parallel, it is not specified which will be returned.
452
    ///
453
    /// # Examples
454
    ///
455
    /// ```
456
    /// use rayon::prelude::*;
457
    /// use std::io::{self, Write};
458
    ///
459
    /// // This will stop iteration early if there's any write error, like
460
    /// // having piped output get closed on the other end.
461
    /// (0..100).into_par_iter()
462
    ///     .try_for_each(|x| writeln!(io::stdout(), "{:?}", x))
463
    ///     .expect("expected no write errors");
464
    /// ```
465
0
    fn try_for_each<OP, R>(self, op: OP) -> R
466
0
    where
467
0
        OP: Fn(Self::Item) -> R + Sync + Send,
468
0
        R: Try<Output = ()> + Send,
469
    {
470
0
        fn ok<R: Try<Output = ()>>(_: (), _: ()) -> R {
471
0
            R::from_output(())
472
0
        }
473
474
0
        self.map(op).try_reduce(<()>::default, ok)
475
0
    }
476
477
    /// Executes a fallible `OP` on the given `init` value with each item
478
    /// produced by the iterator, in parallel.
479
    ///
480
    /// This combines the `init` semantics of [`for_each_with()`] and the
481
    /// failure semantics of [`try_for_each()`].
482
    ///
483
    /// [`for_each_with()`]: #method.for_each_with
484
    /// [`try_for_each()`]: #method.try_for_each
485
    ///
486
    /// # Examples
487
    ///
488
    /// ```
489
    /// use std::sync::mpsc::channel;
490
    /// use rayon::prelude::*;
491
    ///
492
    /// let (sender, receiver) = channel();
493
    ///
494
    /// (0..5).into_par_iter()
495
    ///     .try_for_each_with(sender, |s, x| s.send(x))
496
    ///     .expect("expected no send errors");
497
    ///
498
    /// let mut res: Vec<_> = receiver.iter().collect();
499
    ///
500
    /// res.sort();
501
    ///
502
    /// assert_eq!(&res[..], &[0, 1, 2, 3, 4])
503
    /// ```
504
0
    fn try_for_each_with<OP, T, R>(self, init: T, op: OP) -> R
505
0
    where
506
0
        OP: Fn(&mut T, Self::Item) -> R + Sync + Send,
507
0
        T: Send + Clone,
508
0
        R: Try<Output = ()> + Send,
509
    {
510
0
        fn ok<R: Try<Output = ()>>(_: (), _: ()) -> R {
511
0
            R::from_output(())
512
0
        }
513
514
0
        self.map_with(init, op).try_reduce(<()>::default, ok)
515
0
    }
516
517
    /// Executes a fallible `OP` on a value returned by `init` with each item
518
    /// produced by the iterator, in parallel.
519
    ///
520
    /// This combines the `init` semantics of [`for_each_init()`] and the
521
    /// failure semantics of [`try_for_each()`].
522
    ///
523
    /// [`for_each_init()`]: #method.for_each_init
524
    /// [`try_for_each()`]: #method.try_for_each
525
    ///
526
    /// # Examples
527
    ///
528
    /// ```
529
    /// use rand::{Rng, TryRngCore};
530
    /// use rayon::prelude::*;
531
    ///
532
    /// let mut v = vec![0u8; 1_000_000];
533
    ///
534
    /// v.par_chunks_mut(1000)
535
    ///     .try_for_each_init(
536
    ///         || rand::rng(),
537
    ///         |rng, chunk| rng.try_fill_bytes(chunk),
538
    ///     )
539
    ///     .expect("expected no rand errors");
540
    ///
541
    /// // There's a remote chance that this will fail...
542
    /// for i in 0u8..=255 {
543
    ///     assert!(v.contains(&i));
544
    /// }
545
    /// ```
546
0
    fn try_for_each_init<OP, INIT, T, R>(self, init: INIT, op: OP) -> R
547
0
    where
548
0
        OP: Fn(&mut T, Self::Item) -> R + Sync + Send,
549
0
        INIT: Fn() -> T + Sync + Send,
550
0
        R: Try<Output = ()> + Send,
551
    {
552
0
        fn ok<R: Try<Output = ()>>(_: (), _: ()) -> R {
553
0
            R::from_output(())
554
0
        }
555
556
0
        self.map_init(init, op).try_reduce(<()>::default, ok)
557
0
    }
558
559
    /// Counts the number of items in this parallel iterator.
560
    ///
561
    /// # Examples
562
    ///
563
    /// ```
564
    /// use rayon::prelude::*;
565
    ///
566
    /// let count = (0..100).into_par_iter().count();
567
    ///
568
    /// assert_eq!(count, 100);
569
    /// ```
570
0
    fn count(self) -> usize {
571
0
        fn one<T>(_: T) -> usize {
572
0
            1
573
0
        }
574
575
0
        self.map(one).sum()
576
0
    }
577
578
    /// Applies `map_op` to each item of this iterator, producing a new
579
    /// iterator with the results.
580
    ///
581
    /// # Examples
582
    ///
583
    /// ```
584
    /// use rayon::prelude::*;
585
    ///
586
    /// let mut par_iter = (0..5).into_par_iter().map(|x| x * 2);
587
    ///
588
    /// let doubles: Vec<_> = par_iter.collect();
589
    ///
590
    /// assert_eq!(&doubles[..], &[0, 2, 4, 6, 8]);
591
    /// ```
592
0
    fn map<F, R>(self, map_op: F) -> Map<Self, F>
593
0
    where
594
0
        F: Fn(Self::Item) -> R + Sync + Send,
595
0
        R: Send,
596
    {
597
0
        Map::new(self, map_op)
598
0
    }
Unexecuted instantiation: <rayon::vec::IntoIter<(rav1e::tiling::tiler::TileContextMut<u16>, &mut rav1e::context::cdf_context::CDFContext)> as rayon::iter::ParallelIterator>::map::<rav1e::encoder::encode_tile_group<u16>::{closure#0}, (alloc::vec::Vec<u8>, rav1e::stats::EncoderStats)>
Unexecuted instantiation: <rayon::vec::IntoIter<(rav1e::tiling::tiler::TileContextMut<u8>, &mut rav1e::context::cdf_context::CDFContext)> as rayon::iter::ParallelIterator>::map::<rav1e::encoder::encode_tile_group<u8>::{closure#0}, (alloc::vec::Vec<u8>, rav1e::stats::EncoderStats)>
Unexecuted instantiation: <_ as rayon::iter::ParallelIterator>::map::<_, _>
599
600
    /// Applies `map_op` to the given `init` value with each item of this
601
    /// iterator, producing a new iterator with the results.
602
    ///
603
    /// The `init` value will be cloned only as needed to be paired with
604
    /// the group of items in each rayon job.  It does not require the type
605
    /// to be `Sync`.
606
    ///
607
    /// # Examples
608
    ///
609
    /// ```
610
    /// use std::sync::mpsc::channel;
611
    /// use rayon::prelude::*;
612
    ///
613
    /// let (sender, receiver) = channel();
614
    ///
615
    /// let a: Vec<_> = (0..5)
616
    ///                 .into_par_iter()            // iterating over i32
617
    ///                 .map_with(sender, |s, x| {
618
    ///                     s.send(x).unwrap();     // sending i32 values through the channel
619
    ///                     x                       // returning i32
620
    ///                 })
621
    ///                 .collect();                 // collecting the returned values into a vector
622
    ///
623
    /// let mut b: Vec<_> = receiver.iter()         // iterating over the values in the channel
624
    ///                             .collect();     // and collecting them
625
    /// b.sort();
626
    ///
627
    /// assert_eq!(a, b);
628
    /// ```
629
0
    fn map_with<F, T, R>(self, init: T, map_op: F) -> MapWith<Self, T, F>
630
0
    where
631
0
        F: Fn(&mut T, Self::Item) -> R + Sync + Send,
632
0
        T: Send + Clone,
633
0
        R: Send,
634
    {
635
0
        MapWith::new(self, init, map_op)
636
0
    }
637
638
    /// Applies `map_op` to a value returned by `init` with each item of this
639
    /// iterator, producing a new iterator with the results.
640
    ///
641
    /// The `init` function will be called only as needed for a value to be
642
    /// paired with the group of items in each rayon job.  There is no
643
    /// constraint on that returned type at all!
644
    ///
645
    /// # Examples
646
    ///
647
    /// ```
648
    /// use rand::Rng;
649
    /// use rayon::prelude::*;
650
    ///
651
    /// let a: Vec<_> = (1i32..1_000_000)
652
    ///     .into_par_iter()
653
    ///     .map_init(
654
    ///         || rand::rng(),  // get the thread-local RNG
655
    ///         |rng, x| if rng.random() { // randomly negate items
656
    ///             -x
657
    ///         } else {
658
    ///             x
659
    ///         },
660
    ///     ).collect();
661
    ///
662
    /// // There's a remote chance that this will fail...
663
    /// assert!(a.iter().any(|&x| x < 0));
664
    /// assert!(a.iter().any(|&x| x > 0));
665
    /// ```
666
0
    fn map_init<F, INIT, T, R>(self, init: INIT, map_op: F) -> MapInit<Self, INIT, F>
667
0
    where
668
0
        F: Fn(&mut T, Self::Item) -> R + Sync + Send,
669
0
        INIT: Fn() -> T + Sync + Send,
670
0
        R: Send,
671
    {
672
0
        MapInit::new(self, init, map_op)
673
0
    }
674
675
    /// Creates an iterator which clones all of its elements.  This may be
676
    /// useful when you have an iterator over `&T`, but you need `T`, and
677
    /// that type implements `Clone`. See also [`copied()`].
678
    ///
679
    /// [`copied()`]: #method.copied
680
    ///
681
    /// # Examples
682
    ///
683
    /// ```
684
    /// use rayon::prelude::*;
685
    ///
686
    /// let a = [1, 2, 3];
687
    ///
688
    /// let v_cloned: Vec<_> = a.par_iter().cloned().collect();
689
    ///
690
    /// // cloned is the same as .map(|&x| x), for integers
691
    /// let v_map: Vec<_> = a.par_iter().map(|&x| x).collect();
692
    ///
693
    /// assert_eq!(v_cloned, vec![1, 2, 3]);
694
    /// assert_eq!(v_map, vec![1, 2, 3]);
695
    /// ```
696
0
    fn cloned<'a, T>(self) -> Cloned<Self>
697
0
    where
698
0
        T: 'a + Clone + Send,
699
0
        Self: ParallelIterator<Item = &'a T>,
700
    {
701
0
        Cloned::new(self)
702
0
    }
703
704
    /// Creates an iterator which copies all of its elements.  This may be
705
    /// useful when you have an iterator over `&T`, but you need `T`, and
706
    /// that type implements `Copy`. See also [`cloned()`].
707
    ///
708
    /// [`cloned()`]: #method.cloned
709
    ///
710
    /// # Examples
711
    ///
712
    /// ```
713
    /// use rayon::prelude::*;
714
    ///
715
    /// let a = [1, 2, 3];
716
    ///
717
    /// let v_copied: Vec<_> = a.par_iter().copied().collect();
718
    ///
719
    /// // copied is the same as .map(|&x| x), for integers
720
    /// let v_map: Vec<_> = a.par_iter().map(|&x| x).collect();
721
    ///
722
    /// assert_eq!(v_copied, vec![1, 2, 3]);
723
    /// assert_eq!(v_map, vec![1, 2, 3]);
724
    /// ```
725
0
    fn copied<'a, T>(self) -> Copied<Self>
726
0
    where
727
0
        T: 'a + Copy + Send,
728
0
        Self: ParallelIterator<Item = &'a T>,
729
    {
730
0
        Copied::new(self)
731
0
    }
732
733
    /// Applies `inspect_op` to a reference to each item of this iterator,
734
    /// producing a new iterator passing through the original items.  This is
735
    /// often useful for debugging to see what's happening in iterator stages.
736
    ///
737
    /// # Examples
738
    ///
739
    /// ```
740
    /// use rayon::prelude::*;
741
    ///
742
    /// let a = [1, 4, 2, 3];
743
    ///
744
    /// // this iterator sequence is complex.
745
    /// let sum = a.par_iter()
746
    ///             .cloned()
747
    ///             .filter(|&x| x % 2 == 0)
748
    ///             .reduce(|| 0, |sum, i| sum + i);
749
    ///
750
    /// println!("{}", sum);
751
    ///
752
    /// // let's add some inspect() calls to investigate what's happening
753
    /// let sum = a.par_iter()
754
    ///             .cloned()
755
    ///             .inspect(|x| println!("about to filter: {}", x))
756
    ///             .filter(|&x| x % 2 == 0)
757
    ///             .inspect(|x| println!("made it through filter: {}", x))
758
    ///             .reduce(|| 0, |sum, i| sum + i);
759
    ///
760
    /// println!("{}", sum);
761
    /// ```
762
0
    fn inspect<OP>(self, inspect_op: OP) -> Inspect<Self, OP>
763
0
    where
764
0
        OP: Fn(&Self::Item) + Sync + Send,
765
    {
766
0
        Inspect::new(self, inspect_op)
767
0
    }
768
769
    /// Mutates each item of this iterator before yielding it.
770
    ///
771
    /// # Examples
772
    ///
773
    /// ```
774
    /// use rayon::prelude::*;
775
    ///
776
    /// let par_iter = (0..5).into_par_iter().update(|x| {*x *= 2;});
777
    ///
778
    /// let doubles: Vec<_> = par_iter.collect();
779
    ///
780
    /// assert_eq!(&doubles[..], &[0, 2, 4, 6, 8]);
781
    /// ```
782
0
    fn update<F>(self, update_op: F) -> Update<Self, F>
783
0
    where
784
0
        F: Fn(&mut Self::Item) + Sync + Send,
785
    {
786
0
        Update::new(self, update_op)
787
0
    }
788
789
    /// Applies `filter_op` to each item of this iterator, producing a new
790
    /// iterator with only the items that gave `true` results.
791
    ///
792
    /// # Examples
793
    ///
794
    /// ```
795
    /// use rayon::prelude::*;
796
    ///
797
    /// let mut par_iter = (0..10).into_par_iter().filter(|x| x % 2 == 0);
798
    ///
799
    /// let even_numbers: Vec<_> = par_iter.collect();
800
    ///
801
    /// assert_eq!(&even_numbers[..], &[0, 2, 4, 6, 8]);
802
    /// ```
803
0
    fn filter<P>(self, filter_op: P) -> Filter<Self, P>
804
0
    where
805
0
        P: Fn(&Self::Item) -> bool + Sync + Send,
806
    {
807
0
        Filter::new(self, filter_op)
808
0
    }
809
810
    /// Applies `filter_op` to each item of this iterator to get an `Option`,
811
    /// producing a new iterator with only the items from `Some` results.
812
    ///
813
    /// # Examples
814
    ///
815
    /// ```
816
    /// use rayon::prelude::*;
817
    ///
818
    /// let mut par_iter = (0..10).into_par_iter()
819
    ///                         .filter_map(|x| {
820
    ///                             if x % 2 == 0 { Some(x * 3) }
821
    ///                             else { None }
822
    ///                         });
823
    ///
824
    /// let even_numbers: Vec<_> = par_iter.collect();
825
    ///
826
    /// assert_eq!(&even_numbers[..], &[0, 6, 12, 18, 24]);
827
    /// ```
828
0
    fn filter_map<P, R>(self, filter_op: P) -> FilterMap<Self, P>
829
0
    where
830
0
        P: Fn(Self::Item) -> Option<R> + Sync + Send,
831
0
        R: Send,
832
    {
833
0
        FilterMap::new(self, filter_op)
834
0
    }
835
836
    /// Applies `map_op` to each item of this iterator to get nested parallel iterators,
837
    /// producing a new parallel iterator that flattens these back into one.
838
    ///
839
    /// See also [`flat_map_iter`](#method.flat_map_iter).
840
    ///
841
    /// # Examples
842
    ///
843
    /// ```
844
    /// use rayon::prelude::*;
845
    ///
846
    /// let a = [[1, 2], [3, 4], [5, 6], [7, 8]];
847
    ///
848
    /// let par_iter = a.par_iter().cloned().flat_map(|a| a.to_vec());
849
    ///
850
    /// let vec: Vec<_> = par_iter.collect();
851
    ///
852
    /// assert_eq!(&vec[..], &[1, 2, 3, 4, 5, 6, 7, 8]);
853
    /// ```
854
0
    fn flat_map<F, PI>(self, map_op: F) -> FlatMap<Self, F>
855
0
    where
856
0
        F: Fn(Self::Item) -> PI + Sync + Send,
857
0
        PI: IntoParallelIterator,
858
    {
859
0
        FlatMap::new(self, map_op)
860
0
    }
861
862
    /// Applies `map_op` to each item of this iterator to get nested serial iterators,
863
    /// producing a new parallel iterator that flattens these back into one.
864
    ///
865
    /// # `flat_map_iter` versus `flat_map`
866
    ///
867
    /// These two methods are similar but behave slightly differently. With [`flat_map`],
868
    /// each of the nested iterators must be a parallel iterator, and they will be further
869
    /// split up with nested parallelism. With `flat_map_iter`, each nested iterator is a
870
    /// sequential `Iterator`, and we only parallelize _between_ them, while the items
871
    /// produced by each nested iterator are processed sequentially.
872
    ///
873
    /// When choosing between these methods, consider whether nested parallelism suits the
874
    /// potential iterators at hand. If there's little computation involved, or its length
875
    /// is much less than the outer parallel iterator, then it may perform better to avoid
876
    /// the overhead of parallelism, just flattening sequentially with `flat_map_iter`.
877
    /// If there is a lot of computation, potentially outweighing the outer parallel
878
    /// iterator, then the nested parallelism of `flat_map` may be worthwhile.
879
    ///
880
    /// [`flat_map`]: #method.flat_map
881
    ///
882
    /// # Examples
883
    ///
884
    /// ```
885
    /// use rayon::prelude::*;
886
    /// use std::cell::RefCell;
887
    ///
888
    /// let a = [[1, 2], [3, 4], [5, 6], [7, 8]];
889
    ///
890
    /// let par_iter = a.par_iter().flat_map_iter(|a| {
891
    ///     // The serial iterator doesn't have to be thread-safe, just its items.
892
    ///     let cell_iter = RefCell::new(a.iter().cloned());
893
    ///     std::iter::from_fn(move || cell_iter.borrow_mut().next())
894
    /// });
895
    ///
896
    /// let vec: Vec<_> = par_iter.collect();
897
    ///
898
    /// assert_eq!(&vec[..], &[1, 2, 3, 4, 5, 6, 7, 8]);
899
    /// ```
900
0
    fn flat_map_iter<F, SI>(self, map_op: F) -> FlatMapIter<Self, F>
901
0
    where
902
0
        F: Fn(Self::Item) -> SI + Sync + Send,
903
0
        SI: IntoIterator<Item: Send>,
904
    {
905
0
        FlatMapIter::new(self, map_op)
906
0
    }
907
908
    /// An adaptor that flattens parallel-iterable `Item`s into one large iterator.
909
    ///
910
    /// See also [`flatten_iter`](#method.flatten_iter).
911
    ///
912
    /// # Examples
913
    ///
914
    /// ```
915
    /// use rayon::prelude::*;
916
    ///
917
    /// let x: Vec<Vec<_>> = vec![vec![1, 2], vec![3, 4]];
918
    /// let y: Vec<_> = x.into_par_iter().flatten().collect();
919
    ///
920
    /// assert_eq!(y, vec![1, 2, 3, 4]);
921
    /// ```
922
0
    fn flatten(self) -> Flatten<Self>
923
0
    where
924
0
        Self::Item: IntoParallelIterator,
925
    {
926
0
        Flatten::new(self)
927
0
    }
928
929
    /// An adaptor that flattens serial-iterable `Item`s into one large iterator.
930
    ///
931
    /// See also [`flatten`](#method.flatten) and the analogous comparison of
932
    /// [`flat_map_iter` versus `flat_map`](#flat_map_iter-versus-flat_map).
933
    ///
934
    /// # Examples
935
    ///
936
    /// ```
937
    /// use rayon::prelude::*;
938
    ///
939
    /// let x: Vec<Vec<_>> = vec![vec![1, 2], vec![3, 4]];
940
    /// let iters: Vec<_> = x.into_iter().map(Vec::into_iter).collect();
941
    /// let y: Vec<_> = iters.into_par_iter().flatten_iter().collect();
942
    ///
943
    /// assert_eq!(y, vec![1, 2, 3, 4]);
944
    /// ```
945
0
    fn flatten_iter(self) -> FlattenIter<Self>
946
0
    where
947
0
        Self::Item: IntoIterator<Item: Send>,
948
    {
949
0
        FlattenIter::new(self)
950
0
    }
951
952
    /// Reduces the items in the iterator into one item using `op`.
953
    /// The argument `identity` should be a closure that can produce
954
    /// "identity" value which may be inserted into the sequence as
955
    /// needed to create opportunities for parallel execution. So, for
956
    /// example, if you are doing a summation, then `identity()` ought
957
    /// to produce something that represents the zero for your type
958
    /// (but consider just calling `sum()` in that case).
959
    ///
960
    /// # Examples
961
    ///
962
    /// ```
963
    /// // Iterate over a sequence of pairs `(x0, y0), ..., (xN, yN)`
964
    /// // and use reduce to compute one pair `(x0 + ... + xN, y0 + ... + yN)`
965
    /// // where the first/second elements are summed separately.
966
    /// use rayon::prelude::*;
967
    /// let sums = [(0, 1), (5, 6), (16, 2), (8, 9)]
968
    ///            .par_iter()        // iterating over &(i32, i32)
969
    ///            .cloned()          // iterating over (i32, i32)
970
    ///            .reduce(|| (0, 0), // the "identity" is 0 in both columns
971
    ///                    |a, b| (a.0 + b.0, a.1 + b.1));
972
    /// assert_eq!(sums, (0 + 5 + 16 + 8, 1 + 6 + 2 + 9));
973
    /// ```
974
    ///
975
    /// **Note:** unlike a sequential `fold` operation, the order in
976
    /// which `op` will be applied to reduce the result is not fully
977
    /// specified. So `op` should be [associative] or else the results
978
    /// will be non-deterministic. And of course `identity()` should
979
    /// produce a true identity.
980
    ///
981
    /// [associative]: https://en.wikipedia.org/wiki/Associative_property
982
0
    fn reduce<OP, ID>(self, identity: ID, op: OP) -> Self::Item
983
0
    where
984
0
        OP: Fn(Self::Item, Self::Item) -> Self::Item + Sync + Send,
985
0
        ID: Fn() -> Self::Item + Sync + Send,
986
    {
987
0
        reduce::reduce(self, identity, op)
988
0
    }
989
990
    /// Reduces the items in the iterator into one item using `op`.
991
    /// If the iterator is empty, `None` is returned; otherwise,
992
    /// `Some` is returned.
993
    ///
994
    /// This version of `reduce` is simple but somewhat less
995
    /// efficient. If possible, it is better to call `reduce()`, which
996
    /// requires an identity element.
997
    ///
998
    /// # Examples
999
    ///
1000
    /// ```
1001
    /// use rayon::prelude::*;
1002
    /// let sums = [(0, 1), (5, 6), (16, 2), (8, 9)]
1003
    ///            .par_iter()        // iterating over &(i32, i32)
1004
    ///            .cloned()          // iterating over (i32, i32)
1005
    ///            .reduce_with(|a, b| (a.0 + b.0, a.1 + b.1))
1006
    ///            .unwrap();
1007
    /// assert_eq!(sums, (0 + 5 + 16 + 8, 1 + 6 + 2 + 9));
1008
    /// ```
1009
    ///
1010
    /// **Note:** unlike a sequential `fold` operation, the order in
1011
    /// which `op` will be applied to reduce the result is not fully
1012
    /// specified. So `op` should be [associative] or else the results
1013
    /// will be non-deterministic.
1014
    ///
1015
    /// [associative]: https://en.wikipedia.org/wiki/Associative_property
1016
0
    fn reduce_with<OP>(self, op: OP) -> Option<Self::Item>
1017
0
    where
1018
0
        OP: Fn(Self::Item, Self::Item) -> Self::Item + Sync + Send,
1019
    {
1020
0
        fn opt_fold<T>(op: impl Fn(T, T) -> T) -> impl Fn(Option<T>, T) -> Option<T> {
1021
0
            move |opt_a, b| match opt_a {
1022
0
                Some(a) => Some(op(a, b)),
1023
0
                None => Some(b),
1024
0
            }
1025
0
        }
1026
1027
0
        fn opt_reduce<T>(op: impl Fn(T, T) -> T) -> impl Fn(Option<T>, Option<T>) -> Option<T> {
1028
0
            move |opt_a, opt_b| match (opt_a, opt_b) {
1029
0
                (Some(a), Some(b)) => Some(op(a, b)),
1030
0
                (Some(v), None) | (None, Some(v)) => Some(v),
1031
0
                (None, None) => None,
1032
0
            }
1033
0
        }
1034
1035
0
        self.fold(<_>::default, opt_fold(&op))
1036
0
            .reduce(<_>::default, opt_reduce(&op))
1037
0
    }
1038
1039
    /// Reduces the items in the iterator into one item using a fallible `op`.
1040
    /// The `identity` argument is used the same way as in [`reduce()`].
1041
    ///
1042
    /// [`reduce()`]: #method.reduce
1043
    ///
1044
    /// If a `Result::Err` or `Option::None` item is found, or if `op` reduces
1045
    /// to one, we will attempt to stop processing the rest of the items in the
1046
    /// iterator as soon as possible, and we will return that terminating value.
1047
    /// Otherwise, we will return the final reduced `Result::Ok(T)` or
1048
    /// `Option::Some(T)`.  If there are multiple errors in parallel, it is not
1049
    /// specified which will be returned.
1050
    ///
1051
    /// # Examples
1052
    ///
1053
    /// ```
1054
    /// use rayon::prelude::*;
1055
    ///
1056
    /// // Compute the sum of squares, being careful about overflow.
1057
    /// fn sum_squares<I: IntoParallelIterator<Item = i32>>(iter: I) -> Option<i32> {
1058
    ///     iter.into_par_iter()
1059
    ///         .map(|i| i.checked_mul(i))            // square each item,
1060
    ///         .try_reduce(|| 0, i32::checked_add)   // and add them up!
1061
    /// }
1062
    /// assert_eq!(sum_squares(0..5), Some(0 + 1 + 4 + 9 + 16));
1063
    ///
1064
    /// // The sum might overflow
1065
    /// assert_eq!(sum_squares(0..10_000), None);
1066
    ///
1067
    /// // Or the squares might overflow before it even reaches `try_reduce`
1068
    /// assert_eq!(sum_squares(1_000_000..1_000_001), None);
1069
    /// ```
1070
0
    fn try_reduce<T, OP, ID>(self, identity: ID, op: OP) -> Self::Item
1071
0
    where
1072
0
        OP: Fn(T, T) -> Self::Item + Sync + Send,
1073
0
        ID: Fn() -> T + Sync + Send,
1074
0
        Self::Item: Try<Output = T>,
1075
    {
1076
0
        try_reduce::try_reduce(self, identity, op)
1077
0
    }
1078
1079
    /// Reduces the items in the iterator into one item using a fallible `op`.
1080
    ///
1081
    /// Like [`reduce_with()`], if the iterator is empty, `None` is returned;
1082
    /// otherwise, `Some` is returned.  Beyond that, it behaves like
1083
    /// [`try_reduce()`] for handling `Err`/`None`.
1084
    ///
1085
    /// [`reduce_with()`]: #method.reduce_with
1086
    /// [`try_reduce()`]: #method.try_reduce
1087
    ///
1088
    /// For instance, with `Option` items, the return value may be:
1089
    /// - `None`, the iterator was empty
1090
    /// - `Some(None)`, we stopped after encountering `None`.
1091
    /// - `Some(Some(x))`, the entire iterator reduced to `x`.
1092
    ///
1093
    /// With `Result` items, the nesting is more obvious:
1094
    /// - `None`, the iterator was empty
1095
    /// - `Some(Err(e))`, we stopped after encountering an error `e`.
1096
    /// - `Some(Ok(x))`, the entire iterator reduced to `x`.
1097
    ///
1098
    /// # Examples
1099
    ///
1100
    /// ```
1101
    /// use rayon::prelude::*;
1102
    ///
1103
    /// let files = ["/dev/null", "/does/not/exist"];
1104
    ///
1105
    /// // Find the biggest file
1106
    /// files.into_par_iter()
1107
    ///     .map(|path| std::fs::metadata(path).map(|m| (path, m.len())))
1108
    ///     .try_reduce_with(|a, b| {
1109
    ///         Ok(if a.1 >= b.1 { a } else { b })
1110
    ///     })
1111
    ///     .expect("Some value, since the iterator is not empty")
1112
    ///     .expect_err("not found");
1113
    /// ```
1114
0
    fn try_reduce_with<T, OP>(self, op: OP) -> Option<Self::Item>
1115
0
    where
1116
0
        OP: Fn(T, T) -> Self::Item + Sync + Send,
1117
0
        Self::Item: Try<Output = T>,
1118
    {
1119
0
        try_reduce_with::try_reduce_with(self, op)
1120
0
    }
1121
1122
    /// Parallel fold is similar to sequential fold except that the
1123
    /// sequence of items may be subdivided before it is
1124
    /// folded. Consider a list of numbers like `22 3 77 89 46`. If
1125
    /// you used sequential fold to add them (`fold(0, |a,b| a+b)`,
1126
    /// you would wind up first adding 0 + 22, then 22 + 3, then 25 +
1127
    /// 77, and so forth. The **parallel fold** works similarly except
1128
    /// that it first breaks up your list into sublists, and hence
1129
    /// instead of yielding up a single sum at the end, it yields up
1130
    /// multiple sums. The number of results is nondeterministic, as
1131
    /// is the point where the breaks occur.
1132
    ///
1133
    /// So if we did the same parallel fold (`fold(0, |a,b| a+b)`) on
1134
    /// our example list, we might wind up with a sequence of two numbers,
1135
    /// like so:
1136
    ///
1137
    /// ```notrust
1138
    /// 22 3 77 89 46
1139
    ///       |     |
1140
    ///     102   135
1141
    /// ```
1142
    ///
1143
    /// Or perhaps these three numbers:
1144
    ///
1145
    /// ```notrust
1146
    /// 22 3 77 89 46
1147
    ///       |  |  |
1148
    ///     102 89 46
1149
    /// ```
1150
    ///
1151
    /// In general, Rayon will attempt to find good breaking points
1152
    /// that keep all of your cores busy.
1153
    ///
1154
    /// ### Fold versus reduce
1155
    ///
1156
    /// The `fold()` and `reduce()` methods each take an identity element
1157
    /// and a combining function, but they operate rather differently.
1158
    ///
1159
    /// `reduce()` requires that the identity function has the same
1160
    /// type as the things you are iterating over, and it fully
1161
    /// reduces the list of items into a single item. So, for example,
1162
    /// imagine we are iterating over a list of bytes `bytes: [128_u8,
1163
    /// 64_u8, 64_u8]`. If we used `bytes.reduce(|| 0_u8, |a: u8, b:
1164
    /// u8| a + b)`, we would get an overflow. This is because `0`,
1165
    /// `a`, and `b` here are all bytes, just like the numbers in the
1166
    /// list (I wrote the types explicitly above, but those are the
1167
    /// only types you can use). To avoid the overflow, we would need
1168
    /// to do something like `bytes.map(|b| b as u32).reduce(|| 0, |a,
1169
    /// b| a + b)`, in which case our result would be `256`.
1170
    ///
1171
    /// In contrast, with `fold()`, the identity function does not
1172
    /// have to have the same type as the things you are iterating
1173
    /// over, and you potentially get back many results. So, if we
1174
    /// continue with the `bytes` example from the previous paragraph,
1175
    /// we could do `bytes.fold(|| 0_u32, |a, b| a + (b as u32))` to
1176
    /// convert our bytes into `u32`. And of course we might not get
1177
    /// back a single sum.
1178
    ///
1179
    /// There is a more subtle distinction as well, though it's
1180
    /// actually implied by the above points. When you use `reduce()`,
1181
    /// your reduction function is sometimes called with values that
1182
    /// were never part of your original parallel iterator (for
1183
    /// example, both the left and right might be a partial sum). With
1184
    /// `fold()`, in contrast, the left value in the fold function is
1185
    /// always the accumulator, and the right value is always from
1186
    /// your original sequence.
1187
    ///
1188
    /// ### Fold vs Map/Reduce
1189
    ///
1190
    /// Fold makes sense if you have some operation where it is
1191
    /// cheaper to create groups of elements at a time. For example,
1192
    /// imagine collecting characters into a string. If you were going
1193
    /// to use map/reduce, you might try this:
1194
    ///
1195
    /// ```
1196
    /// use rayon::prelude::*;
1197
    ///
1198
    /// let s =
1199
    ///     ['a', 'b', 'c', 'd', 'e']
1200
    ///     .par_iter()
1201
    ///     .map(|c: &char| format!("{}", c))
1202
    ///     .reduce(|| String::new(),
1203
    ///             |mut a: String, b: String| { a.push_str(&b); a });
1204
    ///
1205
    /// assert_eq!(s, "abcde");
1206
    /// ```
1207
    ///
1208
    /// Because reduce produces the same type of element as its input,
1209
    /// you have to first map each character into a string, and then
1210
    /// you can reduce them. This means we create one string per
1211
    /// element in our iterator -- not so great. Using `fold`, we can
1212
    /// do this instead:
1213
    ///
1214
    /// ```
1215
    /// use rayon::prelude::*;
1216
    ///
1217
    /// let s =
1218
    ///     ['a', 'b', 'c', 'd', 'e']
1219
    ///     .par_iter()
1220
    ///     .fold(|| String::new(),
1221
    ///             |mut s: String, c: &char| { s.push(*c); s })
1222
    ///     .reduce(|| String::new(),
1223
    ///             |mut a: String, b: String| { a.push_str(&b); a });
1224
    ///
1225
    /// assert_eq!(s, "abcde");
1226
    /// ```
1227
    ///
1228
    /// Now `fold` will process groups of our characters at a time,
1229
    /// and we only make one string per group. We should wind up with
1230
    /// some small-ish number of strings roughly proportional to the
1231
    /// number of CPUs you have (it will ultimately depend on how busy
1232
    /// your processors are). Note that we still need to do a reduce
1233
    /// afterwards to combine those groups of strings into a single
1234
    /// string.
1235
    ///
1236
    /// You could use a similar trick to save partial results (e.g., a
1237
    /// cache) or something similar.
1238
    ///
1239
    /// ### Combining fold with other operations
1240
    ///
1241
    /// You can combine `fold` with `reduce` if you want to produce a
1242
    /// single value. This is then roughly equivalent to a map/reduce
1243
    /// combination in effect:
1244
    ///
1245
    /// ```
1246
    /// use rayon::prelude::*;
1247
    ///
1248
    /// let bytes = 0..22_u8;
1249
    /// let sum = bytes.into_par_iter()
1250
    ///                .fold(|| 0_u32, |a: u32, b: u8| a + (b as u32))
1251
    ///                .sum::<u32>();
1252
    ///
1253
    /// assert_eq!(sum, (0..22).sum()); // compare to sequential
1254
    /// ```
1255
0
    fn fold<T, ID, F>(self, identity: ID, fold_op: F) -> Fold<Self, ID, F>
1256
0
    where
1257
0
        F: Fn(T, Self::Item) -> T + Sync + Send,
1258
0
        ID: Fn() -> T + Sync + Send,
1259
0
        T: Send,
1260
    {
1261
0
        Fold::new(self, identity, fold_op)
1262
0
    }
1263
1264
    /// Applies `fold_op` to the given `init` value with each item of this
1265
    /// iterator, finally producing the value for further use.
1266
    ///
1267
    /// This works essentially like `fold(|| init.clone(), fold_op)`, except
1268
    /// it doesn't require the `init` type to be `Sync`, nor any other form
1269
    /// of added synchronization.
1270
    ///
1271
    /// # Examples
1272
    ///
1273
    /// ```
1274
    /// use rayon::prelude::*;
1275
    ///
1276
    /// let bytes = 0..22_u8;
1277
    /// let sum = bytes.into_par_iter()
1278
    ///                .fold_with(0_u32, |a: u32, b: u8| a + (b as u32))
1279
    ///                .sum::<u32>();
1280
    ///
1281
    /// assert_eq!(sum, (0..22).sum()); // compare to sequential
1282
    /// ```
1283
0
    fn fold_with<F, T>(self, init: T, fold_op: F) -> FoldWith<Self, T, F>
1284
0
    where
1285
0
        F: Fn(T, Self::Item) -> T + Sync + Send,
1286
0
        T: Send + Clone,
1287
    {
1288
0
        FoldWith::new(self, init, fold_op)
1289
0
    }
1290
1291
    /// Performs a fallible parallel fold.
1292
    ///
1293
    /// This is a variation of [`fold()`] for operations which can fail with
1294
    /// `Option::None` or `Result::Err`.  The first such failure stops
1295
    /// processing the local set of items, without affecting other folds in the
1296
    /// iterator's subdivisions.
1297
    ///
1298
    /// Often, `try_fold()` will be followed by [`try_reduce()`]
1299
    /// for a final reduction and global short-circuiting effect.
1300
    ///
1301
    /// [`fold()`]: #method.fold
1302
    /// [`try_reduce()`]: #method.try_reduce
1303
    ///
1304
    /// # Examples
1305
    ///
1306
    /// ```
1307
    /// use rayon::prelude::*;
1308
    ///
1309
    /// let bytes = 0..22_u8;
1310
    /// let sum = bytes.into_par_iter()
1311
    ///                .try_fold(|| 0_u32, |a: u32, b: u8| a.checked_add(b as u32))
1312
    ///                .try_reduce(|| 0, u32::checked_add);
1313
    ///
1314
    /// assert_eq!(sum, Some((0..22).sum())); // compare to sequential
1315
    /// ```
1316
0
    fn try_fold<T, R, ID, F>(self, identity: ID, fold_op: F) -> TryFold<Self, R, ID, F>
1317
0
    where
1318
0
        F: Fn(T, Self::Item) -> R + Sync + Send,
1319
0
        ID: Fn() -> T + Sync + Send,
1320
0
        R: Try<Output = T> + Send,
1321
    {
1322
0
        TryFold::new(self, identity, fold_op)
1323
0
    }
1324
1325
    /// Performs a fallible parallel fold with a cloneable `init` value.
1326
    ///
1327
    /// This combines the `init` semantics of [`fold_with()`] and the failure
1328
    /// semantics of [`try_fold()`].
1329
    ///
1330
    /// [`fold_with()`]: #method.fold_with
1331
    /// [`try_fold()`]: #method.try_fold
1332
    ///
1333
    /// ```
1334
    /// use rayon::prelude::*;
1335
    ///
1336
    /// let bytes = 0..22_u8;
1337
    /// let sum = bytes.into_par_iter()
1338
    ///                .try_fold_with(0_u32, |a: u32, b: u8| a.checked_add(b as u32))
1339
    ///                .try_reduce(|| 0, u32::checked_add);
1340
    ///
1341
    /// assert_eq!(sum, Some((0..22).sum())); // compare to sequential
1342
    /// ```
1343
0
    fn try_fold_with<F, T, R>(self, init: T, fold_op: F) -> TryFoldWith<Self, R, F>
1344
0
    where
1345
0
        F: Fn(T, Self::Item) -> R + Sync + Send,
1346
0
        R: Try<Output = T> + Send,
1347
0
        T: Clone + Send,
1348
    {
1349
0
        TryFoldWith::new(self, init, fold_op)
1350
0
    }
1351
1352
    /// Sums up the items in the iterator.
1353
    ///
1354
    /// Note that the order in items will be reduced is not specified,
1355
    /// so if the `+` operator is not truly [associative] \(as is the
1356
    /// case for floating point numbers), then the results are not
1357
    /// fully deterministic.
1358
    ///
1359
    /// [associative]: https://en.wikipedia.org/wiki/Associative_property
1360
    ///
1361
    /// Basically equivalent to `self.reduce(|| 0, |a, b| a + b)`,
1362
    /// except that the type of `0` and the `+` operation may vary
1363
    /// depending on the type of value being produced.
1364
    ///
1365
    /// # Examples
1366
    ///
1367
    /// ```
1368
    /// use rayon::prelude::*;
1369
    ///
1370
    /// let a = [1, 5, 7];
1371
    ///
1372
    /// let sum: i32 = a.par_iter().sum();
1373
    ///
1374
    /// assert_eq!(sum, 13);
1375
    /// ```
1376
0
    fn sum<S>(self) -> S
1377
0
    where
1378
0
        S: Send + Sum<Self::Item> + Sum<S>,
1379
    {
1380
0
        sum::sum(self)
1381
0
    }
1382
1383
    /// Multiplies all the items in the iterator.
1384
    ///
1385
    /// Note that the order in items will be reduced is not specified,
1386
    /// so if the `*` operator is not truly [associative] \(as is the
1387
    /// case for floating point numbers), then the results are not
1388
    /// fully deterministic.
1389
    ///
1390
    /// [associative]: https://en.wikipedia.org/wiki/Associative_property
1391
    ///
1392
    /// Basically equivalent to `self.reduce(|| 1, |a, b| a * b)`,
1393
    /// except that the type of `1` and the `*` operation may vary
1394
    /// depending on the type of value being produced.
1395
    ///
1396
    /// # Examples
1397
    ///
1398
    /// ```
1399
    /// use rayon::prelude::*;
1400
    ///
1401
    /// fn factorial(n: u32) -> u32 {
1402
    ///    (1..n+1).into_par_iter().product()
1403
    /// }
1404
    ///
1405
    /// assert_eq!(factorial(0), 1);
1406
    /// assert_eq!(factorial(1), 1);
1407
    /// assert_eq!(factorial(5), 120);
1408
    /// ```
1409
0
    fn product<P>(self) -> P
1410
0
    where
1411
0
        P: Send + Product<Self::Item> + Product<P>,
1412
    {
1413
0
        product::product(self)
1414
0
    }
1415
1416
    /// Computes the minimum of all the items in the iterator. If the
1417
    /// iterator is empty, `None` is returned; otherwise, `Some(min)`
1418
    /// is returned.
1419
    ///
1420
    /// Note that the order in which the items will be reduced is not
1421
    /// specified, so if the `Ord` impl is not truly associative, then
1422
    /// the results are not deterministic.
1423
    ///
1424
    /// Basically equivalent to `self.reduce_with(|a, b| Ord::min(a, b))`.
1425
    ///
1426
    /// # Examples
1427
    ///
1428
    /// ```
1429
    /// use rayon::prelude::*;
1430
    ///
1431
    /// let a = [45, 74, 32];
1432
    ///
1433
    /// assert_eq!(a.par_iter().min(), Some(&32));
1434
    ///
1435
    /// let b: [i32; 0] = [];
1436
    ///
1437
    /// assert_eq!(b.par_iter().min(), None);
1438
    /// ```
1439
0
    fn min(self) -> Option<Self::Item>
1440
0
    where
1441
0
        Self::Item: Ord,
1442
    {
1443
0
        self.reduce_with(Ord::min)
1444
0
    }
1445
1446
    /// Computes the minimum of all the items in the iterator with respect to
1447
    /// the given comparison function. If the iterator is empty, `None` is
1448
    /// returned; otherwise, `Some(min)` is returned.
1449
    ///
1450
    /// Note that the order in which the items will be reduced is not
1451
    /// specified, so if the comparison function is not associative, then
1452
    /// the results are not deterministic.
1453
    ///
1454
    /// # Examples
1455
    ///
1456
    /// ```
1457
    /// use rayon::prelude::*;
1458
    ///
1459
    /// let a = [-3_i32, 77, 53, 240, -1];
1460
    ///
1461
    /// assert_eq!(a.par_iter().min_by(|x, y| x.cmp(y)), Some(&-3));
1462
    /// ```
1463
0
    fn min_by<F>(self, f: F) -> Option<Self::Item>
1464
0
    where
1465
0
        F: Sync + Send + Fn(&Self::Item, &Self::Item) -> Ordering,
1466
    {
1467
0
        fn min<T>(f: impl Fn(&T, &T) -> Ordering) -> impl Fn(T, T) -> T {
1468
0
            move |a, b| match f(&a, &b) {
1469
0
                Ordering::Greater => b,
1470
0
                _ => a,
1471
0
            }
1472
0
        }
1473
1474
0
        self.reduce_with(min(f))
1475
0
    }
1476
1477
    /// Computes the item that yields the minimum value for the given
1478
    /// function. If the iterator is empty, `None` is returned;
1479
    /// otherwise, `Some(item)` is returned.
1480
    ///
1481
    /// Note that the order in which the items will be reduced is not
1482
    /// specified, so if the `Ord` impl is not truly associative, then
1483
    /// the results are not deterministic.
1484
    ///
1485
    /// # Examples
1486
    ///
1487
    /// ```
1488
    /// use rayon::prelude::*;
1489
    ///
1490
    /// let a = [-3_i32, 34, 2, 5, -10, -3, -23];
1491
    ///
1492
    /// assert_eq!(a.par_iter().min_by_key(|x| x.abs()), Some(&2));
1493
    /// ```
1494
0
    fn min_by_key<K, F>(self, f: F) -> Option<Self::Item>
1495
0
    where
1496
0
        K: Ord + Send,
1497
0
        F: Sync + Send + Fn(&Self::Item) -> K,
1498
    {
1499
0
        fn key<T, K>(f: impl Fn(&T) -> K) -> impl Fn(T) -> (K, T) {
1500
0
            move |x| (f(&x), x)
1501
0
        }
1502
1503
0
        fn min_key<T, K: Ord>(a: (K, T), b: (K, T)) -> (K, T) {
1504
0
            match (a.0).cmp(&b.0) {
1505
0
                Ordering::Greater => b,
1506
0
                _ => a,
1507
            }
1508
0
        }
1509
1510
0
        let (_, x) = self.map(key(f)).reduce_with(min_key)?;
1511
0
        Some(x)
1512
0
    }
1513
1514
    /// Computes the maximum of all the items in the iterator. If the
1515
    /// iterator is empty, `None` is returned; otherwise, `Some(max)`
1516
    /// is returned.
1517
    ///
1518
    /// Note that the order in which the items will be reduced is not
1519
    /// specified, so if the `Ord` impl is not truly associative, then
1520
    /// the results are not deterministic.
1521
    ///
1522
    /// Basically equivalent to `self.reduce_with(|a, b| Ord::max(a, b))`.
1523
    ///
1524
    /// # Examples
1525
    ///
1526
    /// ```
1527
    /// use rayon::prelude::*;
1528
    ///
1529
    /// let a = [45, 74, 32];
1530
    ///
1531
    /// assert_eq!(a.par_iter().max(), Some(&74));
1532
    ///
1533
    /// let b: [i32; 0] = [];
1534
    ///
1535
    /// assert_eq!(b.par_iter().max(), None);
1536
    /// ```
1537
0
    fn max(self) -> Option<Self::Item>
1538
0
    where
1539
0
        Self::Item: Ord,
1540
    {
1541
0
        self.reduce_with(Ord::max)
1542
0
    }
1543
1544
    /// Computes the maximum of all the items in the iterator with respect to
1545
    /// the given comparison function. If the iterator is empty, `None` is
1546
    /// returned; otherwise, `Some(max)` is returned.
1547
    ///
1548
    /// Note that the order in which the items will be reduced is not
1549
    /// specified, so if the comparison function is not associative, then
1550
    /// the results are not deterministic.
1551
    ///
1552
    /// # Examples
1553
    ///
1554
    /// ```
1555
    /// use rayon::prelude::*;
1556
    ///
1557
    /// let a = [-3_i32, 77, 53, 240, -1];
1558
    ///
1559
    /// assert_eq!(a.par_iter().max_by(|x, y| x.abs().cmp(&y.abs())), Some(&240));
1560
    /// ```
1561
0
    fn max_by<F>(self, f: F) -> Option<Self::Item>
1562
0
    where
1563
0
        F: Sync + Send + Fn(&Self::Item, &Self::Item) -> Ordering,
1564
    {
1565
0
        fn max<T>(f: impl Fn(&T, &T) -> Ordering) -> impl Fn(T, T) -> T {
1566
0
            move |a, b| match f(&a, &b) {
1567
0
                Ordering::Greater => a,
1568
0
                _ => b,
1569
0
            }
1570
0
        }
1571
1572
0
        self.reduce_with(max(f))
1573
0
    }
1574
1575
    /// Computes the item that yields the maximum value for the given
1576
    /// function. If the iterator is empty, `None` is returned;
1577
    /// otherwise, `Some(item)` is returned.
1578
    ///
1579
    /// Note that the order in which the items will be reduced is not
1580
    /// specified, so if the `Ord` impl is not truly associative, then
1581
    /// the results are not deterministic.
1582
    ///
1583
    /// # Examples
1584
    ///
1585
    /// ```
1586
    /// use rayon::prelude::*;
1587
    ///
1588
    /// let a = [-3_i32, 34, 2, 5, -10, -3, -23];
1589
    ///
1590
    /// assert_eq!(a.par_iter().max_by_key(|x| x.abs()), Some(&34));
1591
    /// ```
1592
0
    fn max_by_key<K, F>(self, f: F) -> Option<Self::Item>
1593
0
    where
1594
0
        K: Ord + Send,
1595
0
        F: Sync + Send + Fn(&Self::Item) -> K,
1596
    {
1597
0
        fn key<T, K>(f: impl Fn(&T) -> K) -> impl Fn(T) -> (K, T) {
1598
0
            move |x| (f(&x), x)
1599
0
        }
1600
1601
0
        fn max_key<T, K: Ord>(a: (K, T), b: (K, T)) -> (K, T) {
1602
0
            match (a.0).cmp(&b.0) {
1603
0
                Ordering::Greater => a,
1604
0
                _ => b,
1605
            }
1606
0
        }
1607
1608
0
        let (_, x) = self.map(key(f)).reduce_with(max_key)?;
1609
0
        Some(x)
1610
0
    }
1611
1612
    /// Takes two iterators and creates a new iterator over both.
1613
    ///
1614
    /// # Examples
1615
    ///
1616
    /// ```
1617
    /// use rayon::prelude::*;
1618
    ///
1619
    /// let a = [0, 1, 2];
1620
    /// let b = [9, 8, 7];
1621
    ///
1622
    /// let par_iter = a.par_iter().chain(b.par_iter());
1623
    ///
1624
    /// let chained: Vec<_> = par_iter.cloned().collect();
1625
    ///
1626
    /// assert_eq!(&chained[..], &[0, 1, 2, 9, 8, 7]);
1627
    /// ```
1628
0
    fn chain<C>(self, chain: C) -> Chain<Self, C::Iter>
1629
0
    where
1630
0
        C: IntoParallelIterator<Item = Self::Item>,
1631
    {
1632
0
        Chain::new(self, chain.into_par_iter())
1633
0
    }
Unexecuted instantiation: <rayon::range::Iter<i8> as rayon::iter::ParallelIterator>::chain::<rayon::iter::once::Once<i8>>
Unexecuted instantiation: <rayon::range::Iter<u8> as rayon::iter::ParallelIterator>::chain::<rayon::iter::once::Once<u8>>
Unexecuted instantiation: <rayon::range::Iter<isize> as rayon::iter::ParallelIterator>::chain::<rayon::iter::once::Once<isize>>
Unexecuted instantiation: <rayon::range::Iter<usize> as rayon::iter::ParallelIterator>::chain::<rayon::iter::once::Once<usize>>
Unexecuted instantiation: <rayon::range::Iter<i32> as rayon::iter::ParallelIterator>::chain::<rayon::iter::once::Once<i32>>
Unexecuted instantiation: <rayon::range::Iter<u32> as rayon::iter::ParallelIterator>::chain::<rayon::iter::once::Once<u32>>
Unexecuted instantiation: <rayon::range::Iter<i128> as rayon::iter::ParallelIterator>::chain::<rayon::iter::once::Once<i128>>
Unexecuted instantiation: <rayon::range::Iter<u128> as rayon::iter::ParallelIterator>::chain::<rayon::iter::once::Once<u128>>
Unexecuted instantiation: <rayon::range::Iter<i16> as rayon::iter::ParallelIterator>::chain::<rayon::iter::once::Once<i16>>
Unexecuted instantiation: <rayon::range::Iter<u16> as rayon::iter::ParallelIterator>::chain::<rayon::iter::once::Once<u16>>
Unexecuted instantiation: <rayon::range::Iter<i64> as rayon::iter::ParallelIterator>::chain::<rayon::iter::once::Once<i64>>
Unexecuted instantiation: <rayon::range::Iter<u64> as rayon::iter::ParallelIterator>::chain::<rayon::iter::once::Once<u64>>
1634
1635
    /// Searches for **some** item in the parallel iterator that
1636
    /// matches the given predicate and returns it. This operation
1637
    /// is similar to [`find` on sequential iterators][find] but
1638
    /// the item returned may not be the **first** one in the parallel
1639
    /// sequence which matches, since we search the entire sequence in parallel.
1640
    ///
1641
    /// Once a match is found, we will attempt to stop processing
1642
    /// the rest of the items in the iterator as soon as possible
1643
    /// (just as `find` stops iterating once a match is found).
1644
    ///
1645
    /// [find]: Iterator::find()
1646
    ///
1647
    /// # Examples
1648
    ///
1649
    /// ```
1650
    /// use rayon::prelude::*;
1651
    ///
1652
    /// let a = [1, 2, 3, 3];
1653
    ///
1654
    /// assert_eq!(a.par_iter().find_any(|&&x| x == 3), Some(&3));
1655
    ///
1656
    /// assert_eq!(a.par_iter().find_any(|&&x| x == 100), None);
1657
    /// ```
1658
0
    fn find_any<P>(self, predicate: P) -> Option<Self::Item>
1659
0
    where
1660
0
        P: Fn(&Self::Item) -> bool + Sync + Send,
1661
    {
1662
0
        find::find(self, predicate)
1663
0
    }
1664
1665
    /// Searches for the sequentially **first** item in the parallel iterator
1666
    /// that matches the given predicate and returns it.
1667
    ///
1668
    /// Once a match is found, all attempts to the right of the match
1669
    /// will be stopped, while attempts to the left must continue in case
1670
    /// an earlier match is found.
1671
    ///
1672
    /// For added performance, you might consider using `find_first` in conjunction with
1673
    /// [`by_exponential_blocks()`][IndexedParallelIterator::by_exponential_blocks].
1674
    ///
1675
    /// Note that not all parallel iterators have a useful order, much like
1676
    /// sequential `HashMap` iteration, so "first" may be nebulous.  If you
1677
    /// just want the first match that discovered anywhere in the iterator,
1678
    /// `find_any` is a better choice.
1679
    ///
1680
    /// # Examples
1681
    ///
1682
    /// ```
1683
    /// use rayon::prelude::*;
1684
    ///
1685
    /// let a = [1, 2, 3, 3];
1686
    ///
1687
    /// assert_eq!(a.par_iter().find_first(|&&x| x == 3), Some(&3));
1688
    ///
1689
    /// assert_eq!(a.par_iter().find_first(|&&x| x == 100), None);
1690
    /// ```
1691
0
    fn find_first<P>(self, predicate: P) -> Option<Self::Item>
1692
0
    where
1693
0
        P: Fn(&Self::Item) -> bool + Sync + Send,
1694
    {
1695
0
        find_first_last::find_first(self, predicate)
1696
0
    }
1697
1698
    /// Searches for the sequentially **last** item in the parallel iterator
1699
    /// that matches the given predicate and returns it.
1700
    ///
1701
    /// Once a match is found, all attempts to the left of the match
1702
    /// will be stopped, while attempts to the right must continue in case
1703
    /// a later match is found.
1704
    ///
1705
    /// Note that not all parallel iterators have a useful order, much like
1706
    /// sequential `HashMap` iteration, so "last" may be nebulous.  When the
1707
    /// order doesn't actually matter to you, `find_any` is a better choice.
1708
    ///
1709
    /// # Examples
1710
    ///
1711
    /// ```
1712
    /// use rayon::prelude::*;
1713
    ///
1714
    /// let a = [1, 2, 3, 3];
1715
    ///
1716
    /// assert_eq!(a.par_iter().find_last(|&&x| x == 3), Some(&3));
1717
    ///
1718
    /// assert_eq!(a.par_iter().find_last(|&&x| x == 100), None);
1719
    /// ```
1720
0
    fn find_last<P>(self, predicate: P) -> Option<Self::Item>
1721
0
    where
1722
0
        P: Fn(&Self::Item) -> bool + Sync + Send,
1723
    {
1724
0
        find_first_last::find_last(self, predicate)
1725
0
    }
1726
1727
    /// Applies the given predicate to the items in the parallel iterator
1728
    /// and returns **any** non-None result of the map operation.
1729
    ///
1730
    /// Once a non-None value is produced from the map operation, we will
1731
    /// attempt to stop processing the rest of the items in the iterator
1732
    /// as soon as possible.
1733
    ///
1734
    /// Note that this method only returns **some** item in the parallel
1735
    /// iterator that is not None from the map predicate. The item returned
1736
    /// may not be the **first** non-None value produced in the parallel
1737
    /// sequence, since the entire sequence is mapped over in parallel.
1738
    ///
1739
    /// # Examples
1740
    ///
1741
    /// ```
1742
    /// use rayon::prelude::*;
1743
    ///
1744
    /// let c = ["lol", "NaN", "5", "5"];
1745
    ///
1746
    /// let found_number = c.par_iter().find_map_any(|s| s.parse().ok());
1747
    ///
1748
    /// assert_eq!(found_number, Some(5));
1749
    /// ```
1750
0
    fn find_map_any<P, R>(self, predicate: P) -> Option<R>
1751
0
    where
1752
0
        P: Fn(Self::Item) -> Option<R> + Sync + Send,
1753
0
        R: Send,
1754
    {
1755
0
        fn yes<T>(_: &T) -> bool {
1756
0
            true
1757
0
        }
1758
0
        self.filter_map(predicate).find_any(yes)
1759
0
    }
1760
1761
    /// Applies the given predicate to the items in the parallel iterator and
1762
    /// returns the sequentially **first** non-None result of the map operation.
1763
    ///
1764
    /// Once a non-None value is produced from the map operation, all attempts
1765
    /// to the right of the match will be stopped, while attempts to the left
1766
    /// must continue in case an earlier match is found.
1767
    ///
1768
    /// Note that not all parallel iterators have a useful order, much like
1769
    /// sequential `HashMap` iteration, so "first" may be nebulous. If you
1770
    /// just want the first non-None value discovered anywhere in the iterator,
1771
    /// `find_map_any` is a better choice.
1772
    ///
1773
    /// # Examples
1774
    ///
1775
    /// ```
1776
    /// use rayon::prelude::*;
1777
    ///
1778
    /// let c = ["lol", "NaN", "2", "5"];
1779
    ///
1780
    /// let first_number = c.par_iter().find_map_first(|s| s.parse().ok());
1781
    ///
1782
    /// assert_eq!(first_number, Some(2));
1783
    /// ```
1784
0
    fn find_map_first<P, R>(self, predicate: P) -> Option<R>
1785
0
    where
1786
0
        P: Fn(Self::Item) -> Option<R> + Sync + Send,
1787
0
        R: Send,
1788
    {
1789
0
        fn yes<T>(_: &T) -> bool {
1790
0
            true
1791
0
        }
1792
0
        self.filter_map(predicate).find_first(yes)
1793
0
    }
1794
1795
    /// Applies the given predicate to the items in the parallel iterator and
1796
    /// returns the sequentially **last** non-None result of the map operation.
1797
    ///
1798
    /// Once a non-None value is produced from the map operation, all attempts
1799
    /// to the left of the match will be stopped, while attempts to the right
1800
    /// must continue in case a later match is found.
1801
    ///
1802
    /// Note that not all parallel iterators have a useful order, much like
1803
    /// sequential `HashMap` iteration, so "first" may be nebulous. If you
1804
    /// just want the first non-None value discovered anywhere in the iterator,
1805
    /// `find_map_any` is a better choice.
1806
    ///
1807
    /// # Examples
1808
    ///
1809
    /// ```
1810
    /// use rayon::prelude::*;
1811
    ///
1812
    /// let c = ["lol", "NaN", "2", "5"];
1813
    ///
1814
    /// let last_number = c.par_iter().find_map_last(|s| s.parse().ok());
1815
    ///
1816
    /// assert_eq!(last_number, Some(5));
1817
    /// ```
1818
0
    fn find_map_last<P, R>(self, predicate: P) -> Option<R>
1819
0
    where
1820
0
        P: Fn(Self::Item) -> Option<R> + Sync + Send,
1821
0
        R: Send,
1822
    {
1823
0
        fn yes<T>(_: &T) -> bool {
1824
0
            true
1825
0
        }
1826
0
        self.filter_map(predicate).find_last(yes)
1827
0
    }
1828
1829
    #[doc(hidden)]
1830
    #[deprecated(note = "parallel `find` does not search in order -- use `find_any`, \\
1831
                         `find_first`, or `find_last`")]
1832
0
    fn find<P>(self, predicate: P) -> Option<Self::Item>
1833
0
    where
1834
0
        P: Fn(&Self::Item) -> bool + Sync + Send,
1835
    {
1836
0
        self.find_any(predicate)
1837
0
    }
1838
1839
    /// Searches for **some** item in the parallel iterator that
1840
    /// matches the given predicate, and if so returns true.  Once
1841
    /// a match is found, we'll attempt to stop process the rest
1842
    /// of the items.  Proving that there's no match, returning false,
1843
    /// does require visiting every item.
1844
    ///
1845
    /// # Examples
1846
    ///
1847
    /// ```
1848
    /// use rayon::prelude::*;
1849
    ///
1850
    /// let a = [0, 12, 3, 4, 0, 23, 0];
1851
    ///
1852
    /// let is_valid = a.par_iter().any(|&x| x > 10);
1853
    ///
1854
    /// assert!(is_valid);
1855
    /// ```
1856
0
    fn any<P>(self, predicate: P) -> bool
1857
0
    where
1858
0
        P: Fn(Self::Item) -> bool + Sync + Send,
1859
    {
1860
0
        self.map(predicate).find_any(bool::clone).is_some()
1861
0
    }
1862
1863
    /// Tests that every item in the parallel iterator matches the given
1864
    /// predicate, and if so returns true.  If a counter-example is found,
1865
    /// we'll attempt to stop processing more items, then return false.
1866
    ///
1867
    /// # Examples
1868
    ///
1869
    /// ```
1870
    /// use rayon::prelude::*;
1871
    ///
1872
    /// let a = [0, 12, 3, 4, 0, 23, 0];
1873
    ///
1874
    /// let is_valid = a.par_iter().all(|&x| x > 10);
1875
    ///
1876
    /// assert!(!is_valid);
1877
    /// ```
1878
0
    fn all<P>(self, predicate: P) -> bool
1879
0
    where
1880
0
        P: Fn(Self::Item) -> bool + Sync + Send,
1881
    {
1882
        #[inline]
1883
0
        fn is_false(x: &bool) -> bool {
1884
0
            !x
1885
0
        }
1886
1887
0
        self.map(predicate).find_any(is_false).is_none()
1888
0
    }
1889
1890
    /// Creates an iterator over the `Some` items of this iterator, halting
1891
    /// as soon as any `None` is found.
1892
    ///
1893
    /// # Examples
1894
    ///
1895
    /// ```
1896
    /// use rayon::prelude::*;
1897
    /// use std::sync::atomic::{AtomicUsize, Ordering};
1898
    ///
1899
    /// let counter = AtomicUsize::new(0);
1900
    /// let value = (0_i32..2048)
1901
    ///     .into_par_iter()
1902
    ///     .map(|x| {
1903
    ///              counter.fetch_add(1, Ordering::SeqCst);
1904
    ///              if x < 1024 { Some(x) } else { None }
1905
    ///          })
1906
    ///     .while_some()
1907
    ///     .max();
1908
    ///
1909
    /// assert!(value < Some(1024));
1910
    /// assert!(counter.load(Ordering::SeqCst) < 2048); // should not have visited every single one
1911
    /// ```
1912
0
    fn while_some<T>(self) -> WhileSome<Self>
1913
0
    where
1914
0
        Self: ParallelIterator<Item = Option<T>>,
1915
0
        T: Send,
1916
    {
1917
0
        WhileSome::new(self)
1918
0
    }
1919
1920
    /// Wraps an iterator with a fuse in case of panics, to halt all threads
1921
    /// as soon as possible.
1922
    ///
1923
    /// Panics within parallel iterators are always propagated to the caller,
1924
    /// but they don't always halt the rest of the iterator right away, due to
1925
    /// the internal semantics of [`join`]. This adaptor makes a greater effort
1926
    /// to stop processing other items sooner, with the cost of additional
1927
    /// synchronization overhead, which may also inhibit some optimizations.
1928
    ///
1929
    /// [`join`]: crate::join()#panics
1930
    ///
1931
    /// # Examples
1932
    ///
1933
    /// If this code didn't use `panic_fuse()`, it would continue processing
1934
    /// many more items in other threads (with long sleep delays) before the
1935
    /// panic is finally propagated.
1936
    ///
1937
    /// ```should_panic
1938
    /// use rayon::prelude::*;
1939
    /// use std::{thread, time};
1940
    ///
1941
    /// (0..1_000_000)
1942
    ///     .into_par_iter()
1943
    ///     .panic_fuse()
1944
    ///     .for_each(|i| {
1945
    ///         // simulate some work
1946
    ///         thread::sleep(time::Duration::from_secs(1));
1947
    ///         assert!(i > 0); // oops!
1948
    ///     });
1949
    /// ```
1950
0
    fn panic_fuse(self) -> PanicFuse<Self> {
1951
0
        PanicFuse::new(self)
1952
0
    }
1953
1954
    /// Creates a fresh collection containing all the elements produced
1955
    /// by this parallel iterator.
1956
    ///
1957
    /// You may prefer [`collect_into_vec()`] implemented on
1958
    /// [`IndexedParallelIterator`], if your underlying iterator also implements
1959
    /// it. [`collect_into_vec()`] allocates efficiently with precise knowledge
1960
    /// of how many elements the iterator contains, and even allows you to reuse
1961
    /// an existing vector's backing store rather than allocating a fresh vector.
1962
    ///
1963
    /// See also [`collect_vec_list()`] for collecting into a
1964
    /// `LinkedList<Vec<T>>`.
1965
    ///
1966
    /// [`collect_into_vec()`]: IndexedParallelIterator::collect_into_vec()
1967
    /// [`collect_vec_list()`]: Self::collect_vec_list()
1968
    ///
1969
    /// # Examples
1970
    ///
1971
    /// ```
1972
    /// use rayon::prelude::*;
1973
    ///
1974
    /// let sync_vec: Vec<_> = (0..100).into_iter().collect();
1975
    ///
1976
    /// let async_vec: Vec<_> = (0..100).into_par_iter().collect();
1977
    ///
1978
    /// assert_eq!(sync_vec, async_vec);
1979
    /// ```
1980
    ///
1981
    /// You can collect a pair of collections like [`unzip`](#method.unzip)
1982
    /// for paired items:
1983
    ///
1984
    /// ```
1985
    /// use rayon::prelude::*;
1986
    ///
1987
    /// let a = [(0, 1), (1, 2), (2, 3), (3, 4)];
1988
    /// let (first, second): (Vec<_>, Vec<_>) = a.into_par_iter().collect();
1989
    ///
1990
    /// assert_eq!(first, [0, 1, 2, 3]);
1991
    /// assert_eq!(second, [1, 2, 3, 4]);
1992
    /// ```
1993
    ///
1994
    /// Or like [`partition_map`](#method.partition_map) for `Either` items:
1995
    ///
1996
    /// ```
1997
    /// use rayon::prelude::*;
1998
    /// use rayon::iter::Either;
1999
    ///
2000
    /// let (left, right): (Vec<_>, Vec<_>) = (0..8).into_par_iter().map(|x| {
2001
    ///     if x % 2 == 0 {
2002
    ///         Either::Left(x * 4)
2003
    ///     } else {
2004
    ///         Either::Right(x * 3)
2005
    ///     }
2006
    /// }).collect();
2007
    ///
2008
    /// assert_eq!(left, [0, 8, 16, 24]);
2009
    /// assert_eq!(right, [3, 9, 15, 21]);
2010
    /// ```
2011
    ///
2012
    /// You can even collect an arbitrarily-nested combination of pairs and `Either`:
2013
    ///
2014
    /// ```
2015
    /// use rayon::prelude::*;
2016
    /// use rayon::iter::Either;
2017
    ///
2018
    /// let (first, (left, right)): (Vec<_>, (Vec<_>, Vec<_>))
2019
    ///     = (0..8).into_par_iter().map(|x| {
2020
    ///         if x % 2 == 0 {
2021
    ///             (x, Either::Left(x * 4))
2022
    ///         } else {
2023
    ///             (-x, Either::Right(x * 3))
2024
    ///         }
2025
    ///     }).collect();
2026
    ///
2027
    /// assert_eq!(first, [0, -1, 2, -3, 4, -5, 6, -7]);
2028
    /// assert_eq!(left, [0, 8, 16, 24]);
2029
    /// assert_eq!(right, [3, 9, 15, 21]);
2030
    /// ```
2031
    ///
2032
    /// All of that can _also_ be combined with short-circuiting collection of
2033
    /// `Result` or `Option` types:
2034
    ///
2035
    /// ```
2036
    /// use rayon::prelude::*;
2037
    /// use rayon::iter::Either;
2038
    ///
2039
    /// let result: Result<(Vec<_>, (Vec<_>, Vec<_>)), _>
2040
    ///     = (0..8).into_par_iter().map(|x| {
2041
    ///         if x > 5 {
2042
    ///             Err(x)
2043
    ///         } else if x % 2 == 0 {
2044
    ///             Ok((x, Either::Left(x * 4)))
2045
    ///         } else {
2046
    ///             Ok((-x, Either::Right(x * 3)))
2047
    ///         }
2048
    ///     }).collect();
2049
    ///
2050
    /// let error = result.unwrap_err();
2051
    /// assert!(error == 6 || error == 7);
2052
    /// ```
2053
0
    fn collect<C>(self) -> C
2054
0
    where
2055
0
        C: FromParallelIterator<Self::Item>,
2056
    {
2057
0
        C::from_par_iter(self)
2058
0
    }
2059
2060
    /// Unzips the items of a parallel iterator into a pair of arbitrary
2061
    /// `ParallelExtend` containers.
2062
    ///
2063
    /// You may prefer to use `unzip_into_vecs()`, which allocates more
2064
    /// efficiently with precise knowledge of how many elements the
2065
    /// iterator contains, and even allows you to reuse existing
2066
    /// vectors' backing stores rather than allocating fresh vectors.
2067
    ///
2068
    /// # Examples
2069
    ///
2070
    /// ```
2071
    /// use rayon::prelude::*;
2072
    ///
2073
    /// let a = [(0, 1), (1, 2), (2, 3), (3, 4)];
2074
    ///
2075
    /// let (left, right): (Vec<_>, Vec<_>) = a.par_iter().cloned().unzip();
2076
    ///
2077
    /// assert_eq!(left, [0, 1, 2, 3]);
2078
    /// assert_eq!(right, [1, 2, 3, 4]);
2079
    /// ```
2080
    ///
2081
    /// Nested pairs can be unzipped too.
2082
    ///
2083
    /// ```
2084
    /// use rayon::prelude::*;
2085
    ///
2086
    /// let (values, (squares, cubes)): (Vec<_>, (Vec<_>, Vec<_>)) = (0..4).into_par_iter()
2087
    ///     .map(|i| (i, (i * i, i * i * i)))
2088
    ///     .unzip();
2089
    ///
2090
    /// assert_eq!(values, [0, 1, 2, 3]);
2091
    /// assert_eq!(squares, [0, 1, 4, 9]);
2092
    /// assert_eq!(cubes, [0, 1, 8, 27]);
2093
    /// ```
2094
0
    fn unzip<A, B, FromA, FromB>(self) -> (FromA, FromB)
2095
0
    where
2096
0
        Self: ParallelIterator<Item = (A, B)>,
2097
0
        FromA: Default + Send + ParallelExtend<A>,
2098
0
        FromB: Default + Send + ParallelExtend<B>,
2099
0
        A: Send,
2100
0
        B: Send,
2101
    {
2102
0
        unzip::unzip(self)
2103
0
    }
Unexecuted instantiation: <rayon::iter::map::Map<rayon::vec::IntoIter<(rav1e::tiling::tiler::TileContextMut<u16>, &mut rav1e::context::cdf_context::CDFContext)>, rav1e::encoder::encode_tile_group<u16>::{closure#0}> as rayon::iter::ParallelIterator>::unzip::<alloc::vec::Vec<u8>, rav1e::stats::EncoderStats, alloc::vec::Vec<alloc::vec::Vec<u8>>, alloc::vec::Vec<rav1e::stats::EncoderStats>>
Unexecuted instantiation: <rayon::iter::map::Map<rayon::vec::IntoIter<(rav1e::tiling::tiler::TileContextMut<u8>, &mut rav1e::context::cdf_context::CDFContext)>, rav1e::encoder::encode_tile_group<u8>::{closure#0}> as rayon::iter::ParallelIterator>::unzip::<alloc::vec::Vec<u8>, rav1e::stats::EncoderStats, alloc::vec::Vec<alloc::vec::Vec<u8>>, alloc::vec::Vec<rav1e::stats::EncoderStats>>
Unexecuted instantiation: <_ as rayon::iter::ParallelIterator>::unzip::<_, _, _, _>
2104
2105
    /// Partitions the items of a parallel iterator into a pair of arbitrary
2106
    /// `ParallelExtend` containers.  Items for which the `predicate` returns
2107
    /// true go into the first container, and the rest go into the second.
2108
    ///
2109
    /// Note: unlike the standard `Iterator::partition`, this allows distinct
2110
    /// collection types for the left and right items.  This is more flexible,
2111
    /// but may require new type annotations when converting sequential code
2112
    /// that used type inference assuming the two were the same.
2113
    ///
2114
    /// # Examples
2115
    ///
2116
    /// ```
2117
    /// use rayon::prelude::*;
2118
    ///
2119
    /// let (left, right): (Vec<_>, Vec<_>) = (0..8).into_par_iter().partition(|x| x % 2 == 0);
2120
    ///
2121
    /// assert_eq!(left, [0, 2, 4, 6]);
2122
    /// assert_eq!(right, [1, 3, 5, 7]);
2123
    /// ```
2124
0
    fn partition<A, B, P>(self, predicate: P) -> (A, B)
2125
0
    where
2126
0
        A: Default + Send + ParallelExtend<Self::Item>,
2127
0
        B: Default + Send + ParallelExtend<Self::Item>,
2128
0
        P: Fn(&Self::Item) -> bool + Sync + Send,
2129
    {
2130
0
        unzip::partition(self, predicate)
2131
0
    }
2132
2133
    /// Partitions and maps the items of a parallel iterator into a pair of
2134
    /// arbitrary `ParallelExtend` containers.  `Either::Left` items go into
2135
    /// the first container, and `Either::Right` items go into the second.
2136
    ///
2137
    /// # Examples
2138
    ///
2139
    /// ```
2140
    /// use rayon::prelude::*;
2141
    /// use rayon::iter::Either;
2142
    ///
2143
    /// let (left, right): (Vec<_>, Vec<_>) = (0..8).into_par_iter()
2144
    ///     .partition_map(|x| {
2145
    ///         if x % 2 == 0 {
2146
    ///             Either::Left(x * 4)
2147
    ///         } else {
2148
    ///             Either::Right(x * 3)
2149
    ///         }
2150
    ///     });
2151
    ///
2152
    /// assert_eq!(left, [0, 8, 16, 24]);
2153
    /// assert_eq!(right, [3, 9, 15, 21]);
2154
    /// ```
2155
    ///
2156
    /// Nested `Either` enums can be split as well.
2157
    ///
2158
    /// ```
2159
    /// use rayon::prelude::*;
2160
    /// use rayon::iter::Either::*;
2161
    ///
2162
    /// let ((fizzbuzz, fizz), (buzz, other)): ((Vec<_>, Vec<_>), (Vec<_>, Vec<_>)) = (1..20)
2163
    ///     .into_par_iter()
2164
    ///     .partition_map(|x| match (x % 3, x % 5) {
2165
    ///         (0, 0) => Left(Left(x)),
2166
    ///         (0, _) => Left(Right(x)),
2167
    ///         (_, 0) => Right(Left(x)),
2168
    ///         (_, _) => Right(Right(x)),
2169
    ///     });
2170
    ///
2171
    /// assert_eq!(fizzbuzz, [15]);
2172
    /// assert_eq!(fizz, [3, 6, 9, 12, 18]);
2173
    /// assert_eq!(buzz, [5, 10]);
2174
    /// assert_eq!(other, [1, 2, 4, 7, 8, 11, 13, 14, 16, 17, 19]);
2175
    /// ```
2176
0
    fn partition_map<A, B, P, L, R>(self, predicate: P) -> (A, B)
2177
0
    where
2178
0
        A: Default + Send + ParallelExtend<L>,
2179
0
        B: Default + Send + ParallelExtend<R>,
2180
0
        P: Fn(Self::Item) -> Either<L, R> + Sync + Send,
2181
0
        L: Send,
2182
0
        R: Send,
2183
    {
2184
0
        unzip::partition_map(self, predicate)
2185
0
    }
2186
2187
    /// Intersperses clones of an element between items of this iterator.
2188
    ///
2189
    /// # Examples
2190
    ///
2191
    /// ```
2192
    /// use rayon::prelude::*;
2193
    ///
2194
    /// let x = vec![1, 2, 3];
2195
    /// let r: Vec<_> = x.into_par_iter().intersperse(-1).collect();
2196
    ///
2197
    /// assert_eq!(r, vec![1, -1, 2, -1, 3]);
2198
    /// ```
2199
0
    fn intersperse(self, element: Self::Item) -> Intersperse<Self>
2200
0
    where
2201
0
        Self::Item: Clone,
2202
    {
2203
0
        Intersperse::new(self, element)
2204
0
    }
2205
2206
    /// Creates an iterator that yields `n` elements from *anywhere* in the original iterator.
2207
    ///
2208
    /// This is similar to [`IndexedParallelIterator::take`] without being
2209
    /// constrained to the "first" `n` of the original iterator order. The
2210
    /// taken items will still maintain their relative order where that is
2211
    /// visible in `collect`, `reduce`, and similar outputs.
2212
    ///
2213
    /// # Examples
2214
    ///
2215
    /// ```
2216
    /// use rayon::prelude::*;
2217
    ///
2218
    /// let result: Vec<_> = (0..100)
2219
    ///     .into_par_iter()
2220
    ///     .filter(|&x| x % 2 == 0)
2221
    ///     .take_any(5)
2222
    ///     .collect();
2223
    ///
2224
    /// assert_eq!(result.len(), 5);
2225
    /// assert!(result.windows(2).all(|w| w[0] < w[1]));
2226
    /// ```
2227
0
    fn take_any(self, n: usize) -> TakeAny<Self> {
2228
0
        TakeAny::new(self, n)
2229
0
    }
2230
2231
    /// Creates an iterator that skips `n` elements from *anywhere* in the original iterator.
2232
    ///
2233
    /// This is similar to [`IndexedParallelIterator::skip`] without being
2234
    /// constrained to the "first" `n` of the original iterator order. The
2235
    /// remaining items will still maintain their relative order where that is
2236
    /// visible in `collect`, `reduce`, and similar outputs.
2237
    ///
2238
    /// # Examples
2239
    ///
2240
    /// ```
2241
    /// use rayon::prelude::*;
2242
    ///
2243
    /// let result: Vec<_> = (0..100)
2244
    ///     .into_par_iter()
2245
    ///     .filter(|&x| x % 2 == 0)
2246
    ///     .skip_any(5)
2247
    ///     .collect();
2248
    ///
2249
    /// assert_eq!(result.len(), 45);
2250
    /// assert!(result.windows(2).all(|w| w[0] < w[1]));
2251
    /// ```
2252
0
    fn skip_any(self, n: usize) -> SkipAny<Self> {
2253
0
        SkipAny::new(self, n)
2254
0
    }
2255
2256
    /// Creates an iterator that takes elements from *anywhere* in the original iterator
2257
    /// until the given `predicate` returns `false`.
2258
    ///
2259
    /// The `predicate` may be anything -- e.g. it could be checking a fact about the item, a
2260
    /// global condition unrelated to the item itself, or some combination thereof.
2261
    ///
2262
    /// If parallel calls to the `predicate` race and give different results, then the
2263
    /// `true` results will still take those particular items, while respecting the `false`
2264
    /// result from elsewhere to skip any further items.
2265
    ///
2266
    /// This is similar to [`Iterator::take_while`] without being constrained to the original
2267
    /// iterator order. The taken items will still maintain their relative order where that is
2268
    /// visible in `collect`, `reduce`, and similar outputs.
2269
    ///
2270
    /// # Examples
2271
    ///
2272
    /// ```
2273
    /// use rayon::prelude::*;
2274
    ///
2275
    /// let result: Vec<_> = (0..100)
2276
    ///     .into_par_iter()
2277
    ///     .take_any_while(|x| *x < 50)
2278
    ///     .collect();
2279
    ///
2280
    /// assert!(result.len() <= 50);
2281
    /// assert!(result.windows(2).all(|w| w[0] < w[1]));
2282
    /// ```
2283
    ///
2284
    /// ```
2285
    /// use rayon::prelude::*;
2286
    /// use std::sync::atomic::AtomicUsize;
2287
    /// use std::sync::atomic::Ordering::Relaxed;
2288
    ///
2289
    /// // Collect any group of items that sum <= 1000
2290
    /// let quota = AtomicUsize::new(1000);
2291
    /// let result: Vec<_> = (0_usize..100)
2292
    ///     .into_par_iter()
2293
    ///     .take_any_while(|&x| {
2294
    ///         quota.fetch_update(Relaxed, Relaxed, |q| q.checked_sub(x))
2295
    ///             .is_ok()
2296
    ///     })
2297
    ///     .collect();
2298
    ///
2299
    /// let sum = result.iter().sum::<usize>();
2300
    /// assert!(matches!(sum, 902..=1000));
2301
    /// ```
2302
0
    fn take_any_while<P>(self, predicate: P) -> TakeAnyWhile<Self, P>
2303
0
    where
2304
0
        P: Fn(&Self::Item) -> bool + Sync + Send,
2305
    {
2306
0
        TakeAnyWhile::new(self, predicate)
2307
0
    }
2308
2309
    /// Creates an iterator that skips elements from *anywhere* in the original iterator
2310
    /// until the given `predicate` returns `false`.
2311
    ///
2312
    /// The `predicate` may be anything -- e.g. it could be checking a fact about the item, a
2313
    /// global condition unrelated to the item itself, or some combination thereof.
2314
    ///
2315
    /// If parallel calls to the `predicate` race and give different results, then the
2316
    /// `true` results will still skip those particular items, while respecting the `false`
2317
    /// result from elsewhere to skip any further items.
2318
    ///
2319
    /// This is similar to [`Iterator::skip_while`] without being constrained to the original
2320
    /// iterator order. The remaining items will still maintain their relative order where that is
2321
    /// visible in `collect`, `reduce`, and similar outputs.
2322
    ///
2323
    /// # Examples
2324
    ///
2325
    /// ```
2326
    /// use rayon::prelude::*;
2327
    ///
2328
    /// let result: Vec<_> = (0..100)
2329
    ///     .into_par_iter()
2330
    ///     .skip_any_while(|x| *x < 50)
2331
    ///     .collect();
2332
    ///
2333
    /// assert!(result.len() >= 50);
2334
    /// assert!(result.windows(2).all(|w| w[0] < w[1]));
2335
    /// ```
2336
0
    fn skip_any_while<P>(self, predicate: P) -> SkipAnyWhile<Self, P>
2337
0
    where
2338
0
        P: Fn(&Self::Item) -> bool + Sync + Send,
2339
    {
2340
0
        SkipAnyWhile::new(self, predicate)
2341
0
    }
2342
2343
    /// Collects this iterator into a linked list of vectors.
2344
    ///
2345
    /// This is useful when you need to condense a parallel iterator into a collection,
2346
    /// but have no specific requirements for what that collection should be. If you
2347
    /// plan to store the collection longer-term, `Vec<T>` is, as always, likely the
2348
    /// best default choice, despite the overhead that comes from concatenating each
2349
    /// vector. Or, if this is an `IndexedParallelIterator`, you should also prefer to
2350
    /// just collect to a `Vec<T>`.
2351
    ///
2352
    /// Internally, most [`FromParallelIterator`]/[`ParallelExtend`] implementations
2353
    /// use this strategy; each job collecting their chunk of the iterator to a `Vec<T>`
2354
    /// and those chunks getting merged into a `LinkedList`, before then extending the
2355
    /// collection with each vector. This is a very efficient way to collect an
2356
    /// unindexed parallel iterator, without much intermediate data movement.
2357
    ///
2358
    /// # Examples
2359
    ///
2360
    /// ```
2361
    /// # use std::collections::LinkedList;
2362
    /// use rayon::prelude::*;
2363
    ///
2364
    /// let result: LinkedList<Vec<_>> = (0..=100)
2365
    ///     .into_par_iter()
2366
    ///     .filter(|x| x % 2 == 0)
2367
    ///     .flat_map(|x| 0..x)
2368
    ///     .collect_vec_list();
2369
    ///
2370
    /// // `par_iter.collect_vec_list().into_iter().flatten()` turns
2371
    /// // a parallel iterator into a serial one
2372
    /// let total_len = result.into_iter().flatten().count();
2373
    /// assert_eq!(total_len, 2550);
2374
    /// ```
2375
0
    fn collect_vec_list(self) -> LinkedList<Vec<Self::Item>> {
2376
0
        match extend::fast_collect(self) {
2377
0
            Either::Left(vec) => {
2378
0
                let mut list = LinkedList::new();
2379
0
                if !vec.is_empty() {
2380
0
                    list.push_back(vec);
2381
0
                }
2382
0
                list
2383
            }
2384
0
            Either::Right(list) => list,
2385
        }
2386
0
    }
2387
2388
    /// Internal method used to define the behavior of this parallel
2389
    /// iterator. You should not need to call this directly.
2390
    ///
2391
    /// This method causes the iterator `self` to start producing
2392
    /// items and to feed them to the consumer `consumer` one by one.
2393
    /// It may split the consumer before doing so to create the
2394
    /// opportunity to produce in parallel.
2395
    ///
2396
    /// See the [README] for more details on the internals of parallel
2397
    /// iterators.
2398
    ///
2399
    /// [README]: https://github.com/rayon-rs/rayon/blob/main/src/iter/plumbing/README.md
2400
    fn drive_unindexed<C>(self, consumer: C) -> C::Result
2401
    where
2402
        C: UnindexedConsumer<Self::Item>;
2403
2404
    /// Internal method used to define the behavior of this parallel
2405
    /// iterator. You should not need to call this directly.
2406
    ///
2407
    /// Returns the number of items produced by this iterator, if known
2408
    /// statically. This can be used by consumers to trigger special fast
2409
    /// paths. Therefore, if `Some(_)` is returned, this iterator must only
2410
    /// use the (indexed) `Consumer` methods when driving a consumer, such
2411
    /// as `split_at()`. Calling `UnindexedConsumer::split_off_left()` or
2412
    /// other `UnindexedConsumer` methods -- or returning an inaccurate
2413
    /// value -- may result in panics.
2414
    ///
2415
    /// This method is currently used to optimize `collect` for want
2416
    /// of true Rust specialization; it may be removed when
2417
    /// specialization is stable.
2418
0
    fn opt_len(&self) -> Option<usize> {
2419
0
        None
2420
0
    }
2421
}
2422
2423
impl<T: ParallelIterator> IntoParallelIterator for T {
2424
    type Iter = T;
2425
    type Item = T::Item;
2426
2427
0
    fn into_par_iter(self) -> T {
2428
0
        self
2429
0
    }
Unexecuted instantiation: <rayon::iter::unzip::UnzipA<rayon::iter::map::Map<rayon::vec::IntoIter<(rav1e::tiling::tiler::TileContextMut<u16>, &mut rav1e::context::cdf_context::CDFContext)>, rav1e::encoder::encode_tile_group<u16>::{closure#0}>, rayon::iter::unzip::Unzip, alloc::vec::Vec<rav1e::stats::EncoderStats>> as rayon::iter::IntoParallelIterator>::into_par_iter
Unexecuted instantiation: <rayon::iter::unzip::UnzipB<rayon::iter::map::Map<rayon::vec::IntoIter<(rav1e::tiling::tiler::TileContextMut<u16>, &mut rav1e::context::cdf_context::CDFContext)>, rav1e::encoder::encode_tile_group<u16>::{closure#0}>, rayon::iter::unzip::Unzip, rayon::iter::collect::consumer::CollectConsumer<alloc::vec::Vec<u8>>> as rayon::iter::IntoParallelIterator>::into_par_iter
Unexecuted instantiation: <rayon::iter::unzip::UnzipB<rayon::iter::map::Map<rayon::vec::IntoIter<(rav1e::tiling::tiler::TileContextMut<u16>, &mut rav1e::context::cdf_context::CDFContext)>, rav1e::encoder::encode_tile_group<u16>::{closure#0}>, rayon::iter::unzip::Unzip, rayon::iter::extend::ListVecConsumer> as rayon::iter::IntoParallelIterator>::into_par_iter
Unexecuted instantiation: <rayon::iter::unzip::UnzipA<rayon::iter::map::Map<rayon::vec::IntoIter<(rav1e::tiling::tiler::TileContextMut<u8>, &mut rav1e::context::cdf_context::CDFContext)>, rav1e::encoder::encode_tile_group<u8>::{closure#0}>, rayon::iter::unzip::Unzip, alloc::vec::Vec<rav1e::stats::EncoderStats>> as rayon::iter::IntoParallelIterator>::into_par_iter
Unexecuted instantiation: <rayon::iter::unzip::UnzipB<rayon::iter::map::Map<rayon::vec::IntoIter<(rav1e::tiling::tiler::TileContextMut<u8>, &mut rav1e::context::cdf_context::CDFContext)>, rav1e::encoder::encode_tile_group<u8>::{closure#0}>, rayon::iter::unzip::Unzip, rayon::iter::collect::consumer::CollectConsumer<alloc::vec::Vec<u8>>> as rayon::iter::IntoParallelIterator>::into_par_iter
Unexecuted instantiation: <rayon::iter::unzip::UnzipB<rayon::iter::map::Map<rayon::vec::IntoIter<(rav1e::tiling::tiler::TileContextMut<u8>, &mut rav1e::context::cdf_context::CDFContext)>, rav1e::encoder::encode_tile_group<u8>::{closure#0}>, rayon::iter::unzip::Unzip, rayon::iter::extend::ListVecConsumer> as rayon::iter::IntoParallelIterator>::into_par_iter
Unexecuted instantiation: <rayon::iter::once::Once<i8> as rayon::iter::IntoParallelIterator>::into_par_iter
Unexecuted instantiation: <rayon::iter::once::Once<u8> as rayon::iter::IntoParallelIterator>::into_par_iter
Unexecuted instantiation: <rayon::iter::once::Once<isize> as rayon::iter::IntoParallelIterator>::into_par_iter
Unexecuted instantiation: <rayon::iter::once::Once<usize> as rayon::iter::IntoParallelIterator>::into_par_iter
Unexecuted instantiation: <rayon::iter::once::Once<i32> as rayon::iter::IntoParallelIterator>::into_par_iter
Unexecuted instantiation: <rayon::iter::once::Once<u32> as rayon::iter::IntoParallelIterator>::into_par_iter
Unexecuted instantiation: <rayon::iter::once::Once<i128> as rayon::iter::IntoParallelIterator>::into_par_iter
Unexecuted instantiation: <rayon::iter::once::Once<u128> as rayon::iter::IntoParallelIterator>::into_par_iter
Unexecuted instantiation: <rayon::iter::once::Once<i16> as rayon::iter::IntoParallelIterator>::into_par_iter
Unexecuted instantiation: <rayon::iter::once::Once<u16> as rayon::iter::IntoParallelIterator>::into_par_iter
Unexecuted instantiation: <rayon::iter::once::Once<i64> as rayon::iter::IntoParallelIterator>::into_par_iter
Unexecuted instantiation: <rayon::iter::once::Once<u64> as rayon::iter::IntoParallelIterator>::into_par_iter
2430
}
2431
2432
/// An iterator that supports "random access" to its data, meaning
2433
/// that you can split it at arbitrary indices and draw data from
2434
/// those points.
2435
///
2436
/// **Note:** Not implemented for `u64`, `i64`, `u128`, or `i128` ranges
2437
// Waiting for `ExactSizeIterator::is_empty` to be stabilized. See rust-lang/rust#35428
2438
#[allow(clippy::len_without_is_empty)]
2439
pub trait IndexedParallelIterator: ParallelIterator {
2440
    /// Divides an iterator into sequential blocks of exponentially-increasing size.
2441
    ///
2442
    /// Normally, parallel iterators are recursively divided into tasks in parallel.
2443
    /// This adaptor changes the default behavior by splitting the iterator into a **sequence**
2444
    /// of parallel iterators of increasing sizes.
2445
    /// Sizes grow exponentially in order to avoid creating
2446
    /// too many blocks. This also allows to balance the current block with all previous ones.
2447
    ///
2448
    /// This can have many applications but the most notable ones are:
2449
    /// - better performance with [`find_first()`][ParallelIterator::find_first]
2450
    /// - more predictable performance with [`find_any()`][ParallelIterator::find_any]
2451
    ///   or any interruptible computation
2452
    ///
2453
    /// # Examples
2454
    ///
2455
    /// ```
2456
    /// use rayon::prelude::*;
2457
    /// assert_eq!((0..10_000).into_par_iter()
2458
    ///                       .by_exponential_blocks()
2459
    ///                       .find_first(|&e| e==4_999), Some(4_999))
2460
    /// ```
2461
    ///
2462
    /// In this example, without blocks, rayon will split the initial range into two but all work
2463
    /// on the right hand side (from 5,000 onwards) is **useless** since the sequential algorithm
2464
    /// never goes there. This means that if two threads are used there will be **no** speedup **at
2465
    /// all**.
2466
    ///
2467
    /// `by_exponential_blocks` on the other hand will start with the leftmost range from 0
2468
    /// to `p` (threads number), continue with p to 3p, the 3p to 7p...
2469
    ///
2470
    /// Each subrange is treated in parallel, while all subranges are treated sequentially.
2471
    /// We therefore ensure a logarithmic number of blocks (and overhead) while guaranteeing
2472
    /// we stop at the first block containing the searched data.
2473
0
    fn by_exponential_blocks(self) -> ExponentialBlocks<Self> {
2474
0
        ExponentialBlocks::new(self)
2475
0
    }
2476
2477
    /// Divides an iterator into sequential blocks of the given size.
2478
    ///
2479
    /// Normally, parallel iterators are recursively divided into tasks in parallel.
2480
    /// This adaptor changes the default behavior by splitting the iterator into a **sequence**
2481
    /// of parallel iterators of given `block_size`.
2482
    /// The main application is to obtain better
2483
    /// memory locality (especially if the reduce operation re-use folded data).
2484
    ///
2485
    /// **Panics** if `block_size` is 0.
2486
    ///
2487
    /// # Example
2488
    /// ```
2489
    /// use rayon::prelude::*;
2490
    /// // during most reductions v1 and v2 fit the cache
2491
    /// let v = (0u32..10_000_000)
2492
    ///     .into_par_iter()
2493
    ///     .by_uniform_blocks(1_000_000)
2494
    ///     .fold(Vec::new, |mut v, e| { v.push(e); v})
2495
    ///     .reduce(Vec::new, |mut v1, mut v2| { v1.append(&mut v2); v1});
2496
    /// assert_eq!(v, (0u32..10_000_000).collect::<Vec<u32>>());
2497
    /// ```
2498
    #[track_caller]
2499
0
    fn by_uniform_blocks(self, block_size: usize) -> UniformBlocks<Self> {
2500
0
        assert!(block_size != 0, "block_size must not be zero");
2501
0
        UniformBlocks::new(self, block_size)
2502
0
    }
2503
2504
    /// Collects the results of the iterator into the specified
2505
    /// vector. The vector is always cleared before execution
2506
    /// begins. If possible, reusing the vector across calls can lead
2507
    /// to better performance since it reuses the same backing buffer.
2508
    ///
2509
    /// # Examples
2510
    ///
2511
    /// ```
2512
    /// use rayon::prelude::*;
2513
    ///
2514
    /// // any prior data will be cleared
2515
    /// let mut vec = vec![-1, -2, -3];
2516
    ///
2517
    /// (0..5).into_par_iter()
2518
    ///     .collect_into_vec(&mut vec);
2519
    ///
2520
    /// assert_eq!(vec, [0, 1, 2, 3, 4]);
2521
    /// ```
2522
0
    fn collect_into_vec(self, target: &mut Vec<Self::Item>) {
2523
0
        collect::collect_into_vec(self, target);
2524
0
    }
2525
2526
    /// Unzips the results of the iterator into the specified
2527
    /// vectors. The vectors are always cleared before execution
2528
    /// begins. If possible, reusing the vectors across calls can lead
2529
    /// to better performance since they reuse the same backing buffer.
2530
    ///
2531
    /// # Examples
2532
    ///
2533
    /// ```
2534
    /// use rayon::prelude::*;
2535
    ///
2536
    /// // any prior data will be cleared
2537
    /// let mut left = vec![42; 10];
2538
    /// let mut right = vec![-1; 10];
2539
    ///
2540
    /// (10..15).into_par_iter()
2541
    ///     .enumerate()
2542
    ///     .unzip_into_vecs(&mut left, &mut right);
2543
    ///
2544
    /// assert_eq!(left, [0, 1, 2, 3, 4]);
2545
    /// assert_eq!(right, [10, 11, 12, 13, 14]);
2546
    /// ```
2547
0
    fn unzip_into_vecs<A, B>(self, left: &mut Vec<A>, right: &mut Vec<B>)
2548
0
    where
2549
0
        Self: IndexedParallelIterator<Item = (A, B)>,
2550
0
        A: Send,
2551
0
        B: Send,
2552
    {
2553
0
        collect::unzip_into_vecs(self, left, right);
2554
0
    }
2555
2556
    /// Iterates over tuples `(A, B)`, where the items `A` are from
2557
    /// this iterator and `B` are from the iterator given as argument.
2558
    /// Like the `zip` method on ordinary iterators, if the two
2559
    /// iterators are of unequal length, you only get the items they
2560
    /// have in common.
2561
    ///
2562
    /// # Examples
2563
    ///
2564
    /// ```
2565
    /// use rayon::prelude::*;
2566
    ///
2567
    /// let result: Vec<_> = (1..4)
2568
    ///     .into_par_iter()
2569
    ///     .zip(vec!['a', 'b', 'c'])
2570
    ///     .collect();
2571
    ///
2572
    /// assert_eq!(result, [(1, 'a'), (2, 'b'), (3, 'c')]);
2573
    /// ```
2574
0
    fn zip<Z>(self, zip_op: Z) -> Zip<Self, Z::Iter>
2575
0
    where
2576
0
        Z: IntoParallelIterator<Iter: IndexedParallelIterator>,
2577
    {
2578
0
        Zip::new(self, zip_op.into_par_iter())
2579
0
    }
2580
2581
    /// The same as `Zip`, but requires that both iterators have the same length.
2582
    ///
2583
    /// # Panics
2584
    /// Will panic if `self` and `zip_op` are not the same length.
2585
    ///
2586
    /// ```should_panic
2587
    /// use rayon::prelude::*;
2588
    ///
2589
    /// let one = [1u8];
2590
    /// let two = [2u8, 2];
2591
    /// let one_iter = one.par_iter();
2592
    /// let two_iter = two.par_iter();
2593
    ///
2594
    /// // this will panic
2595
    /// let zipped: Vec<(&u8, &u8)> = one_iter.zip_eq(two_iter).collect();
2596
    ///
2597
    /// // we should never get here
2598
    /// assert_eq!(1, zipped.len());
2599
    /// ```
2600
    #[track_caller]
2601
0
    fn zip_eq<Z>(self, zip_op: Z) -> ZipEq<Self, Z::Iter>
2602
0
    where
2603
0
        Z: IntoParallelIterator<Iter: IndexedParallelIterator>,
2604
    {
2605
0
        let zip_op_iter = zip_op.into_par_iter();
2606
0
        assert_eq!(
2607
0
            self.len(),
2608
0
            zip_op_iter.len(),
2609
0
            "iterators must have the same length"
2610
        );
2611
0
        ZipEq::new(self, zip_op_iter)
2612
0
    }
2613
2614
    /// Interleaves elements of this iterator and the other given
2615
    /// iterator. Alternately yields elements from this iterator and
2616
    /// the given iterator, until both are exhausted. If one iterator
2617
    /// is exhausted before the other, the last elements are provided
2618
    /// from the other.
2619
    ///
2620
    /// # Examples
2621
    ///
2622
    /// ```
2623
    /// use rayon::prelude::*;
2624
    /// let (x, y) = (vec![1, 2], vec![3, 4, 5, 6]);
2625
    /// let r: Vec<i32> = x.into_par_iter().interleave(y).collect();
2626
    /// assert_eq!(r, vec![1, 3, 2, 4, 5, 6]);
2627
    /// ```
2628
0
    fn interleave<I>(self, other: I) -> Interleave<Self, I::Iter>
2629
0
    where
2630
0
        I: IntoParallelIterator<Item = Self::Item, Iter: IndexedParallelIterator>,
2631
    {
2632
0
        Interleave::new(self, other.into_par_iter())
2633
0
    }
2634
2635
    /// Interleaves elements of this iterator and the other given
2636
    /// iterator, until one is exhausted.
2637
    ///
2638
    /// # Examples
2639
    ///
2640
    /// ```
2641
    /// use rayon::prelude::*;
2642
    /// let (x, y) = (vec![1, 2, 3, 4], vec![5, 6]);
2643
    /// let r: Vec<i32> = x.into_par_iter().interleave_shortest(y).collect();
2644
    /// assert_eq!(r, vec![1, 5, 2, 6, 3]);
2645
    /// ```
2646
0
    fn interleave_shortest<I>(self, other: I) -> InterleaveShortest<Self, I::Iter>
2647
0
    where
2648
0
        I: IntoParallelIterator<Item = Self::Item, Iter: IndexedParallelIterator>,
2649
    {
2650
0
        InterleaveShortest::new(self, other.into_par_iter())
2651
0
    }
2652
2653
    /// Splits an iterator up into fixed-size chunks.
2654
    ///
2655
    /// Returns an iterator that returns `Vec`s of the given number of elements.
2656
    /// If the number of elements in the iterator is not divisible by `chunk_size`,
2657
    /// the last chunk may be shorter than `chunk_size`.
2658
    ///
2659
    /// See also [`par_chunks()`] and [`par_chunks_mut()`] for similar behavior on
2660
    /// slices, without having to allocate intermediate `Vec`s for the chunks.
2661
    ///
2662
    /// [`par_chunks()`]: crate::slice::ParallelSlice::par_chunks()
2663
    /// [`par_chunks_mut()`]: crate::slice::ParallelSliceMut::par_chunks_mut()
2664
    ///
2665
    /// **Panics** if `chunk_size` is 0.
2666
    ///
2667
    /// # Examples
2668
    ///
2669
    /// ```
2670
    /// use rayon::prelude::*;
2671
    /// let a = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
2672
    /// let r: Vec<Vec<i32>> = a.into_par_iter().chunks(3).collect();
2673
    /// assert_eq!(r, vec![vec![1,2,3], vec![4,5,6], vec![7,8,9], vec![10]]);
2674
    /// ```
2675
    #[track_caller]
2676
0
    fn chunks(self, chunk_size: usize) -> Chunks<Self> {
2677
0
        assert!(chunk_size != 0, "chunk_size must not be zero");
2678
0
        Chunks::new(self, chunk_size)
2679
0
    }
2680
2681
    /// Splits an iterator into fixed-size chunks, performing a sequential [`fold()`] on
2682
    /// each chunk.
2683
    ///
2684
    /// Returns an iterator that produces a folded result for each chunk of items
2685
    /// produced by this iterator.
2686
    ///
2687
    /// This works essentially like:
2688
    ///
2689
    /// ```text
2690
    /// iter.chunks(chunk_size)
2691
    ///     .map(|chunk|
2692
    ///         chunk.into_iter()
2693
    ///             .fold(identity, fold_op)
2694
    ///     )
2695
    /// ```
2696
    ///
2697
    /// except there is no per-chunk allocation overhead.
2698
    ///
2699
    /// [`fold()`]: std::iter::Iterator#method.fold
2700
    ///
2701
    /// **Panics** if `chunk_size` is 0.
2702
    ///
2703
    /// # Examples
2704
    ///
2705
    /// ```
2706
    /// use rayon::prelude::*;
2707
    /// let nums = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
2708
    /// let chunk_sums = nums.into_par_iter().fold_chunks(2, || 0, |a, n| a + n).collect::<Vec<_>>();
2709
    /// assert_eq!(chunk_sums, vec![3, 7, 11, 15, 19]);
2710
    /// ```
2711
    #[track_caller]
2712
0
    fn fold_chunks<T, ID, F>(
2713
0
        self,
2714
0
        chunk_size: usize,
2715
0
        identity: ID,
2716
0
        fold_op: F,
2717
0
    ) -> FoldChunks<Self, ID, F>
2718
0
    where
2719
0
        ID: Fn() -> T + Send + Sync,
2720
0
        F: Fn(T, Self::Item) -> T + Send + Sync,
2721
0
        T: Send,
2722
    {
2723
0
        assert!(chunk_size != 0, "chunk_size must not be zero");
2724
0
        FoldChunks::new(self, chunk_size, identity, fold_op)
2725
0
    }
2726
2727
    /// Splits an iterator into fixed-size chunks, performing a sequential [`fold()`] on
2728
    /// each chunk.
2729
    ///
2730
    /// Returns an iterator that produces a folded result for each chunk of items
2731
    /// produced by this iterator.
2732
    ///
2733
    /// This works essentially like `fold_chunks(chunk_size, || init.clone(), fold_op)`,
2734
    /// except it doesn't require the `init` type to be `Sync`, nor any other form of
2735
    /// added synchronization.
2736
    ///
2737
    /// [`fold()`]: std::iter::Iterator#method.fold
2738
    ///
2739
    /// **Panics** if `chunk_size` is 0.
2740
    ///
2741
    /// # Examples
2742
    ///
2743
    /// ```
2744
    /// use rayon::prelude::*;
2745
    /// let nums = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
2746
    /// let chunk_sums = nums.into_par_iter().fold_chunks_with(2, 0, |a, n| a + n).collect::<Vec<_>>();
2747
    /// assert_eq!(chunk_sums, vec![3, 7, 11, 15, 19]);
2748
    /// ```
2749
    #[track_caller]
2750
0
    fn fold_chunks_with<T, F>(
2751
0
        self,
2752
0
        chunk_size: usize,
2753
0
        init: T,
2754
0
        fold_op: F,
2755
0
    ) -> FoldChunksWith<Self, T, F>
2756
0
    where
2757
0
        T: Send + Clone,
2758
0
        F: Fn(T, Self::Item) -> T + Send + Sync,
2759
    {
2760
0
        assert!(chunk_size != 0, "chunk_size must not be zero");
2761
0
        FoldChunksWith::new(self, chunk_size, init, fold_op)
2762
0
    }
2763
2764
    /// Lexicographically compares the elements of this `ParallelIterator` with those of
2765
    /// another.
2766
    ///
2767
    /// # Examples
2768
    ///
2769
    /// ```
2770
    /// use rayon::prelude::*;
2771
    /// use std::cmp::Ordering::*;
2772
    ///
2773
    /// let x = vec![1, 2, 3];
2774
    /// assert_eq!(x.par_iter().cmp(&vec![1, 3, 0]), Less);
2775
    /// assert_eq!(x.par_iter().cmp(&vec![1, 2, 3]), Equal);
2776
    /// assert_eq!(x.par_iter().cmp(&vec![1, 2]), Greater);
2777
    /// ```
2778
0
    fn cmp<I>(self, other: I) -> Ordering
2779
0
    where
2780
0
        I: IntoParallelIterator<Item = Self::Item, Iter: IndexedParallelIterator>,
2781
0
        Self::Item: Ord,
2782
    {
2783
        #[inline]
2784
0
        fn ordering<T: Ord>((x, y): (T, T)) -> Ordering {
2785
0
            Ord::cmp(&x, &y)
2786
0
        }
2787
2788
        #[inline]
2789
0
        fn inequal(&ord: &Ordering) -> bool {
2790
0
            ord != Ordering::Equal
2791
0
        }
2792
2793
0
        let other = other.into_par_iter();
2794
0
        let ord_len = self.len().cmp(&other.len());
2795
0
        self.zip(other)
2796
0
            .map(ordering)
2797
0
            .find_first(inequal)
2798
0
            .unwrap_or(ord_len)
2799
0
    }
2800
2801
    /// Lexicographically compares the elements of this `ParallelIterator` with those of
2802
    /// another.
2803
    ///
2804
    /// # Examples
2805
    ///
2806
    /// ```
2807
    /// use rayon::prelude::*;
2808
    /// use std::cmp::Ordering::*;
2809
    ///
2810
    /// let x = vec![1.0, 2.0, 3.0];
2811
    /// assert_eq!(x.par_iter().partial_cmp(&vec![1.0, 3.0, 0.0]), Some(Less));
2812
    /// assert_eq!(x.par_iter().partial_cmp(&vec![1.0, 2.0, 3.0]), Some(Equal));
2813
    /// assert_eq!(x.par_iter().partial_cmp(&vec![1.0, 2.0]), Some(Greater));
2814
    /// assert_eq!(x.par_iter().partial_cmp(&vec![1.0, f64::NAN]), None);
2815
    /// ```
2816
0
    fn partial_cmp<I>(self, other: I) -> Option<Ordering>
2817
0
    where
2818
0
        I: IntoParallelIterator<Iter: IndexedParallelIterator>,
2819
0
        Self::Item: PartialOrd<I::Item>,
2820
    {
2821
        #[inline]
2822
0
        fn ordering<T: PartialOrd<U>, U>((x, y): (T, U)) -> Option<Ordering> {
2823
0
            PartialOrd::partial_cmp(&x, &y)
2824
0
        }
2825
2826
        #[inline]
2827
0
        fn inequal(&ord: &Option<Ordering>) -> bool {
2828
0
            ord != Some(Ordering::Equal)
2829
0
        }
2830
2831
0
        let other = other.into_par_iter();
2832
0
        let ord_len = self.len().cmp(&other.len());
2833
0
        self.zip(other)
2834
0
            .map(ordering)
2835
0
            .find_first(inequal)
2836
0
            .unwrap_or(Some(ord_len))
2837
0
    }
2838
2839
    /// Determines if the elements of this `ParallelIterator`
2840
    /// are equal to those of another
2841
0
    fn eq<I>(self, other: I) -> bool
2842
0
    where
2843
0
        I: IntoParallelIterator<Iter: IndexedParallelIterator>,
2844
0
        Self::Item: PartialEq<I::Item>,
2845
    {
2846
        #[inline]
2847
0
        fn eq<T: PartialEq<U>, U>((x, y): (T, U)) -> bool {
2848
0
            PartialEq::eq(&x, &y)
2849
0
        }
2850
2851
0
        let other = other.into_par_iter();
2852
0
        self.len() == other.len() && self.zip(other).all(eq)
2853
0
    }
2854
2855
    /// Determines if the elements of this `ParallelIterator`
2856
    /// are unequal to those of another
2857
0
    fn ne<I>(self, other: I) -> bool
2858
0
    where
2859
0
        I: IntoParallelIterator<Iter: IndexedParallelIterator>,
2860
0
        Self::Item: PartialEq<I::Item>,
2861
    {
2862
0
        !self.eq(other)
2863
0
    }
2864
2865
    /// Determines if the elements of this `ParallelIterator`
2866
    /// are lexicographically less than those of another.
2867
0
    fn lt<I>(self, other: I) -> bool
2868
0
    where
2869
0
        I: IntoParallelIterator<Iter: IndexedParallelIterator>,
2870
0
        Self::Item: PartialOrd<I::Item>,
2871
    {
2872
0
        self.partial_cmp(other) == Some(Ordering::Less)
2873
0
    }
2874
2875
    /// Determines if the elements of this `ParallelIterator`
2876
    /// are less than or equal to those of another.
2877
0
    fn le<I>(self, other: I) -> bool
2878
0
    where
2879
0
        I: IntoParallelIterator<Iter: IndexedParallelIterator>,
2880
0
        Self::Item: PartialOrd<I::Item>,
2881
    {
2882
0
        let ord = self.partial_cmp(other);
2883
0
        ord == Some(Ordering::Equal) || ord == Some(Ordering::Less)
2884
0
    }
2885
2886
    /// Determines if the elements of this `ParallelIterator`
2887
    /// are lexicographically greater than those of another.
2888
0
    fn gt<I>(self, other: I) -> bool
2889
0
    where
2890
0
        I: IntoParallelIterator<Iter: IndexedParallelIterator>,
2891
0
        Self::Item: PartialOrd<I::Item>,
2892
    {
2893
0
        self.partial_cmp(other) == Some(Ordering::Greater)
2894
0
    }
2895
2896
    /// Determines if the elements of this `ParallelIterator`
2897
    /// are greater than or equal to those of another.
2898
0
    fn ge<I>(self, other: I) -> bool
2899
0
    where
2900
0
        I: IntoParallelIterator<Iter: IndexedParallelIterator>,
2901
0
        Self::Item: PartialOrd<I::Item>,
2902
    {
2903
0
        let ord = self.partial_cmp(other);
2904
0
        ord == Some(Ordering::Equal) || ord == Some(Ordering::Greater)
2905
0
    }
2906
2907
    /// Yields an index along with each item.
2908
    ///
2909
    /// # Examples
2910
    ///
2911
    /// ```
2912
    /// use rayon::prelude::*;
2913
    ///
2914
    /// let chars = vec!['a', 'b', 'c'];
2915
    /// let result: Vec<_> = chars
2916
    ///     .into_par_iter()
2917
    ///     .enumerate()
2918
    ///     .collect();
2919
    ///
2920
    /// assert_eq!(result, [(0, 'a'), (1, 'b'), (2, 'c')]);
2921
    /// ```
2922
0
    fn enumerate(self) -> Enumerate<Self> {
2923
0
        Enumerate::new(self)
2924
0
    }
Unexecuted instantiation: <rayon::slice::IterMut<rav1e::tiling::plane_region::PlaneRegionMut<u16>> as rayon::iter::IndexedParallelIterator>::enumerate
Unexecuted instantiation: <rayon::slice::IterMut<rav1e::tiling::plane_region::PlaneRegionMut<u8>> as rayon::iter::IndexedParallelIterator>::enumerate
Unexecuted instantiation: <_ as rayon::iter::IndexedParallelIterator>::enumerate
2925
2926
    /// Creates an iterator that steps by the given amount
2927
    ///
2928
    /// # Examples
2929
    ///
2930
    /// ```
2931
    ///use rayon::prelude::*;
2932
    ///
2933
    /// let range = (3..10);
2934
    /// let result: Vec<i32> = range
2935
    ///    .into_par_iter()
2936
    ///    .step_by(3)
2937
    ///    .collect();
2938
    ///
2939
    /// assert_eq!(result, [3, 6, 9])
2940
    /// ```
2941
0
    fn step_by(self, step: usize) -> StepBy<Self> {
2942
0
        StepBy::new(self, step)
2943
0
    }
2944
2945
    /// Creates an iterator that skips the first `n` elements.
2946
    ///
2947
    /// # Examples
2948
    ///
2949
    /// ```
2950
    /// use rayon::prelude::*;
2951
    ///
2952
    /// let result: Vec<_> = (0..100)
2953
    ///     .into_par_iter()
2954
    ///     .skip(95)
2955
    ///     .collect();
2956
    ///
2957
    /// assert_eq!(result, [95, 96, 97, 98, 99]);
2958
    /// ```
2959
0
    fn skip(self, n: usize) -> Skip<Self> {
2960
0
        Skip::new(self, n)
2961
0
    }
2962
2963
    /// Creates an iterator that yields the first `n` elements.
2964
    ///
2965
    /// # Examples
2966
    ///
2967
    /// ```
2968
    /// use rayon::prelude::*;
2969
    ///
2970
    /// let result: Vec<_> = (0..100)
2971
    ///     .into_par_iter()
2972
    ///     .take(5)
2973
    ///     .collect();
2974
    ///
2975
    /// assert_eq!(result, [0, 1, 2, 3, 4]);
2976
    /// ```
2977
0
    fn take(self, n: usize) -> Take<Self> {
2978
0
        Take::new(self, n)
2979
0
    }
2980
2981
    /// Searches for **some** item in the parallel iterator that
2982
    /// matches the given predicate, and returns its index.  Like
2983
    /// `ParallelIterator::find_any`, the parallel search will not
2984
    /// necessarily find the **first** match, and once a match is
2985
    /// found we'll attempt to stop processing any more.
2986
    ///
2987
    /// # Examples
2988
    ///
2989
    /// ```
2990
    /// use rayon::prelude::*;
2991
    ///
2992
    /// let a = [1, 2, 3, 3];
2993
    ///
2994
    /// let i = a.par_iter().position_any(|&x| x == 3).expect("found");
2995
    /// assert!(i == 2 || i == 3);
2996
    ///
2997
    /// assert_eq!(a.par_iter().position_any(|&x| x == 100), None);
2998
    /// ```
2999
0
    fn position_any<P>(self, predicate: P) -> Option<usize>
3000
0
    where
3001
0
        P: Fn(Self::Item) -> bool + Sync + Send,
3002
    {
3003
        #[inline]
3004
0
        fn check(&(_, p): &(usize, bool)) -> bool {
3005
0
            p
3006
0
        }
3007
3008
0
        let (i, _) = self.map(predicate).enumerate().find_any(check)?;
3009
0
        Some(i)
3010
0
    }
3011
3012
    /// Searches for the sequentially **first** item in the parallel iterator
3013
    /// that matches the given predicate, and returns its index.
3014
    ///
3015
    /// Like `ParallelIterator::find_first`, once a match is found,
3016
    /// all attempts to the right of the match will be stopped, while
3017
    /// attempts to the left must continue in case an earlier match
3018
    /// is found.
3019
    ///
3020
    /// Note that not all parallel iterators have a useful order, much like
3021
    /// sequential `HashMap` iteration, so "first" may be nebulous.  If you
3022
    /// just want the first match that discovered anywhere in the iterator,
3023
    /// `position_any` is a better choice.
3024
    ///
3025
    /// # Examples
3026
    ///
3027
    /// ```
3028
    /// use rayon::prelude::*;
3029
    ///
3030
    /// let a = [1, 2, 3, 3];
3031
    ///
3032
    /// assert_eq!(a.par_iter().position_first(|&x| x == 3), Some(2));
3033
    ///
3034
    /// assert_eq!(a.par_iter().position_first(|&x| x == 100), None);
3035
    /// ```
3036
0
    fn position_first<P>(self, predicate: P) -> Option<usize>
3037
0
    where
3038
0
        P: Fn(Self::Item) -> bool + Sync + Send,
3039
    {
3040
        #[inline]
3041
0
        fn check(&(_, p): &(usize, bool)) -> bool {
3042
0
            p
3043
0
        }
3044
3045
0
        let (i, _) = self.map(predicate).enumerate().find_first(check)?;
3046
0
        Some(i)
3047
0
    }
3048
3049
    /// Searches for the sequentially **last** item in the parallel iterator
3050
    /// that matches the given predicate, and returns its index.
3051
    ///
3052
    /// Like `ParallelIterator::find_last`, once a match is found,
3053
    /// all attempts to the left of the match will be stopped, while
3054
    /// attempts to the right must continue in case a later match
3055
    /// is found.
3056
    ///
3057
    /// Note that not all parallel iterators have a useful order, much like
3058
    /// sequential `HashMap` iteration, so "last" may be nebulous.  When the
3059
    /// order doesn't actually matter to you, `position_any` is a better
3060
    /// choice.
3061
    ///
3062
    /// # Examples
3063
    ///
3064
    /// ```
3065
    /// use rayon::prelude::*;
3066
    ///
3067
    /// let a = [1, 2, 3, 3];
3068
    ///
3069
    /// assert_eq!(a.par_iter().position_last(|&x| x == 3), Some(3));
3070
    ///
3071
    /// assert_eq!(a.par_iter().position_last(|&x| x == 100), None);
3072
    /// ```
3073
0
    fn position_last<P>(self, predicate: P) -> Option<usize>
3074
0
    where
3075
0
        P: Fn(Self::Item) -> bool + Sync + Send,
3076
    {
3077
        #[inline]
3078
0
        fn check(&(_, p): &(usize, bool)) -> bool {
3079
0
            p
3080
0
        }
3081
3082
0
        let (i, _) = self.map(predicate).enumerate().find_last(check)?;
3083
0
        Some(i)
3084
0
    }
3085
3086
    #[doc(hidden)]
3087
    #[deprecated(
3088
        note = "parallel `position` does not search in order -- use `position_any`, \\
3089
                `position_first`, or `position_last`"
3090
    )]
3091
0
    fn position<P>(self, predicate: P) -> Option<usize>
3092
0
    where
3093
0
        P: Fn(Self::Item) -> bool + Sync + Send,
3094
    {
3095
0
        self.position_any(predicate)
3096
0
    }
3097
3098
    /// Searches for items in the parallel iterator that match the given
3099
    /// predicate, and returns their indices.
3100
    ///
3101
    /// # Examples
3102
    ///
3103
    /// ```
3104
    /// use rayon::prelude::*;
3105
    ///
3106
    /// let primes = vec![2, 3, 5, 7, 11, 13, 17, 19, 23, 29];
3107
    ///
3108
    /// // Find the positions of primes congruent to 1 modulo 6
3109
    /// let p1mod6: Vec<_> = primes.par_iter().positions(|&p| p % 6 == 1).collect();
3110
    /// assert_eq!(p1mod6, [3, 5, 7]); // primes 7, 13, and 19
3111
    ///
3112
    /// // Find the positions of primes congruent to 5 modulo 6
3113
    /// let p5mod6: Vec<_> = primes.par_iter().positions(|&p| p % 6 == 5).collect();
3114
    /// assert_eq!(p5mod6, [2, 4, 6, 8, 9]); // primes 5, 11, 17, 23, and 29
3115
    /// ```
3116
0
    fn positions<P>(self, predicate: P) -> Positions<Self, P>
3117
0
    where
3118
0
        P: Fn(Self::Item) -> bool + Sync + Send,
3119
    {
3120
0
        Positions::new(self, predicate)
3121
0
    }
3122
3123
    /// Produces a new iterator with the elements of this iterator in
3124
    /// reverse order.
3125
    ///
3126
    /// # Examples
3127
    ///
3128
    /// ```
3129
    /// use rayon::prelude::*;
3130
    ///
3131
    /// let result: Vec<_> = (0..5)
3132
    ///     .into_par_iter()
3133
    ///     .rev()
3134
    ///     .collect();
3135
    ///
3136
    /// assert_eq!(result, [4, 3, 2, 1, 0]);
3137
    /// ```
3138
0
    fn rev(self) -> Rev<Self> {
3139
0
        Rev::new(self)
3140
0
    }
3141
3142
    /// Sets the minimum length of iterators desired to process in each
3143
    /// rayon job.  Rayon will not split any smaller than this length, but
3144
    /// of course an iterator could already be smaller to begin with.
3145
    ///
3146
    /// Producers like `zip` and `interleave` will use greater of the two
3147
    /// minimums.
3148
    /// Chained iterators and iterators inside `flat_map` may each use
3149
    /// their own minimum length.
3150
    ///
3151
    /// # Examples
3152
    ///
3153
    /// ```
3154
    /// use rayon::prelude::*;
3155
    ///
3156
    /// let min = (0..1_000_000)
3157
    ///     .into_par_iter()
3158
    ///     .with_min_len(1234)
3159
    ///     .fold(|| 0, |acc, _| acc + 1) // count how many are in this segment
3160
    ///     .min().unwrap();
3161
    ///
3162
    /// assert!(min >= 1234);
3163
    /// ```
3164
0
    fn with_min_len(self, min: usize) -> MinLen<Self> {
3165
0
        MinLen::new(self, min)
3166
0
    }
3167
3168
    /// Sets the maximum length of iterators desired to process in each
3169
    /// rayon job.  Rayon will try to split at least below this length,
3170
    /// unless that would put it below the length from `with_min_len()`.
3171
    /// For example, given min=10 and max=15, a length of 16 will not be
3172
    /// split any further.
3173
    ///
3174
    /// Producers like `zip` and `interleave` will use lesser of the two
3175
    /// maximums.
3176
    /// Chained iterators and iterators inside `flat_map` may each use
3177
    /// their own maximum length.
3178
    ///
3179
    /// # Examples
3180
    ///
3181
    /// ```
3182
    /// use rayon::prelude::*;
3183
    ///
3184
    /// let max = (0..1_000_000)
3185
    ///     .into_par_iter()
3186
    ///     .with_max_len(1234)
3187
    ///     .fold(|| 0, |acc, _| acc + 1) // count how many are in this segment
3188
    ///     .max().unwrap();
3189
    ///
3190
    /// assert!(max <= 1234);
3191
    /// ```
3192
0
    fn with_max_len(self, max: usize) -> MaxLen<Self> {
3193
0
        MaxLen::new(self, max)
3194
0
    }
3195
3196
    /// Produces an exact count of how many items this iterator will
    /// produce, presuming no panic occurs.
    ///
    /// # Examples
    ///
    /// ```
    /// use rayon::prelude::*;
    ///
    /// // `zip` truncates to the shorter side, so the result has length 10.
    /// let par_iter = (0..100).into_par_iter().zip(vec![0; 10]);
    /// assert_eq!(par_iter.len(), 10);
    ///
    /// let vec: Vec<_> = par_iter.collect();
    /// assert_eq!(vec.len(), 10);
    /// ```
    fn len(&self) -> usize;
3211
3212
    /// Internal method used to define the behavior of this parallel
    /// iterator. You should not need to call this directly.
    ///
    /// This method causes the iterator `self` to start producing
    /// items and to feed them to the consumer `consumer` one by one.
    /// It may split the consumer before doing so to create the
    /// opportunity to produce in parallel. If a split does happen, it
    /// will inform the consumer of the index where the split should
    /// occur (unlike `ParallelIterator::drive_unindexed()`).
    ///
    /// This is a required method: each concrete indexed iterator supplies
    /// its own implementation.
    ///
    /// See the [README] for more details on the internals of parallel
    /// iterators.
    ///
    /// [README]: https://github.com/rayon-rs/rayon/blob/main/src/iter/plumbing/README.md
    fn drive<C: Consumer<Self::Item>>(self, consumer: C) -> C::Result;
3227
3228
    /// Internal method used to define the behavior of this parallel
    /// iterator. You should not need to call this directly.
    ///
    /// This method converts the iterator into a producer P and then
    /// invokes `callback.callback()` with P. Note that the type of
    /// this producer is not defined as part of the API, since
    /// `callback` must be defined generically for all producers. This
    /// allows the producer type to contain references; it also means
    /// that parallel iterators can adjust that type without causing a
    /// breaking change.
    ///
    /// This is a required method: each concrete indexed iterator supplies
    /// its own implementation.
    ///
    /// See the [README] for more details on the internals of parallel
    /// iterators.
    ///
    /// [README]: https://github.com/rayon-rs/rayon/blob/main/src/iter/plumbing/README.md
    fn with_producer<CB: ProducerCallback<Self::Item>>(self, callback: CB) -> CB::Output;
3244
}
3245
3246
/// `FromParallelIterator` implements the creation of a collection
/// from a [`ParallelIterator`]. By implementing
/// `FromParallelIterator` for a given type, you define how it will be
/// created from an iterator.
///
/// This is the parallel analogue of [`std::iter::FromIterator`].
///
/// `FromParallelIterator` is used through [`ParallelIterator`]'s [`collect()`] method.
///
/// [`collect()`]: ParallelIterator::collect()
///
/// # Examples
///
/// Implementing `FromParallelIterator` for your type:
///
/// ```
/// use rayon::prelude::*;
///
/// struct BlackHole {
///     mass: usize,
/// }
///
/// impl<T: Send> FromParallelIterator<T> for BlackHole {
///     fn from_par_iter<I>(par_iter: I) -> Self
///         where I: IntoParallelIterator<Item = T>
///     {
///         let par_iter = par_iter.into_par_iter();
///         BlackHole {
///             mass: par_iter.count() * size_of::<T>(),
///         }
///     }
/// }
///
/// let bh: BlackHole = (0i32..1000).into_par_iter().collect();
/// assert_eq!(bh.mass, 4000);
/// ```
pub trait FromParallelIterator<T>
where
    T: Send,
{
    /// Creates an instance of the collection from the parallel iterator `par_iter`.
    ///
    /// If your collection is not naturally parallel, the easiest (and
    /// fastest) way to do this is often to collect `par_iter` into a
    /// [`LinkedList`] (via [`collect_vec_list`]) or another intermediate
    /// data structure and then sequentially extend your collection. However,
    /// a more 'native' technique is to use the [`par_iter.fold`] or
    /// [`par_iter.fold_with`] methods to create the collection.
    /// Alternatively, if your collection is 'natively' parallel, you
    /// can use [`par_iter.for_each`] to process each element in turn.
    ///
    /// [`LinkedList`]: std::collections::LinkedList
    /// [`collect_vec_list`]: ParallelIterator::collect_vec_list
    /// [`par_iter.fold`]: ParallelIterator::fold()
    /// [`par_iter.fold_with`]: ParallelIterator::fold_with()
    /// [`par_iter.for_each`]: ParallelIterator::for_each()
    fn from_par_iter<I>(par_iter: I) -> Self
    where
        I: IntoParallelIterator<Item = T>;
}
3304
3305
/// `ParallelExtend` extends an existing collection with items from a [`ParallelIterator`].
///
/// This is the parallel analogue of [`std::iter::Extend`].
///
/// # Examples
///
/// Implementing `ParallelExtend` for your type:
///
/// ```
/// use rayon::prelude::*;
///
/// struct BlackHole {
///     mass: usize,
/// }
///
/// impl<T: Send> ParallelExtend<T> for BlackHole {
///     fn par_extend<I>(&mut self, par_iter: I)
///         where I: IntoParallelIterator<Item = T>
///     {
///         let par_iter = par_iter.into_par_iter();
///         self.mass += par_iter.count() * size_of::<T>();
///     }
/// }
///
/// let mut bh = BlackHole { mass: 0 };
/// bh.par_extend(0i32..1000);
/// assert_eq!(bh.mass, 4000);
/// bh.par_extend(0i64..10);
/// assert_eq!(bh.mass, 4080);
/// ```
pub trait ParallelExtend<T>
where
    T: Send,
{
    /// Extends an instance of the collection with the elements drawn
    /// from the parallel iterator `par_iter`.
    ///
    /// # Examples
    ///
    /// ```
    /// use rayon::prelude::*;
    ///
    /// let mut vec = vec![];
    /// vec.par_extend(0..5);
    /// vec.par_extend((0..5).into_par_iter().map(|i| i * i));
    /// assert_eq!(vec, [0, 1, 2, 3, 4, 0, 1, 4, 9, 16]);
    /// ```
    fn par_extend<I>(&mut self, par_iter: I)
    where
        I: IntoParallelIterator<Item = T>;
}
3354
3355
/// `ParallelDrainFull` creates a parallel iterator that moves all items
/// from a collection while retaining the original capacity.
///
/// Types which are indexable typically implement [`ParallelDrainRange`]
/// instead, where you can drain fully with `par_drain(..)`.
pub trait ParallelDrainFull {
    /// The draining parallel iterator type that will be created.
    type Iter: ParallelIterator<Item = Self::Item>;

    /// The type of item that the parallel iterator will produce.
    /// This is usually the same as `IntoParallelIterator::Item`.
    type Item: Send;

    /// Returns a draining parallel iterator over an entire collection.
    ///
    /// When the iterator is dropped, all items are removed, even if the
    /// iterator was not fully consumed. If the iterator is leaked, for example
    /// using `std::mem::forget`, it is unspecified how many items are removed.
    ///
    /// Note that `self` is taken by value (typically `&mut`-like via an
    /// implementation on `&mut C`), and the collection keeps its capacity.
    ///
    /// # Examples
    ///
    /// ```
    /// use rayon::prelude::*;
    /// use std::collections::{BinaryHeap, HashSet};
    ///
    /// let squares: HashSet<i32> = (0..10).map(|x| x * x).collect();
    ///
    /// let mut heap: BinaryHeap<_> = squares.iter().copied().collect();
    /// assert_eq!(
    ///     // heaps are drained in arbitrary order
    ///     heap.par_drain()
    ///         .inspect(|x| assert!(squares.contains(x)))
    ///         .count(),
    ///     squares.len(),
    /// );
    /// assert!(heap.is_empty());
    /// assert!(heap.capacity() >= squares.len());
    /// ```
    fn par_drain(self) -> Self::Iter;
}
3395
3396
/// `ParallelDrainRange` creates a parallel iterator that moves a range of items
/// from a collection while retaining the original capacity.
///
/// Types which are not indexable may implement [`ParallelDrainFull`] instead.
pub trait ParallelDrainRange<Idx = usize> {
    /// The draining parallel iterator type that will be created.
    type Iter: ParallelIterator<Item = Self::Item>;

    /// The type of item that the parallel iterator will produce.
    /// This is usually the same as `IntoParallelIterator::Item`.
    type Item: Send;

    /// Returns a draining parallel iterator over a range of the collection.
    ///
    /// When the iterator is dropped, all items in the range are removed, even
    /// if the iterator was not fully consumed. If the iterator is leaked, for
    /// example using `std::mem::forget`, it is unspecified how many items are
    /// removed.
    ///
    /// The examples below cover every `RangeBounds` form accepted by `range`.
    ///
    /// # Examples
    ///
    /// ```
    /// use rayon::prelude::*;
    ///
    /// let squares: Vec<i32> = (0..10).map(|x| x * x).collect();
    ///
    /// println!("RangeFull");
    /// let mut vec = squares.clone();
    /// assert!(vec.par_drain(..)
    ///            .eq(squares.par_iter().copied()));
    /// assert!(vec.is_empty());
    /// assert!(vec.capacity() >= squares.len());
    ///
    /// println!("RangeFrom");
    /// let mut vec = squares.clone();
    /// assert!(vec.par_drain(5..)
    ///            .eq(squares[5..].par_iter().copied()));
    /// assert_eq!(&vec[..], &squares[..5]);
    /// assert!(vec.capacity() >= squares.len());
    ///
    /// println!("RangeTo");
    /// let mut vec = squares.clone();
    /// assert!(vec.par_drain(..5)
    ///            .eq(squares[..5].par_iter().copied()));
    /// assert_eq!(&vec[..], &squares[5..]);
    /// assert!(vec.capacity() >= squares.len());
    ///
    /// println!("RangeToInclusive");
    /// let mut vec = squares.clone();
    /// assert!(vec.par_drain(..=5)
    ///            .eq(squares[..=5].par_iter().copied()));
    /// assert_eq!(&vec[..], &squares[6..]);
    /// assert!(vec.capacity() >= squares.len());
    ///
    /// println!("Range");
    /// let mut vec = squares.clone();
    /// assert!(vec.par_drain(3..7)
    ///            .eq(squares[3..7].par_iter().copied()));
    /// assert_eq!(&vec[..3], &squares[..3]);
    /// assert_eq!(&vec[3..], &squares[7..]);
    /// assert!(vec.capacity() >= squares.len());
    ///
    /// println!("RangeInclusive");
    /// let mut vec = squares.clone();
    /// assert!(vec.par_drain(3..=7)
    ///            .eq(squares[3..=7].par_iter().copied()));
    /// assert_eq!(&vec[..3], &squares[..3]);
    /// assert_eq!(&vec[3..], &squares[8..]);
    /// assert!(vec.capacity() >= squares.len());
    /// ```
    fn par_drain<R: RangeBounds<Idx>>(self, range: R) -> Self::Iter;
}
3468
3469
/// We hide the `Try` trait in a private module, as it's only meant to be a
3470
/// stable clone of the standard library's `Try` trait, as yet unstable.
3471
mod private {
3472
    use std::convert::Infallible;
3473
    use std::ops::ControlFlow::{self, Break, Continue};
3474
    use std::task::Poll;
3475
3476
    /// Clone of `std::ops::Try`.
3477
    ///
3478
    /// Implementing this trait is not permitted outside of `rayon`.
3479
    pub trait Try {
3480
        private_decl! {}
3481
3482
        type Output;
3483
        type Residual;
3484
3485
        fn from_output(output: Self::Output) -> Self;
3486
3487
        fn from_residual(residual: Self::Residual) -> Self;
3488
3489
        fn branch(self) -> ControlFlow<Self::Residual, Self::Output>;
3490
    }
3491
3492
    impl<B, C> Try for ControlFlow<B, C> {
3493
        private_impl! {}
3494
3495
        type Output = C;
3496
        type Residual = ControlFlow<B, Infallible>;
3497
3498
0
        fn from_output(output: Self::Output) -> Self {
3499
0
            Continue(output)
3500
0
        }
3501
3502
0
        fn from_residual(residual: Self::Residual) -> Self {
3503
0
            match residual {
3504
0
                Break(b) => Break(b),
3505
                #[allow(unreachable_patterns)]
3506
                Continue(_) => unreachable!(),
3507
            }
3508
0
        }
3509
3510
0
        fn branch(self) -> ControlFlow<Self::Residual, Self::Output> {
3511
0
            match self {
3512
0
                Continue(c) => Continue(c),
3513
0
                Break(b) => Break(Break(b)),
3514
            }
3515
0
        }
3516
    }
3517
3518
    impl<T> Try for Option<T> {
3519
        private_impl! {}
3520
3521
        type Output = T;
3522
        type Residual = Option<Infallible>;
3523
3524
0
        fn from_output(output: Self::Output) -> Self {
3525
0
            Some(output)
3526
0
        }
3527
3528
0
        fn from_residual(residual: Self::Residual) -> Self {
3529
0
            match residual {
3530
0
                None => None,
3531
                #[allow(unreachable_patterns)]
3532
                Some(_) => unreachable!(),
3533
            }
3534
0
        }
3535
3536
0
        fn branch(self) -> ControlFlow<Self::Residual, Self::Output> {
3537
0
            match self {
3538
0
                Some(c) => Continue(c),
3539
0
                None => Break(None),
3540
            }
3541
0
        }
3542
    }
3543
3544
    impl<T, E> Try for Result<T, E> {
3545
        private_impl! {}
3546
3547
        type Output = T;
3548
        type Residual = Result<Infallible, E>;
3549
3550
0
        fn from_output(output: Self::Output) -> Self {
3551
0
            Ok(output)
3552
0
        }
3553
3554
0
        fn from_residual(residual: Self::Residual) -> Self {
3555
0
            match residual {
3556
0
                Err(e) => Err(e),
3557
                #[allow(unreachable_patterns)]
3558
                Ok(_) => unreachable!(),
3559
            }
3560
0
        }
3561
3562
0
        fn branch(self) -> ControlFlow<Self::Residual, Self::Output> {
3563
0
            match self {
3564
0
                Ok(c) => Continue(c),
3565
0
                Err(e) => Break(Err(e)),
3566
            }
3567
0
        }
3568
    }
3569
3570
    impl<T, E> Try for Poll<Result<T, E>> {
3571
        private_impl! {}
3572
3573
        type Output = Poll<T>;
3574
        type Residual = Result<Infallible, E>;
3575
3576
0
        fn from_output(output: Self::Output) -> Self {
3577
0
            output.map(Ok)
3578
0
        }
3579
3580
0
        fn from_residual(residual: Self::Residual) -> Self {
3581
0
            match residual {
3582
0
                Err(e) => Poll::Ready(Err(e)),
3583
                #[allow(unreachable_patterns)]
3584
                Ok(_) => unreachable!(),
3585
            }
3586
0
        }
3587
3588
0
        fn branch(self) -> ControlFlow<Self::Residual, Self::Output> {
3589
0
            match self {
3590
0
                Poll::Pending => Continue(Poll::Pending),
3591
0
                Poll::Ready(Ok(c)) => Continue(Poll::Ready(c)),
3592
0
                Poll::Ready(Err(e)) => Break(Err(e)),
3593
            }
3594
0
        }
3595
    }
3596
3597
    impl<T, E> Try for Poll<Option<Result<T, E>>> {
3598
        private_impl! {}
3599
3600
        type Output = Poll<Option<T>>;
3601
        type Residual = Result<Infallible, E>;
3602
3603
0
        fn from_output(output: Self::Output) -> Self {
3604
0
            match output {
3605
0
                Poll::Ready(o) => Poll::Ready(o.map(Ok)),
3606
0
                Poll::Pending => Poll::Pending,
3607
            }
3608
0
        }
3609
3610
0
        fn from_residual(residual: Self::Residual) -> Self {
3611
0
            match residual {
3612
0
                Err(e) => Poll::Ready(Some(Err(e))),
3613
                #[allow(unreachable_patterns)]
3614
                Ok(_) => unreachable!(),
3615
            }
3616
0
        }
3617
3618
0
        fn branch(self) -> ControlFlow<Self::Residual, Self::Output> {
3619
0
            match self {
3620
0
                Poll::Pending => Continue(Poll::Pending),
3621
0
                Poll::Ready(None) => Continue(Poll::Ready(None)),
3622
0
                Poll::Ready(Some(Ok(c))) => Continue(Poll::Ready(Some(c))),
3623
0
                Poll::Ready(Some(Err(e))) => Break(Err(e)),
3624
            }
3625
0
        }
3626
    }
3627
}