use crate::{Array, ArrayBase, DataMut, Dimension, IntoNdProducer, NdProducer, Zip};
use crate::AssignElem;

use crate::parallel::prelude::*;
use crate::parallel::par::ParallelSplits;
use super::send_producer::SendProducer;

use crate::partial::Partial;

/// # Parallel methods
///
/// These methods require crate feature `rayon`.
impl<A, S, D> ArrayBase<S, D>
where
    S: DataMut<Elem = A>,
    D: Dimension,
    A: Send + Sync,
{
    /// Parallel version of `map_inplace`.
    ///
    /// Modify the array in place by calling `f` by mutable reference on each element.
    ///
    /// Elements are visited in arbitrary order.
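    ///
    /// A small illustrative sketch (requires the `rayon` crate feature):
    ///
    /// ```rust
    /// use ndarray::Array2;
    ///
    /// let mut a = Array2::<f64>::ones((16, 16));
    /// // Double every element, visiting elements in parallel.
    /// a.par_map_inplace(|x| *x *= 2.0);
    /// assert!(a.iter().all(|&x| x == 2.0));
    /// ```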
    pub fn par_map_inplace<F>(&mut self, f: F)
    where
        F: Fn(&mut A) + Sync + Send,
    {
        self.view_mut().into_par_iter().for_each(f)
    }

    /// Parallel version of `mapv_inplace`.
    ///
    /// Modify the array in place by calling `f` by **v**alue on each element.
    /// The array is updated with the new values.
    ///
    /// Elements are visited in arbitrary order.
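    ///
    /// For example, squaring each element in place (a small sketch; requires the `rayon` feature):
    ///
    /// ```rust
    /// use ndarray::Array1;
    ///
    /// let mut a = Array1::from(vec![1, 2, 3, 4]);
    /// a.par_mapv_inplace(|x| x * x);
    /// assert_eq!(a, Array1::from(vec![1, 4, 9, 16]));
    /// ```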
    pub fn par_mapv_inplace<F>(&mut self, f: F)
    where
        F: Fn(A) -> A + Sync + Send,
        A: Clone,
    {
        self.view_mut()
            .into_par_iter()
            .for_each(move |x| *x = f(x.clone()))
    }
}

// Zip

// Upper bound on the number of recursive splits used when collecting in parallel;
// keeps the Zip from being split into chunks that are too small to be worthwhile.
const COLLECT_MAX_SPLITS: usize = 10;

macro_rules! zip_impl {
    ($([$notlast:ident $($p:ident)*],)+) => {
        $(
        #[allow(non_snake_case)]
        impl<D, $($p),*> Zip<($($p,)*), D>
            where $($p::Item : Send , )*
                  $($p : Send , )*
                  D: Dimension,
                  $($p: NdProducer<Dim=D> ,)*
        {
            /// The `par_for_each` method for `Zip`.
            ///
            /// This is a shorthand for using `.into_par_iter().for_each()` on
            /// `Zip`.
            ///
            /// Requires crate feature `rayon`.
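            ///
            /// A small illustrative sketch, writing `a + 1` into `b` element-wise:
            ///
            /// ```rust
            /// use ndarray::{Array2, Zip};
            ///
            /// let a = Array2::<f64>::ones((8, 8));
            /// let mut b = Array2::<f64>::zeros((8, 8));
            ///
            /// Zip::from(&mut b).and(&a).par_for_each(|b, &a| *b = a + 1.0);
            /// assert!(b.iter().all(|&x| x == 2.0));
            /// ```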
            pub fn par_for_each<F>(self, function: F)
                where F: Fn($($p::Item),*) + Sync + Send
            {
                self.into_par_iter().for_each(move |($($p,)*)| function($($p),*))
            }

            /// The `par_apply` method for `Zip`.
            ///
            /// This is a shorthand for using `.into_par_iter().for_each()` on
            /// `Zip`.
            ///
            /// Requires crate feature `rayon`.
            #[deprecated(note="Renamed to .par_for_each()", since="0.15.0")]
            pub fn par_apply<F>(self, function: F)
                where F: Fn($($p::Item),*) + Sync + Send
            {
                self.into_par_iter().for_each(move |($($p,)*)| function($($p),*))
            }

            expand_if!(@bool [$notlast]

            /// Map and collect the results into a new array, which has the same size as the
            /// inputs.
            ///
            /// If all inputs are c-order, or all are f-order, that layout is preserved in the output.
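            ///
            /// A small illustrative sketch, collecting element-wise sums:
            ///
            /// ```rust
            /// use ndarray::{Array2, Zip};
            ///
            /// let a = Array2::from_elem((4, 4), 2);
            /// let b = Array2::from_elem((4, 4), 3);
            ///
            /// let sums = Zip::from(&a).and(&b).par_map_collect(|&x, &y| x + y);
            /// assert_eq!(sums, Array2::from_elem((4, 4), 5));
            /// ```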
            pub fn par_map_collect<R>(self, f: impl Fn($($p::Item,)* ) -> R + Sync + Send)
                -> Array<R, D>
                where R: Send
            {
                let mut output = self.uninitialized_for_current_layout::<R>();
                let total_len = output.len();

                // Create a parallel iterator that produces chunks of the zip with the output
                // array.  It's crucial that both parts split in the same way, and that the
                // chunks of the output remain contiguous.
                //
                // Use a raw view so that we can alias the output data here and in the partial
                // result.
                let splits = unsafe {
                    ParallelSplits {
                        iter: self.and(SendProducer::new(output.raw_view_mut().cast::<R>())),
                        // Keep it from splitting the Zip down too small
                        max_splits: COLLECT_MAX_SPLITS,
                    }
                };

                let collect_result = splits.map(move |zip| {
                    // Apply the mapping function on this chunk of the zip
                    // Create a partial result for the contiguous slice of data being written to
                    unsafe {
                        zip.collect_with_partial(&f)
                    }
                })
                .reduce(Partial::stub, Partial::try_merge);

                if std::mem::needs_drop::<R>() {
                    debug_assert_eq!(total_len, collect_result.len,
                        "collect len is not correct, expected {}", total_len);
                    assert!(collect_result.len == total_len,
                        "Collect: Expected number of writes not completed");
                }

                // Here the collect result is complete, and we release its ownership and transfer
                // it to the output array.
                collect_result.release_ownership();
                unsafe {
                    output.assume_init()
                }
            }

            /// Map and collect the results into a new array, which has the same size as the
            /// inputs.
            ///
            /// If all inputs are c-order, or all are f-order, that layout is preserved in the output.
            #[deprecated(note="Renamed to .par_map_collect()", since="0.15.0")]
            pub fn par_apply_collect<R>(self, f: impl Fn($($p::Item,)* ) -> R + Sync + Send)
                -> Array<R, D>
                where R: Send
            {
                self.par_map_collect(f)
            }

            /// Map and assign the results into the producer `into`, which should have the same
            /// size as the other inputs.
            ///
            /// The producer should have assignable items as dictated by the `AssignElem` trait,
            /// for example `&mut R`.
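            ///
            /// A small illustrative sketch, assigning doubled values into `out`:
            ///
            /// ```rust
            /// use ndarray::{Array2, Zip};
            ///
            /// let a = Array2::from_elem((4, 4), 1.5);
            /// let mut out = Array2::<f64>::zeros((4, 4));
            ///
            /// Zip::from(&a).par_map_assign_into(&mut out, |&x| x * 2.0);
            /// assert_eq!(out, Array2::from_elem((4, 4), 3.0));
            /// ```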
            pub fn par_map_assign_into<R, Q>(self, into: Q, f: impl Fn($($p::Item,)* ) -> R + Sync + Send)
                where Q: IntoNdProducer<Dim=D>,
                      Q::Item: AssignElem<R> + Send,
                      Q::Output: Send,
            {
                self.and(into)
                    .par_for_each(move |$($p, )* output_| {
                        output_.assign_elem(f($($p ),*));
                    });
            }

            /// Apply and assign the results into the producer `into`, which should have the same
            /// size as the other inputs.
            ///
            /// The producer should have assignable items as dictated by the `AssignElem` trait,
            /// for example `&mut R`.
            #[deprecated(note="Renamed to .par_map_assign_into()", since="0.15.0")]
            pub fn par_apply_assign_into<R, Q>(self, into: Q, f: impl Fn($($p::Item,)* ) -> R + Sync + Send)
                where Q: IntoNdProducer<Dim=D>,
                      Q::Item: AssignElem<R> + Send,
                      Q::Output: Send,
            {
                self.par_map_assign_into(into, f)
            }

            /// Parallel version of `fold`.
            ///
            /// Splits the producer into multiple tasks, each of which accumulates a single value
            /// using the `fold` closure. Those tasks are executed in parallel and their results
            /// are then combined into a single value using the `reduce` closure.
            ///
            /// The `identity` closure provides the initial values for each of the tasks and
            /// for the final reduction.
            ///
            /// This is a shorthand for calling `self.into_par_iter().fold(...).reduce(...)`.
            ///
            /// Note that it is often more efficient to parallelize not per element but over
            /// larger chunks of the array, such as generalized rows, processing each chunk with
            /// a sequential variant of the accumulation.
            /// For example, sum the rows in parallel while summing each row sequentially, taking
            /// advantage of locality and vectorization within each task, and then reduce the row
            /// sums to the sum of the whole matrix.
            ///
            /// Also note that the splitting of the producer into multiple tasks is _not_ deterministic,
            /// which needs to be taken into account when analyzing the numerical accuracy of such an operation.
            ///
            /// ## Examples
            ///
            /// ```rust
            /// use ndarray::{Array, Zip};
            ///
            /// let a = Array::<usize, _>::ones((128, 1024));
            /// let b = Array::<usize, _>::ones(128);
            ///
            /// let weighted_sum = Zip::from(a.rows()).and(&b).par_fold(
            ///     || 0,
            ///     |sum, row, factor| sum + row.sum() * factor,
            ///     |sum, other_sum| sum + other_sum,
            /// );
            ///
            /// assert_eq!(weighted_sum, a.len());
            /// ```
            pub fn par_fold<ID, F, R, T>(self, identity: ID, fold: F, reduce: R) -> T
            where
                ID: Fn() -> T + Send + Sync + Clone,
                F: Fn(T, $($p::Item),*) -> T + Send + Sync,
                R: Fn(T, T) -> T + Send + Sync,
                T: Send
            {
                self.into_par_iter()
                    .fold(identity.clone(), move |accumulator, ($($p,)*)| {
                        fold(accumulator, $($p),*)
                    })
                    .reduce(identity, reduce)
            }

            );
        }
        )+
    }
}

zip_impl! {
    [true P1],
    [true P1 P2],
    [true P1 P2 P3],
    [true P1 P2 P3 P4],
    [true P1 P2 P3 P4 P5],
    [false P1 P2 P3 P4 P5 P6],
}