ndarray_slice/lib.rs
//! Fast and robust slice-based algorithms (e.g., [sorting], [selection], [search]) for
//! non-contiguous (sub)views into *n*-dimensional arrays. Reimplements algorithms in [`slice`] and
#![cfg_attr(feature = "rayon", doc = "[`rayon::slice`]")]
#![cfg_attr(not(feature = "rayon"), doc = "`rayon::slice`")]
//! for [`ndarray`] with arbitrary memory layout (e.g., non-contiguous).
//!
//! # Example
//!
//! ```
//! use ndarray_slice::{ndarray::arr2, Slice1Ext};
//!
//! // 2-dimensional array of 4 rows and 5 columns.
//! let mut v = arr2(&[[-5, 4, 1, -3,  2],   // row 0, axis 0
//!                    [ 8, 3, 2,  4,  8],   // row 1, axis 0
//!                    [38, 9, 3,  0,  3],   // row 2, axis 0
//!                    [ 4, 9, 0,  8, -1]]); // row 3, axis 0
//! //                   \      \       \
//! //         column 0   \      column 4   axis 1
//! //                     column 2          axis 1
//!
//! // Mutable subview into the last column.
//! let mut column = v.column_mut(4);
//!
//! // Due to row-major memory layout, columns are non-contiguous
//! // and hence cannot be sorted by viewing them as mutable slices.
//! assert_eq!(column.as_slice_mut(), None);
//!
//! // Instead, sorting is specifically implemented for non-contiguous
//! // mutable (sub)views.
//! column.sort_unstable();
//!
//! assert!(v == arr2(&[[-5, 4, 1, -3, -1],
//!                     [ 8, 3, 2,  4,  2],
//!                     [38, 9, 3,  0,  3],
//!                     [ 4, 9, 0,  8,  8]]));
//! //                                   \
//! //                        column 4 sorted, others untouched
//! ```
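//!
//! Selection on such a non-contiguous view works the same way; for example, the lower median of
//! a column can be found in place via [`select_nth_unstable`](Slice1Ext::select_nth_unstable)
//! (a small sketch reusing the array from above):
//!
//! ```
//! use ndarray_slice::{ndarray::arr2, Slice1Ext};
//!
//! let mut v = arr2(&[[-5, 4, 1, -3, 2],
//!                    [ 8, 3, 2, 4, 8],
//!                    [38, 9, 3, 0, 3],
//!                    [ 4, 9, 0, 8, -1]]);
//!
//! // Lower median of the non-contiguous column 1, i.e., of [4, 3, 9, 9].
//! let mut column = v.column_mut(1);
//! let (_, median, _) = column.select_nth_unstable(1);
//! assert_eq!(*median, 4);
//! ```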
//!
//! # Current Implementation
//!
//! Complexities below are given in terms of *n*, the length of the (sub)view, and *m*, the number
//! of indices to select.
//!
//! | Resource | Complexity | Sorting (stable) | Sorting (unstable) | Selection (unstable) | Bulk Selection (unstable) |
//! |----------|------------|------------------|---------------------|----------------------|---------------------------|
//! | Time | Best | *O*(*n*) | *O*(*n*) | *O*(*n*) | *O*(*n* log *m*) |
//! | Time | Average | *O*(*n* log *n*) | *O*(*n* log *n*) | *O*(*n*) | *O*(*n* log *m*) |
//! | Time | Worst | *O*(*n* log *n*) | *O*(*n* log *n*) | *O*(*n*) | *O*(*n* log *m*) |
//! | Space | Best | *O*(1) | *O*(1) | *O*(1) | *O*(*m*) |
//! | Space | Average | *O*(*n*/2) | *O*(log *n*) | *O*(1) | *O*(*m*+log *m*) |
//! | Space | Worst | *O*(*n*/2) | *O*(log *n*) | *O*(1) | *O*(*m*+log *m*) |
//!
//!
//! [sorting]: https://en.wikipedia.org/wiki/Sorting_algorithm
//! [selection]: https://en.wikipedia.org/wiki/Selection_algorithm
//! [search]: https://en.wikipedia.org/wiki/Search_algorithm
//!
//! # Roadmap
//!
//! * Add `SliceExt` trait for *n*-dimensional arrays or (sub)views with methods expecting `Axis` as
//!   their first argument. Comparing methods will always be suffixed with `_by` or `_by_key`
//!   defining how to compare multi-dimensional elements (e.g., rows) along the provided axis of
//!   interest (e.g., columns).
//!
//! # Features
//!
//! * `alloc` for stable `sort`/`sort_by`/`sort_by_key`. Enabled by `std`.
//! * `std` for stable `sort_by_cached_key`. Enabled by `default` or `rayon`.
//! * `stacker` for spilling recursion stack over to heap if necessary. Enabled by `default`.
//! * `rayon` for parallel `par_sort*`/`par_select_many_nth_unstable*`.

#![deny(
    missing_docs,
    rustdoc::broken_intra_doc_links,
    rustdoc::missing_crate_level_docs
)]
#![cfg_attr(not(feature = "std"), no_std)]
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
#![cfg_attr(miri, feature(maybe_uninit_slice))]

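// Calls `callback`, spilling the recursion stack over to the heap if necessary: with the
// `stacker` feature enabled, a new 1 MiB stack segment is allocated whenever less than 32 KiB
// of stack remains; without the feature, this is a plain call.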
#[inline(always)]
fn maybe_grow<R, F: FnOnce() -> R>(callback: F) -> R {
    #[cfg(feature = "stacker")]
    {
        stacker::maybe_grow(32 * 1_024, 1_024 * 1_024, callback)
    }
    #[cfg(not(feature = "stacker"))]
    {
        callback()
    }
}

mod heap_sort;
mod insertion_sort;
mod merge_sort;
mod partition;
mod partition_dedup;
mod quick_sort;
mod stable_sort;

#[cfg(feature = "rayon")]
mod par;
#[cfg(feature = "rayon")]
use par::{
    merge_sort::par_merge_sort, partition::par_partition_at_indices, quick_sort::par_quick_sort,
};

#[cfg(feature = "alloc")]
use crate::stable_sort::stable_sort;

use crate::{
    partition::{is_sorted, partition_at_index, partition_at_indices, reverse},
    partition_dedup::partition_dedup,
    quick_sort::quick_sort,
};
use core::cmp::Ordering::{self, Equal, Greater, Less};
use ndarray::{ArrayBase, ArrayViewMut1, Data, DataMut, Ix1};

pub use ndarray;

// Derivative work of [`core::slice`] licensed under `MIT OR Apache-2.0`.
//
// [`core::slice`]: https://doc.rust-lang.org/src/core/slice/mod.rs.html

/// Extension trait for 1-dimensional [`ArrayBase<S, Ix1>`](`ArrayBase`) array or (sub)view with
/// arbitrary memory layout (e.g., non-contiguous) providing methods (e.g., [sorting], [selection],
/// [search]) similar to [`slice`] and
#[cfg_attr(feature = "rayon", doc = "[`rayon::slice`].")]
#[cfg_attr(not(feature = "rayon"), doc = "`rayon::slice`.")]
///
/// [sorting]: https://en.wikipedia.org/wiki/Sorting_algorithm
/// [selection]: https://en.wikipedia.org/wiki/Selection_algorithm
/// [search]: https://en.wikipedia.org/wiki/Search_algorithm
pub trait Slice1Ext<A, S>
where
    S: Data<Elem = A>,
{
    /// Sorts the array in parallel.
    ///
    /// This sort is stable (i.e., does not reorder equal elements) and *O*(*n* log *n*) worst-case.
    ///
    /// When applicable, unstable sorting is preferred because it is generally faster than stable
    /// sorting and it doesn't allocate auxiliary memory.
    /// See [`par_sort_unstable`](Slice1Ext::par_sort_unstable).
    ///
    /// # Current Implementation
    ///
    /// The current algorithm is an adaptive, iterative merge sort inspired by
    /// [timsort](https://en.wikipedia.org/wiki/Timsort).
    /// It is designed to be very fast in cases where the array is nearly sorted, or consists of
    /// two or more sorted sequences concatenated one after another.
    ///
    /// Also, it allocates temporary storage half the size of `self`, but for short arrays a
    /// non-allocating insertion sort is used instead.
    ///
    /// In order to sort the array in parallel, the array is first divided into smaller chunks and
    /// all chunks are sorted in parallel. Then, adjacent chunks that together form non-descending
    /// or descending runs are concatenated. Finally, the remaining chunks are merged together using
    /// parallel subdivision of chunks and parallel merge operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use ndarray_slice::{ndarray::arr1, Slice1Ext};
    ///
    /// let mut v = arr1(&[-5, 4, 1, -3, 2]);
    ///
    /// v.par_sort();
    /// assert!(v == arr1(&[-5, -3, 1, 2, 4]));
    /// ```
    #[cfg(feature = "rayon")]
    fn par_sort(&mut self)
    where
        A: Ord + Send,
        S: DataMut;
    /// Sorts the array in parallel with a comparator function.
    ///
    /// This sort is stable (i.e., does not reorder equal elements) and *O*(*n* log *n*) worst-case.
    ///
    /// The comparator function must define a total ordering for the elements in the array. If
    /// the ordering is not total, the order of the elements is unspecified. An order is a
    /// total order if it is (for all `a`, `b` and `c`):
    ///
    /// * total and antisymmetric: exactly one of `a < b`, `a == b` or `a > b` is true, and
    /// * transitive, `a < b` and `b < c` implies `a < c`. The same must hold for both `==` and `>`.
    ///
    /// For example, while [`f64`] doesn't implement [`Ord`] because `NaN != NaN`, we can use
    /// `partial_cmp` as our sort function when we know the array doesn't contain a `NaN`.
    ///
    /// ```
    /// use ndarray_slice::{ndarray::arr1, Slice1Ext};
    ///
    /// let mut floats = arr1(&[5f64, 4.0, 1.0, 3.0, 2.0]);
    /// floats.par_sort_by(|a, b| a.partial_cmp(b).unwrap());
    /// assert_eq!(floats, arr1(&[1.0, 2.0, 3.0, 4.0, 5.0]));
    /// ```
    ///
    /// When applicable, unstable sorting is preferred because it is generally faster than stable
    /// sorting and it doesn't allocate auxiliary memory.
    /// See [`par_sort_unstable_by`](Slice1Ext::par_sort_unstable_by).
    ///
    /// # Current Implementation
    ///
    /// The current algorithm is an adaptive, iterative merge sort inspired by
    /// [timsort](https://en.wikipedia.org/wiki/Timsort).
    /// It is designed to be very fast in cases where the array is nearly sorted, or consists of
    /// two or more sorted sequences concatenated one after another.
    ///
    /// Also, it allocates temporary storage half the size of `self`, but for short arrays a
    /// non-allocating insertion sort is used instead.
    ///
    /// In order to sort the array in parallel, the array is first divided into smaller chunks and
    /// all chunks are sorted in parallel. Then, adjacent chunks that together form non-descending
    /// or descending runs are concatenated. Finally, the remaining chunks are merged together using
    /// parallel subdivision of chunks and parallel merge operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use ndarray_slice::{ndarray::arr1, Slice1Ext};
    ///
    /// let mut v = arr1(&[5, 4, 1, 3, 2]);
    /// v.par_sort_by(|a, b| a.cmp(b));
    /// assert!(v == arr1(&[1, 2, 3, 4, 5]));
    ///
    /// // reverse sorting
    /// v.par_sort_by(|a, b| b.cmp(a));
    /// assert!(v == arr1(&[5, 4, 3, 2, 1]));
    /// ```
    #[cfg(feature = "rayon")]
    fn par_sort_by<F>(&mut self, compare: F)
    where
        A: Send,
        F: Fn(&A, &A) -> Ordering + Sync,
        S: DataMut;
    /// Sorts the array in parallel with a key extraction function.
    ///
    /// This sort is stable (i.e., does not reorder equal elements) and *O*(*mn* log *n*)
    /// worst-case, where the key function is *O*(*m*).
    ///
    /// For expensive key functions (e.g. functions that are not simple property accesses or
    /// basic operations), [`par_sort_by_cached_key`](Slice1Ext::par_sort_by_cached_key) is likely to be
    /// significantly faster, as it does not recompute element keys.
    ///
    /// When applicable, unstable sorting is preferred because it is generally faster than stable
    /// sorting and it doesn't allocate auxiliary memory.
    /// See [`par_sort_unstable_by_key`](Slice1Ext::par_sort_unstable_by_key).
    ///
    /// # Current Implementation
    ///
    /// The current algorithm is an adaptive, iterative merge sort inspired by
    /// [timsort](https://en.wikipedia.org/wiki/Timsort).
    /// It is designed to be very fast in cases where the array is nearly sorted, or consists of
    /// two or more sorted sequences concatenated one after another.
    ///
    /// Also, it allocates temporary storage half the size of `self`, but for short arrays a
    /// non-allocating insertion sort is used instead.
    ///
    /// In order to sort the array in parallel, the array is first divided into smaller chunks and
    /// all chunks are sorted in parallel. Then, adjacent chunks that together form non-descending
    /// or descending runs are concatenated. Finally, the remaining chunks are merged together using
    /// parallel subdivision of chunks and parallel merge operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use ndarray_slice::{ndarray::arr1, Slice1Ext};
    ///
    /// let mut v = arr1(&[-5i32, 4, 2, -3, 1]);
    ///
    /// v.par_sort_by_key(|k| k.abs());
    /// assert!(v == arr1(&[1, 2, -3, 4, -5]));
    /// ```
    #[cfg(feature = "rayon")]
    fn par_sort_by_key<K, F>(&mut self, f: F)
    where
        A: Send,
        K: Ord,
        F: Fn(&A) -> K + Sync,
        S: DataMut;
    /// Sorts the array in parallel with a key extraction function.
    ///
    /// During sorting, the key function is called at most once per element, by using
    /// temporary storage to remember the results of key evaluation.
    /// The order of calls to the key function is unspecified and may change in future versions
    /// of the standard library.
    ///
    /// This sort is stable (i.e., does not reorder equal elements) and *O*(*mn* + *n* log *n*)
    /// worst-case, where the key function is *O*(*m*).
    ///
    /// For simple key functions (e.g., functions that are property accesses or
    /// basic operations), [`par_sort_by_key`](Slice1Ext::par_sort_by_key) is likely to be
    /// faster.
    ///
    /// # Current Implementation
    ///
    /// The current algorithm is based on [pattern-defeating quicksort][pdqsort] by Orson Peters,
    /// which combines the fast average case of randomized quicksort with the fast worst case of
    /// heapsort, while achieving linear time on arrays with certain patterns. It uses some
    /// randomization to avoid degenerate cases, but with a fixed seed to always provide
    /// deterministic behavior.
    ///
    /// In the worst case, the algorithm allocates temporary storage in a `Vec<(K, usize)>` the
    /// length of the array.
    ///
    /// In order to sort the array in parallel, the array is first divided into smaller chunks and
    /// all chunks are sorted in parallel. Then, adjacent chunks that together form non-descending
    /// or descending runs are concatenated. Finally, the remaining chunks are merged together using
    /// parallel subdivision of chunks and parallel merge operation.
    ///
    /// # Examples
    ///
    /// ```
    /// # if !cfg!(miri) {
    /// use ndarray_slice::{ndarray::arr1, Slice1Ext};
    ///
    /// let mut v = arr1(&[-5i32, 4, 32, -3, 2]);
    ///
    /// v.par_sort_by_cached_key(|k| k.to_string());
    /// assert!(v == arr1(&[-3, -5, 2, 32, 4]));
    /// # }
    /// ```
    ///
    /// [pdqsort]: https://github.com/orlp/pdqsort
    #[cfg(feature = "rayon")]
    fn par_sort_by_cached_key<K, F>(&mut self, f: F)
    where
        A: Send + Sync,
        F: Fn(&A) -> K + Sync,
        K: Ord + Send,
        S: DataMut;

    /// Sorts the array.
    ///
    /// This sort is stable (i.e., does not reorder equal elements) and *O*(*n* log *n*) worst-case.
    ///
    /// When applicable, unstable sorting is preferred because it is generally faster than stable
    /// sorting and it doesn't allocate auxiliary memory.
    /// See [`sort_unstable`](Slice1Ext::sort_unstable).
    ///
    /// # Current Implementation
    ///
    /// The current algorithm is an adaptive, iterative merge sort inspired by
    /// [timsort](https://en.wikipedia.org/wiki/Timsort).
    /// It is designed to be very fast in cases where the array is nearly sorted, or consists of
    /// two or more sorted sequences concatenated one after another.
    ///
    /// Also, it allocates temporary storage half the size of `self`, but for short arrays a
    /// non-allocating insertion sort is used instead.
    ///
    /// # Examples
    ///
    /// ```
    /// use ndarray_slice::{ndarray::arr1, Slice1Ext};
    ///
    /// let mut v = arr1(&[-5, 4, 1, -3, 2]);
    ///
    /// v.sort();
    /// assert!(v == arr1(&[-5, -3, 1, 2, 4]));
    /// ```
    #[cfg(feature = "alloc")]
    fn sort(&mut self)
    where
        A: Ord,
        S: DataMut;
    /// Sorts the array with a comparator function.
    ///
    /// This sort is stable (i.e., does not reorder equal elements) and *O*(*n* log *n*) worst-case.
    ///
    /// The comparator function must define a total ordering for the elements in the array. If
    /// the ordering is not total, the order of the elements is unspecified. An order is a
    /// total order if it is (for all `a`, `b` and `c`):
    ///
    /// * total and antisymmetric: exactly one of `a < b`, `a == b` or `a > b` is true, and
    /// * transitive, `a < b` and `b < c` implies `a < c`. The same must hold for both `==` and `>`.
    ///
    /// For example, while [`f64`] doesn't implement [`Ord`] because `NaN != NaN`, we can use
    /// `partial_cmp` as our sort function when we know the array doesn't contain a `NaN`.
    ///
    /// ```
    /// use ndarray_slice::{ndarray::arr1, Slice1Ext};
    ///
    /// let mut floats = arr1(&[5f64, 4.0, 1.0, 3.0, 2.0]);
    /// floats.sort_by(|a, b| a.partial_cmp(b).unwrap());
    /// assert_eq!(floats, arr1(&[1.0, 2.0, 3.0, 4.0, 5.0]));
    /// ```
    ///
    /// When applicable, unstable sorting is preferred because it is generally faster than stable
    /// sorting and it doesn't allocate auxiliary memory.
    /// See [`sort_unstable_by`](Slice1Ext::sort_unstable_by).
    ///
    /// # Current Implementation
    ///
    /// The current algorithm is an adaptive, iterative merge sort inspired by
    /// [timsort](https://en.wikipedia.org/wiki/Timsort).
    /// It is designed to be very fast in cases where the array is nearly sorted, or consists of
    /// two or more sorted sequences concatenated one after another.
    ///
    /// Also, it allocates temporary storage half the size of `self`, but for short arrays a
    /// non-allocating insertion sort is used instead.
    ///
    /// # Examples
    ///
    /// ```
    /// use ndarray_slice::{ndarray::arr1, Slice1Ext};
    ///
    /// let mut v = arr1(&[5, 4, 1, 3, 2]);
    /// v.sort_by(|a, b| a.cmp(b));
    /// assert!(v == arr1(&[1, 2, 3, 4, 5]));
    ///
    /// // reverse sorting
    /// v.sort_by(|a, b| b.cmp(a));
    /// assert!(v == arr1(&[5, 4, 3, 2, 1]));
    /// ```
    #[cfg(feature = "alloc")]
    fn sort_by<F>(&mut self, compare: F)
    where
        F: FnMut(&A, &A) -> Ordering,
        S: DataMut;
    /// Sorts the array with a key extraction function.
    ///
    /// This sort is stable (i.e., does not reorder equal elements) and *O*(*mn* log *n*)
    /// worst-case, where the key function is *O*(*m*).
    ///
    #[cfg_attr(
        feature = "std",
        doc = "\
For expensive key functions (e.g. functions that are not simple property accesses or
basic operations), [`sort_by_cached_key`](Slice1Ext::sort_by_cached_key) is likely to be
significantly faster, as it does not recompute element keys."
    )]
    ///
    /// When applicable, unstable sorting is preferred because it is generally faster than stable
    /// sorting and it doesn't allocate auxiliary memory.
    /// See [`sort_unstable_by_key`](Slice1Ext::sort_unstable_by_key).
    ///
    /// # Current Implementation
    ///
    /// The current algorithm is an adaptive, iterative merge sort inspired by
    /// [timsort](https://en.wikipedia.org/wiki/Timsort).
    /// It is designed to be very fast in cases where the array is nearly sorted, or consists of
    /// two or more sorted sequences concatenated one after another.
    ///
    /// Also, it allocates temporary storage half the size of `self`, but for short arrays a
    /// non-allocating insertion sort is used instead.
    ///
    /// # Examples
    ///
    /// ```
    /// use ndarray_slice::{ndarray::arr1, Slice1Ext};
    ///
    /// let mut v = arr1(&[-5i32, 4, 2, -3, 1]);
    ///
    /// v.sort_by_key(|k| k.abs());
    /// assert!(v == arr1(&[1, 2, -3, 4, -5]));
    /// ```
    #[cfg(feature = "alloc")]
    fn sort_by_key<K, F>(&mut self, f: F)
    where
        K: Ord,
        F: FnMut(&A) -> K,
        S: DataMut;

    /// Sorts the array with a key extraction function.
    ///
    /// During sorting, the key function is called at most once per element, by using
    /// temporary storage to remember the results of key evaluation.
    /// The order of calls to the key function is unspecified and may change in future versions
    /// of the standard library.
    ///
    /// This sort is stable (i.e., does not reorder equal elements) and *O*(*mn* + *n* log *n*)
    /// worst-case, where the key function is *O*(*m*).
    ///
    /// For simple key functions (e.g., functions that are property accesses or
    /// basic operations), [`sort_by_key`](Slice1Ext::sort_by_key) is likely to be
    /// faster.
    ///
    /// # Current Implementation
    ///
    /// The current algorithm is based on [pattern-defeating quicksort][pdqsort] by Orson Peters,
    /// which combines the fast average case of randomized quicksort with the fast worst case of
    /// heapsort, while achieving linear time on arrays with certain patterns. It uses some
    /// randomization to avoid degenerate cases, but with a fixed seed to always provide
    /// deterministic behavior.
    ///
    /// In the worst case, the algorithm allocates temporary storage in a `Vec<(K, usize)>` the
    /// length of the array.
    ///
    /// # Examples
    ///
    /// ```
    /// use ndarray_slice::{ndarray::arr1, Slice1Ext};
    ///
    /// let mut v = arr1(&[-5i32, 4, 32, -3, 2]);
    ///
    /// v.sort_by_cached_key(|k| k.to_string());
    /// assert!(v == arr1(&[-3, -5, 2, 32, 4]));
    /// ```
    ///
    /// [pdqsort]: https://github.com/orlp/pdqsort
    #[cfg(feature = "std")]
    fn sort_by_cached_key<K, F>(&mut self, f: F)
    where
        F: FnMut(&A) -> K,
        K: Ord,
        S: DataMut;

    /// Sorts the array in parallel, but might not preserve the order of equal elements.
    ///
    /// This sort is unstable (i.e., may reorder equal elements), in-place
    /// (i.e., does not allocate), and *O*(*n* log *n*) worst-case.
    ///
    /// # Current Implementation
    ///
    /// The current algorithm is based on [pattern-defeating quicksort][pdqsort] by Orson Peters,
    /// which combines the fast average case of randomized quicksort with the fast worst case of
    /// heapsort, while achieving linear time on arrays with certain patterns. It uses some
    /// randomization to avoid degenerate cases, but with a fixed seed to always provide
    /// deterministic behavior.
    ///
    /// It is typically faster than stable sorting, except in a few special cases, e.g., when the
    /// array consists of several concatenated sorted sequences.
    ///
    /// All quicksorts work in two stages: partitioning into two halves followed by recursive
    /// calls. The partitioning phase is sequential, but the two recursive calls are performed in
    /// parallel.
    ///
    /// # Examples
    ///
    /// ```
    /// use ndarray_slice::{ndarray::arr1, Slice1Ext};
    ///
    /// let mut v = arr1(&[-5, 4, 1, -3, 2]);
    ///
    /// v.par_sort_unstable();
    /// assert!(v == arr1(&[-5, -3, 1, 2, 4]));
    /// ```
    ///
    /// [pdqsort]: https://github.com/orlp/pdqsort
    #[cfg(feature = "rayon")]
    fn par_sort_unstable(&mut self)
    where
        A: Ord + Send,
        S: DataMut;
    /// Sorts the array in parallel with a comparator function, but might not preserve the order of equal
    /// elements.
    ///
    /// This sort is unstable (i.e., may reorder equal elements), in-place
    /// (i.e., does not allocate), and *O*(*n* log *n*) worst-case.
    ///
    /// The comparator function must define a total ordering for the elements in the array. If
    /// the ordering is not total, the order of the elements is unspecified. An order is a
    /// total order if it is (for all `a`, `b` and `c`):
    ///
    /// * total and antisymmetric: exactly one of `a < b`, `a == b` or `a > b` is true, and
    /// * transitive, `a < b` and `b < c` implies `a < c`. The same must hold for both `==` and `>`.
    ///
    /// For example, while [`f64`] doesn't implement [`Ord`] because `NaN != NaN`, we can use
    /// `partial_cmp` as our sort function when we know the array doesn't contain a `NaN`.
    ///
    /// ```
    /// use ndarray_slice::{ndarray::arr1, Slice1Ext};
    ///
    /// let mut floats = arr1(&[5f64, 4.0, 1.0, 3.0, 2.0]);
    /// floats.par_sort_unstable_by(|a, b| a.partial_cmp(b).unwrap());
    /// assert_eq!(floats, arr1(&[1.0, 2.0, 3.0, 4.0, 5.0]));
    /// ```
    ///
    /// # Current Implementation
    ///
    /// The current algorithm is based on [pattern-defeating quicksort][pdqsort] by Orson Peters,
    /// which combines the fast average case of randomized quicksort with the fast worst case of
    /// heapsort, while achieving linear time on arrays with certain patterns. It uses some
    /// randomization to avoid degenerate cases, but with a fixed seed to always provide
    /// deterministic behavior.
    ///
    /// It is typically faster than stable sorting, except in a few special cases, e.g., when the
    /// array consists of several concatenated sorted sequences.
    ///
    /// All quicksorts work in two stages: partitioning into two halves followed by recursive
    /// calls. The partitioning phase is sequential, but the two recursive calls are performed in
    /// parallel.
    ///
    /// # Examples
    ///
    /// ```
    /// use ndarray_slice::{ndarray::arr1, Slice1Ext};
    ///
    /// let mut v = arr1(&[5, 4, 1, 3, 2]);
    /// v.par_sort_unstable_by(|a, b| a.cmp(b));
    /// assert!(v == arr1(&[1, 2, 3, 4, 5]));
    ///
    /// // reverse sorting
    /// v.par_sort_unstable_by(|a, b| b.cmp(a));
    /// assert!(v == arr1(&[5, 4, 3, 2, 1]));
    /// ```
    ///
    /// [pdqsort]: https://github.com/orlp/pdqsort
    #[cfg(feature = "rayon")]
    fn par_sort_unstable_by<F>(&mut self, compare: F)
    where
        A: Send,
        F: Fn(&A, &A) -> Ordering + Sync,
        S: DataMut;
    /// Sorts the array in parallel with a key extraction function, but might not preserve the order of equal
    /// elements.
    ///
    /// This sort is unstable (i.e., may reorder equal elements), in-place
    /// (i.e., does not allocate), and *O*(*mn* log *n*) worst-case, where the key function is
    /// *O*(*m*).
    ///
    /// # Current Implementation
    ///
    /// The current algorithm is based on [pattern-defeating quicksort][pdqsort] by Orson Peters,
    /// which combines the fast average case of randomized quicksort with the fast worst case of
    /// heapsort, while achieving linear time on arrays with certain patterns. It uses some
    /// randomization to avoid degenerate cases, but with a fixed seed to always provide
    /// deterministic behavior.
    ///
    /// Due to its key calling strategy, [`par_sort_unstable_by_key`](#method.par_sort_unstable_by_key)
    /// is likely to be slower than [`par_sort_by_cached_key`](#method.par_sort_by_cached_key) in
    /// cases where the key function is expensive.
    ///
    /// All quicksorts work in two stages: partitioning into two halves followed by recursive
    /// calls. The partitioning phase is sequential, but the two recursive calls are performed in
    /// parallel.
    ///
    /// # Examples
    ///
    /// ```
    /// use ndarray_slice::{ndarray::arr1, Slice1Ext};
    ///
    /// let mut v = arr1(&[-5i32, 4, 2, -3, 1]);
    ///
    /// v.par_sort_unstable_by_key(|k| k.abs());
    /// assert!(v == arr1(&[1, 2, -3, 4, -5]));
    /// ```
    ///
    /// [pdqsort]: https://github.com/orlp/pdqsort
    #[cfg(feature = "rayon")]
    fn par_sort_unstable_by_key<K, F>(&mut self, f: F)
    where
        A: Send,
        K: Ord,
        F: Fn(&A) -> K + Sync,
        S: DataMut;

    /// Sorts the array, but might not preserve the order of equal elements.
    ///
    /// This sort is unstable (i.e., may reorder equal elements), in-place
    /// (i.e., does not allocate), and *O*(*n* log *n*) worst-case.
    ///
    /// # Current Implementation
    ///
    /// The current algorithm is based on [pattern-defeating quicksort][pdqsort] by Orson Peters,
    /// which combines the fast average case of randomized quicksort with the fast worst case of
    /// heapsort, while achieving linear time on arrays with certain patterns. It uses some
    /// randomization to avoid degenerate cases, but with a fixed seed to always provide
    /// deterministic behavior.
    ///
    /// It is typically faster than stable sorting, except in a few special cases, e.g., when the
    /// array consists of several concatenated sorted sequences.
    ///
    /// # Examples
    ///
    /// ```
    /// use ndarray_slice::{ndarray::arr1, Slice1Ext};
    ///
    /// let mut v = arr1(&[-5, 4, 1, -3, 2]);
    ///
    /// v.sort_unstable();
    /// assert!(v == arr1(&[-5, -3, 1, 2, 4]));
    /// ```
    ///
    /// [pdqsort]: https://github.com/orlp/pdqsort
    fn sort_unstable(&mut self)
    where
        A: Ord,
        S: DataMut;
    /// Sorts the array with a comparator function, but might not preserve the order of equal
    /// elements.
    ///
    /// This sort is unstable (i.e., may reorder equal elements), in-place
    /// (i.e., does not allocate), and *O*(*n* log *n*) worst-case.
    ///
    /// The comparator function must define a total ordering for the elements in the array. If
    /// the ordering is not total, the order of the elements is unspecified. An order is a
    /// total order if it is (for all `a`, `b` and `c`):
    ///
    /// * total and antisymmetric: exactly one of `a < b`, `a == b` or `a > b` is true, and
    /// * transitive, `a < b` and `b < c` implies `a < c`. The same must hold for both `==` and `>`.
    ///
    /// For example, while [`f64`] doesn't implement [`Ord`] because `NaN != NaN`, we can use
    /// `partial_cmp` as our sort function when we know the array doesn't contain a `NaN`.
    ///
    /// ```
    /// use ndarray_slice::{ndarray::arr1, Slice1Ext};
    ///
    /// let mut floats = arr1(&[5f64, 4.0, 1.0, 3.0, 2.0]);
    /// floats.sort_unstable_by(|a, b| a.partial_cmp(b).unwrap());
    /// assert_eq!(floats, arr1(&[1.0, 2.0, 3.0, 4.0, 5.0]));
    /// ```
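    ///
    /// If the array may contain `NaN`s, a total order such as `f64::total_cmp` can be used
    /// instead, for example:
    ///
    /// ```
    /// use ndarray_slice::{ndarray::arr1, Slice1Ext};
    ///
    /// let mut floats = arr1(&[2.0f64, f64::NAN, 1.0]);
    /// // `f64::total_cmp` orders `NaN` after all other values.
    /// floats.sort_unstable_by(f64::total_cmp);
    /// assert_eq!(floats[0], 1.0);
    /// assert_eq!(floats[1], 2.0);
    /// assert!(floats[2].is_nan());
    /// ```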
    ///
    /// # Current Implementation
    ///
    /// The current algorithm is based on [pattern-defeating quicksort][pdqsort] by Orson Peters,
    /// which combines the fast average case of randomized quicksort with the fast worst case of
    /// heapsort, while achieving linear time on arrays with certain patterns. It uses some
    /// randomization to avoid degenerate cases, but with a fixed seed to always provide
    /// deterministic behavior.
    ///
    /// It is typically faster than stable sorting, except in a few special cases, e.g., when the
    /// array consists of several concatenated sorted sequences.
    ///
    /// # Examples
    ///
    /// ```
    /// use ndarray_slice::{ndarray::arr1, Slice1Ext};
    ///
    /// let mut v = arr1(&[5, 4, 1, 3, 2]);
    /// v.sort_unstable_by(|a, b| a.cmp(b));
    /// assert!(v == arr1(&[1, 2, 3, 4, 5]));
    ///
    /// // reverse sorting
    /// v.sort_unstable_by(|a, b| b.cmp(a));
    /// assert!(v == arr1(&[5, 4, 3, 2, 1]));
    /// ```
    ///
    /// [pdqsort]: https://github.com/orlp/pdqsort
    fn sort_unstable_by<F>(&mut self, compare: F)
    where
        F: FnMut(&A, &A) -> Ordering,
        S: DataMut;
    /// Sorts the array with a key extraction function, but might not preserve the order of equal
    /// elements.
    ///
    /// This sort is unstable (i.e., may reorder equal elements), in-place
    /// (i.e., does not allocate), and *O*(*mn* log *n*) worst-case, where the key function is
    /// *O*(*m*).
    ///
    /// # Current Implementation
    ///
    /// The current algorithm is based on [pattern-defeating quicksort][pdqsort] by Orson Peters,
    /// which combines the fast average case of randomized quicksort with the fast worst case of
    /// heapsort, while achieving linear time on arrays with certain patterns. It uses some
    /// randomization to avoid degenerate cases, but with a fixed seed to always provide
    /// deterministic behavior.
    ///
    /// Due to its key calling strategy, [`sort_unstable_by_key`](#method.sort_unstable_by_key)
    /// is likely to be slower than [`sort_by_cached_key`](#method.sort_by_cached_key) in
    /// cases where the key function is expensive.
    ///
    /// # Examples
    ///
    /// ```
    /// use ndarray_slice::{ndarray::arr1, Slice1Ext};
    ///
    /// let mut v = arr1(&[-5i32, 4, 2, -3, 1]);
    ///
    /// v.sort_unstable_by_key(|k| k.abs());
    /// assert!(v == arr1(&[1, 2, -3, 4, -5]));
    /// ```
    ///
    /// [pdqsort]: https://github.com/orlp/pdqsort
    fn sort_unstable_by_key<K, F>(&mut self, f: F)
    where
        K: Ord,
        F: FnMut(&A) -> K,
        S: DataMut;

    /// Checks if the elements of this array are sorted.
    ///
    /// That is, for each element `a` and its following element `b`, `a <= b` must hold. If the
    /// array yields exactly zero or one element, `true` is returned.
    ///
    /// Note that if `Self::Item` is only `PartialOrd`, but not `Ord`, the above definition
    /// implies that this function returns `false` if any two consecutive items are not
    /// comparable.
    ///
    /// # Examples
    ///
    /// ```
    /// use ndarray_slice::{ndarray::arr1, Slice1Ext};
    ///
    /// let empty: [i32; 0] = [];
    ///
    /// assert!(arr1(&[1, 2, 2, 9]).is_sorted());
    /// assert!(!arr1(&[1, 3, 2, 4]).is_sorted());
    /// assert!(arr1(&[0]).is_sorted());
    /// assert!(arr1(&empty).is_sorted());
    /// assert!(!arr1(&[0.0, 1.0, f32::NAN]).is_sorted());
    /// ```
    #[must_use]
    fn is_sorted(&self) -> bool
    where
        A: PartialOrd;
    /// Checks if the elements of this array are sorted using the given comparator function.
    ///
    /// Instead of using `PartialOrd::partial_cmp`, this function uses the given `compare`
    /// function to determine whether two elements are to be considered in sorted order.
    ///
    /// # Examples
    ///
    /// ```
    /// use ndarray_slice::{ndarray::arr1, Slice1Ext};
    ///
    /// assert!(arr1(&[1, 2, 2, 9]).is_sorted_by(|a, b| a <= b));
    /// assert!(!arr1(&[1, 2, 2, 9]).is_sorted_by(|a, b| a < b));
    ///
    /// assert!(arr1(&[0]).is_sorted_by(|a, b| true));
    /// assert!(arr1(&[0]).is_sorted_by(|a, b| false));
    ///
    /// let empty: [i32; 0] = [];
    /// assert!(arr1(&empty).is_sorted_by(|a, b| false));
    /// assert!(arr1(&empty).is_sorted_by(|a, b| true));
    /// ```
    #[must_use]
    fn is_sorted_by<F>(&self, compare: F) -> bool
    where
        F: FnMut(&A, &A) -> bool;
    /// Checks if the elements of this array are sorted using the given key extraction function.
    ///
    /// Instead of comparing the array's elements directly, this function compares the keys of the
    /// elements, as determined by `f`. Apart from that, it's equivalent to [`is_sorted`]; see its
    /// documentation for more information.
    ///
    /// [`is_sorted`]: Slice1Ext::is_sorted
    ///
    /// # Examples
    ///
    /// ```
    /// use ndarray_slice::{ndarray::arr1, Slice1Ext};
    ///
    /// assert!(arr1(&["c", "bb", "aaa"]).is_sorted_by_key(|s| s.len()));
    /// assert!(!arr1(&[-2i32, -1, 0, 3]).is_sorted_by_key(|n| n.abs()));
    /// ```
    #[must_use]
    fn is_sorted_by_key<F, K>(&self, f: F) -> bool
    where
        F: FnMut(&A) -> K,
        K: PartialOrd;

    /// Reorder the array in parallel such that the elements at `indices` are at their final sorted position.
    ///
    /// Bulk version of [`select_nth_unstable`] extending `collection` with `&mut element`
    /// in the order of `indices`. The provided `indices` must be sorted and unique which can be
    /// achieved with [`par_sort_unstable`] followed by [`partition_dedup`].
    ///
    /// # Current Implementation
    ///
    /// The current algorithm chooses `at = indices.len() / 2` as pivot index and recurses in parallel into the
    /// left and right subviews of `indices` (i.e., `..at` and `at + 1..`) with corresponding left
    /// and right subviews of `self` (i.e., `..pivot` and `pivot + 1..`) where `pivot = indices[at]`.
    /// Requiring `indices` to be already sorted reduces the time complexity in the length *m* of
    /// `indices` from *O*(*m*) to *O*(log *m*) compared to invoking [`select_nth_unstable`] on the
    /// full view of `self` for each index.
    ///
    /// # Panics
    ///
    /// Panics when any `indices[i] >= len()`, meaning it always panics on empty arrays. Panics
    /// when `indices` is unsorted or contains duplicates.
    ///
    /// [`par_sort_unstable`]: Slice1Ext::par_sort_unstable
    /// [`partition_dedup`]: Slice1Ext::partition_dedup
    /// [`select_nth_unstable`]: Slice1Ext::select_nth_unstable
    ///
    /// # Examples
    ///
    /// ```
    /// use ndarray_slice::{ndarray::arr1, Slice1Ext};
    ///
    /// let mut v = arr1(&[-5i32, 4, 2, -3, 1, 9, 3, 4, 0]);
    ///
    /// // Find values at following indices.
    /// let indices = arr1(&[1, 4, 6]);
    ///
    /// let mut values = Vec::new();
    /// v.par_select_many_nth_unstable(&indices, &mut values);
    ///
    /// assert!(values == [&-3, &2, &4]);
    /// ```
    #[cfg(feature = "rayon")]
    fn par_select_many_nth_unstable<'a, S2>(
        &'a mut self,
        indices: &ArrayBase<S2, Ix1>,
        collection: &mut Vec<&'a mut A>,
    ) where
        A: Ord + Send,
        S: DataMut,
        S2: Data<Elem = usize> + Sync;
    /// Reorder the array in parallel with a comparator function such that the elements at `indices` are at
    /// their final sorted position.
    ///
    /// Bulk version of [`select_nth_unstable_by`] extending `collection` with `&mut element`
    /// in the order of `indices`. The provided `indices` must be sorted and unique which can be
    /// achieved with [`par_sort_unstable`] followed by [`partition_dedup`].
    ///
    /// # Current Implementation
    ///
    /// The current algorithm chooses `at = indices.len() / 2` as pivot index and recurses in parallel into the
    /// left and right subviews of `indices` (i.e., `..at` and `at + 1..`) with corresponding left
    /// and right subviews of `self` (i.e., `..pivot` and `pivot + 1..`) where `pivot = indices[at]`.
    /// Requiring `indices` to be already sorted reduces the time complexity in the length *m* of
    /// `indices` from *O*(*m*) to *O*(log *m*) compared to invoking [`select_nth_unstable_by`] on the
    /// full view of `self` for each index.
    ///
    /// # Panics
    ///
    /// Panics when any `indices[i] >= len()`, meaning it always panics on empty arrays. Panics
    /// when `indices` is unsorted or contains duplicates.
    ///
    /// [`par_sort_unstable`]: Slice1Ext::par_sort_unstable
    /// [`partition_dedup`]: Slice1Ext::partition_dedup
    /// [`select_nth_unstable_by`]: Slice1Ext::select_nth_unstable_by
    ///
    /// # Examples
    ///
    /// ```
    /// use ndarray_slice::{ndarray::arr1, Slice1Ext};
    ///
    /// let mut v = arr1(&[-5i32, 4, 2, -3, 1, 9, 3, 4, 0]);
    ///
    /// // Find values at following indices.
    /// let indices = arr1(&[1, 4, 6]);
    ///
    /// let mut values = Vec::new();
    /// v.par_select_many_nth_unstable_by(&indices, &mut values, |a, b| b.cmp(a));
    ///
    /// assert!(values == [&4, &2, &0]);
    /// ```
    #[cfg(feature = "rayon")]
    fn par_select_many_nth_unstable_by<'a, F, S2>(
        &'a mut self,
        indices: &ArrayBase<S2, Ix1>,
        collection: &mut Vec<&'a mut A>,
        compare: F,
    ) where
        A: Send,
        F: Fn(&A, &A) -> Ordering + Sync,
        S: DataMut,
        S2: Data<Elem = usize> + Sync;
    /// Reorder the array in parallel with a key extraction function such that the elements at `indices` are at
    /// their final sorted position.
    ///
    /// Bulk version of [`select_nth_unstable_by_key`] extending `collection` with `&mut element`
    /// in the order of `indices`. The provided `indices` must be sorted and unique which can be
    /// achieved with [`par_sort_unstable`] followed by [`partition_dedup`].
    ///
    /// # Current Implementation
    ///
    /// The current algorithm chooses `at = indices.len() / 2` as pivot index and recurses in parallel into the
    /// left and right subviews of `indices` (i.e., `..at` and `at + 1..`) with corresponding left
    /// and right subviews of `self` (i.e., `..pivot` and `pivot + 1..`) where `pivot = indices[at]`.
    /// Requiring `indices` to be already sorted reduces the time complexity in the length *m* of
    /// `indices` from *O*(*m*) to *O*(log *m*) compared to invoking [`select_nth_unstable_by_key`] on the
    /// full view of `self` for each index.
    ///
    /// # Panics
    ///
    /// Panics when any `indices[i] >= len()`, meaning it always panics on empty arrays. Panics
    /// when `indices` is unsorted or contains duplicates.
    ///
    /// [`par_sort_unstable`]: Slice1Ext::par_sort_unstable
    /// [`partition_dedup`]: Slice1Ext::partition_dedup
    /// [`select_nth_unstable_by_key`]: Slice1Ext::select_nth_unstable_by_key
    ///
    /// # Examples
    ///
    /// ```
    /// use ndarray_slice::{ndarray::arr1, Slice1Ext};
    ///
    /// let mut v = arr1(&[-5i32, 4, 2, -3, 1, 9, 3, 4, 0]);
    ///
    /// // Find values at following indices.
    /// let indices = arr1(&[1, 4, 6]);
    ///
    /// let mut values = Vec::new();
    /// v.par_select_many_nth_unstable_by_key(&indices, &mut values, |&a| a.abs());
    ///
    /// assert!(values == [&1, &3, &4]);
    /// ```
    #[cfg(feature = "rayon")]
    fn par_select_many_nth_unstable_by_key<'a, K, F, S2>(
        &'a mut self,
        indices: &ArrayBase<S2, Ix1>,
        collection: &mut Vec<&'a mut A>,
        f: F,
    ) where
        A: Send,
        K: Ord,
        F: Fn(&A) -> K + Sync,
        S: DataMut,
        S2: Data<Elem = usize> + Sync;

    /// Reorder the array such that the elements at `indices` are at their final sorted position.
    ///
    /// Bulk version of [`select_nth_unstable`] extending `collection` with `(index, &mut element)`
    /// tuples in the order of `indices`. The provided `indices` must be sorted and unique which can
    /// be achieved with [`sort_unstable`] followed by [`partition_dedup`].
    ///
    /// # Current Implementation
    ///
    /// The current algorithm chooses `at = indices.len() / 2` as pivot index and recurses into the
    /// left and right subviews of `indices` (i.e., `..at` and `at + 1..`) with corresponding left
    /// and right subviews of `self` (i.e., `..pivot` and `pivot + 1..`) where `pivot = indices[at]`.
    /// Requiring `indices` to be already sorted reduces the time complexity in the length *m* of
    /// `indices` from *O*(*m*) to *O*(log *m*) compared to invoking [`select_nth_unstable`] on the
    /// full view of `self` for each index.
    ///
    /// # Panics
    ///
    /// Panics when any `indices[i] >= len()`, meaning it always panics on empty arrays. Panics
    /// when `indices` is unsorted or contains duplicates.
    ///
    /// [`sort_unstable`]: Slice1Ext::sort_unstable
    /// [`partition_dedup`]: Slice1Ext::partition_dedup
    /// [`select_nth_unstable`]: Slice1Ext::select_nth_unstable
    ///
    /// # Examples
    ///
    /// ```
    /// use ndarray_slice::{ndarray::arr1, Slice1Ext};
    /// use std::collections::HashMap;
    ///
    /// let mut v = arr1(&[-5i32, 4, 2, -3, 1, 9, 3, 4, 0]);
    ///
    /// // Find values at following indices.
    /// let indices = arr1(&[1, 4, 6]);
    ///
    /// let mut map = HashMap::new();
    /// v.select_many_nth_unstable(&indices, &mut map);
    /// let values = indices.map(|index| *map[index]);
    ///
    /// assert!(values == arr1(&[-3, 2, 4]));
    /// ```
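    ///
    /// If the indices are not yet sorted and unique, they can be prepared with [`sort_unstable`]
    /// followed by [`partition_dedup`] first, as sketched below:
    ///
    /// ```
    /// use ndarray_slice::{ndarray::arr1, Slice1Ext};
    /// use std::collections::HashMap;
    ///
    /// let mut v = arr1(&[-5i32, 4, 2, -3, 1, 9, 3, 4, 0]);
    ///
    /// // Unsorted indices with a duplicate.
    /// let mut indices = arr1(&[6, 1, 4, 1]);
    /// indices.sort_unstable();
    /// let (unique, _duplicates) = indices.partition_dedup();
    ///
    /// let mut map = HashMap::new();
    /// v.select_many_nth_unstable(&unique, &mut map);
    ///
    /// assert_eq!(*map[&4], 2);
    /// ```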
    fn select_many_nth_unstable<'a, E, S2>(
        &'a mut self,
        indices: &ArrayBase<S2, Ix1>,
        collection: &mut E,
    ) where
        A: Ord + 'a,
        E: Extend<(usize, &'a mut A)>,
        S: DataMut,
        S2: Data<Elem = usize>;
    /// Reorder the array with a comparator function such that the elements at `indices` are at
    /// their final sorted position.
    ///
    /// Bulk version of [`select_nth_unstable_by`] extending `collection` with `(index, &mut element)`
    /// tuples in the order of `indices`. The provided `indices` must be sorted and unique which can
    /// be achieved with [`sort_unstable`] followed by [`partition_dedup`].
    ///
    /// # Current Implementation
    ///
    /// The current algorithm chooses `at = indices.len() / 2` as pivot index and recurses into the
    /// left and right subviews of `indices` (i.e., `..at` and `at + 1..`) with corresponding left
    /// and right subviews of `self` (i.e., `..pivot` and `pivot + 1..`) where `pivot = indices[at]`.
    /// Requiring `indices` to be already sorted reduces the time complexity in the length *m* of
    /// `indices` from *O*(*m*) to *O*(log *m*) compared to invoking [`select_nth_unstable_by`] on the
    /// full view of `self` for each index.
    ///
    /// # Panics
    ///
    /// Panics when any `indices[i] >= len()`, meaning it always panics on empty arrays. Panics
    /// when `indices` is unsorted or contains duplicates.
    ///
    /// [`sort_unstable`]: Slice1Ext::sort_unstable
    /// [`partition_dedup`]: Slice1Ext::partition_dedup
    /// [`select_nth_unstable_by`]: Slice1Ext::select_nth_unstable_by
    ///
    /// # Examples
    ///
    /// ```
    /// use ndarray_slice::{ndarray::arr1, Slice1Ext};
    /// use std::collections::HashMap;
    ///
    /// let mut v = arr1(&[-5i32, 4, 2, -3, 1, 9, 3, 4, 0]);
    ///
    /// // Find values at following indices.
    /// let indices = arr1(&[1, 4, 6]);
    ///
    /// let mut map = HashMap::new();
    /// v.select_many_nth_unstable_by(&indices, &mut map, |a, b| b.cmp(a));
    /// let values = indices.map(|index| *map[index]);
    ///
    /// assert!(values == arr1(&[4, 2, 0]));
    /// ```
    fn select_many_nth_unstable_by<'a, E, F, S2>(
        &'a mut self,
        indices: &ArrayBase<S2, Ix1>,
        collection: &mut E,
        compare: F,
    ) where
        A: 'a,
        E: Extend<(usize, &'a mut A)>,
        F: FnMut(&A, &A) -> Ordering,
        S: DataMut,
        S2: Data<Elem = usize>;
    /// Reorder the array with a key extraction function such that the elements at `indices` are at
    /// their final sorted position.
    ///
    /// Bulk version of [`select_nth_unstable_by_key`] extending `collection` with `(index, &mut element)`
    /// tuples in the order of `indices`. The provided `indices` must be sorted and unique which can
    /// be achieved with [`sort_unstable`] followed by [`partition_dedup`].
    ///
    /// # Current Implementation
    ///
    /// The current algorithm chooses `at = indices.len() / 2` as pivot index and recurses into the
    /// left and right subviews of `indices` (i.e., `..at` and `at + 1..`) with corresponding left
    /// and right subviews of `self` (i.e., `..pivot` and `pivot + 1..`) where `pivot = indices[at]`.
    /// Requiring `indices` to be already sorted reduces the time complexity in the length *m* of
    /// `indices` from *O*(*m*) to *O*(log *m*) compared to invoking [`select_nth_unstable_by_key`] on the
    /// full view of `self` for each index.
    ///
    /// # Panics
    ///
    /// Panics when any `indices[i] >= len()`, meaning it always panics on empty arrays. Panics
    /// when `indices` is unsorted or contains duplicates.
    ///
    /// [`sort_unstable`]: Slice1Ext::sort_unstable
    /// [`partition_dedup`]: Slice1Ext::partition_dedup
    /// [`select_nth_unstable_by_key`]: Slice1Ext::select_nth_unstable_by_key
    ///
    /// # Examples
    ///
    /// ```
    /// use ndarray_slice::{ndarray::arr1, Slice1Ext};
    /// use std::collections::HashMap;
    ///
    /// let mut v = arr1(&[-5i32, 4, 2, -3, 1, 9, 3, 4, 0]);
    ///
    /// // Find values at following indices.
    /// let indices = arr1(&[1, 4, 6]);
    ///
    /// let mut map = HashMap::new();
    /// v.select_many_nth_unstable_by_key(&indices, &mut map, |&a| a.abs());
    /// let values = indices.map(|index| *map[index]);
    ///
    /// assert!(values == arr1(&[1, 3, 4]));
    /// ```
    fn select_many_nth_unstable_by_key<'a, E, K, F, S2>(
        &'a mut self,
        indices: &ArrayBase<S2, Ix1>,
        collection: &mut E,
        f: F,
    ) where
        A: 'a,
        E: Extend<(usize, &'a mut A)>,
        K: Ord,
        F: FnMut(&A) -> K,
        S: DataMut,
        S2: Data<Elem = usize>;
    /// Reorder the array such that the element at `index` after the reordering is at its final sorted position.
    ///
    /// This reordering has the additional property that any value at position `i < index` will be
    /// less than or equal to any value at a position `j > index`. Additionally, this reordering is
    /// unstable (i.e. any number of equal elements may end up at position `index`), in-place
    /// (i.e. does not allocate), and runs in *O*(*n*) time.
    /// This function is also known as "kth element" in other libraries.
    ///
    /// It returns a triplet of the following from the reordered array:
    /// the subarray prior to `index`, the element at `index`, and the subarray after `index`;
    /// accordingly, the values in those two subarrays will respectively all be less-than-or-equal-to
    /// and greater-than-or-equal-to the value of the element at `index`.
    ///
    /// # Current Implementation
    ///
    /// The current algorithm is an introselect implementation based on Pattern Defeating Quicksort, which is also
    /// the basis for [`sort_unstable`]. The fallback algorithm is Median of Medians using Tukey's Ninther for
    /// pivot selection, which guarantees linear runtime for all inputs.
    ///
    /// [`sort_unstable`]: Slice1Ext::sort_unstable
    ///
    /// # Panics
    ///
    /// Panics when `index >= len()`, meaning it always panics on empty arrays.
    ///
    /// # Examples
    ///
    /// ```
    /// use ndarray_slice::{ndarray::arr1, Slice1Ext};
    ///
    /// let mut v = arr1(&[-5i32, 4, 2, -3, 1]);
    ///
    /// // Find the items less than or equal to the median, the median, and greater than or equal to
    /// // the median.
    /// let (lesser, median, greater) = v.select_nth_unstable(2);
    ///
    /// assert!(lesser == arr1(&[-3, -5]) || lesser == arr1(&[-5, -3]));
    /// assert_eq!(median, &mut 1);
    /// assert!(greater == arr1(&[4, 2]) || greater == arr1(&[2, 4]));
    ///
    /// // We are only guaranteed the array will be one of the following, based on the way we sort
    /// // about the specified index.
    /// assert!(v == arr1(&[-3, -5, 1, 2, 4]) ||
    ///         v == arr1(&[-5, -3, 1, 2, 4]) ||
    ///         v == arr1(&[-3, -5, 1, 4, 2]) ||
    ///         v == arr1(&[-5, -3, 1, 4, 2]));
    /// ```
    #[must_use]
    fn select_nth_unstable(
        &mut self,
        index: usize,
    ) -> (ArrayViewMut1<'_, A>, &mut A, ArrayViewMut1<'_, A>)
    where
        A: Ord,
        S: DataMut;
    /// Reorder the array with a comparator function such that the element at `index` after the reordering is at
    /// its final sorted position.
    ///
    /// This reordering has the additional property that any value at position `i < index` will be
    /// less than or equal to any value at a position `j > index` using the comparator function.
    /// Additionally, this reordering is unstable (i.e. any number of equal elements may end up at
    /// position `index`), in-place (i.e. does not allocate), and runs in *O*(*n*) time.
    /// This function is also known as "kth element" in other libraries.
    ///
    /// It returns a triplet of the following from
    /// the array reordered according to the provided comparator function: the subarray prior to
    /// `index`, the element at `index`, and the subarray after `index`; accordingly, the values in
    /// those two subarrays will respectively all be less-than-or-equal-to and greater-than-or-equal-to
    /// the value of the element at `index`.
    ///
    /// # Current Implementation
    ///
    /// The current algorithm is an introselect implementation based on Pattern Defeating Quicksort, which is also
    /// the basis for [`sort_unstable`]. The fallback algorithm is Median of Medians using Tukey's Ninther for
    /// pivot selection, which guarantees linear runtime for all inputs.
    ///
    /// [`sort_unstable`]: Slice1Ext::sort_unstable
    ///
    /// # Panics
    ///
    /// Panics when `index >= len()`, meaning it always panics on empty arrays.
    ///
    /// # Examples
    ///
    /// ```
    /// use ndarray_slice::{ndarray::arr1, Slice1Ext};
    ///
    /// let mut v = arr1(&[-5i32, 4, 2, -3, 1]);
    ///
    /// // Find the items less than or equal to the median, the median, and greater than or equal to
    /// // the median as if the array were sorted in descending order.
    /// let (lesser, median, greater) = v.select_nth_unstable_by(2, |a, b| b.cmp(a));
    ///
    /// assert!(lesser == arr1(&[4, 2]) || lesser == arr1(&[2, 4]));
    /// assert_eq!(median, &mut 1);
    /// assert!(greater == arr1(&[-3, -5]) || greater == arr1(&[-5, -3]));
    ///
    /// // We are only guaranteed the array will be one of the following, based on the way we sort
    /// // about the specified index.
    /// assert!(v == arr1(&[2, 4, 1, -5, -3]) ||
    ///         v == arr1(&[2, 4, 1, -3, -5]) ||
    ///         v == arr1(&[4, 2, 1, -5, -3]) ||
    ///         v == arr1(&[4, 2, 1, -3, -5]));
    /// ```
    #[must_use]
    fn select_nth_unstable_by<F>(
        &mut self,
        index: usize,
        compare: F,
    ) -> (ArrayViewMut1<'_, A>, &mut A, ArrayViewMut1<'_, A>)
    where
        F: FnMut(&A, &A) -> Ordering,
        S: DataMut;
    /// Reorder the array with a key extraction function such that the element at `index` after the reordering is
    /// at its final sorted position.
    ///
    /// This reordering has the additional property that any value at position `i < index` will be
    /// less than or equal to any value at a position `j > index` using the key extraction function.
    /// Additionally, this reordering is unstable (i.e. any number of equal elements may end up at
    /// position `index`), in-place (i.e. does not allocate), and runs in *O*(*n*) time.
    /// This function is also known as "kth element" in other libraries.
    ///
    /// It returns a triplet of the following from
    /// the array reordered according to the provided key extraction function: the subarray prior to
    /// `index`, the element at `index`, and the subarray after `index`; accordingly, the values in
    /// those two subarrays will respectively all be less-than-or-equal-to and greater-than-or-equal-to
    /// the value of the element at `index`.
    ///
    /// # Current Implementation
    ///
    /// The current algorithm is an introselect implementation based on Pattern Defeating Quicksort, which is also
    /// the basis for [`sort_unstable`]. The fallback algorithm is Median of Medians using Tukey's Ninther for
    /// pivot selection, which guarantees linear runtime for all inputs.
    ///
    /// [`sort_unstable`]: Slice1Ext::sort_unstable
    ///
    /// # Panics
    ///
    /// Panics when `index >= len()`, meaning it always panics on empty arrays.
    ///
    /// # Examples
    ///
    /// ```
    /// use ndarray_slice::{ndarray::arr1, Slice1Ext};
    ///
    /// let mut v = arr1(&[-5i32, 4, 2, -3, 1]);
    ///
    /// // Find the items less than or equal to the median, the median, and greater than or equal to
    /// // the median as if the array were sorted according to absolute value.
    /// let (lesser, median, greater) = v.select_nth_unstable_by_key(2, |a| a.abs());
    ///
    /// assert!(lesser == arr1(&[1, 2]) || lesser == arr1(&[2, 1]));
    /// assert_eq!(median, &mut -3);
    /// assert!(greater == arr1(&[4, -5]) || greater == arr1(&[-5, 4]));
    ///
    /// // We are only guaranteed the array will be one of the following, based on the way we sort
    /// // about the specified index.
    /// assert!(v == arr1(&[1, 2, -3, 4, -5]) ||
    ///         v == arr1(&[1, 2, -3, -5, 4]) ||
    ///         v == arr1(&[2, 1, -3, 4, -5]) ||
    ///         v == arr1(&[2, 1, -3, -5, 4]));
    /// ```
    #[must_use]
    fn select_nth_unstable_by_key<K, F>(
        &mut self,
        index: usize,
        f: F,
    ) -> (ArrayViewMut1<'_, A>, &mut A, ArrayViewMut1<'_, A>)
    where
        K: Ord,
        F: FnMut(&A) -> K,
        S: DataMut;
1328
1329 /// Returns the index of the partition point according to the given predicate
1330 /// (the index of the first element of the second partition).
1331 ///
1332 /// The array is assumed to be partitioned according to the given predicate.
1333 /// This means that all elements for which the predicate returns true are at the start of the array
1334 /// and all elements for which the predicate returns false are at the end.
1335 /// For example, `[7, 15, 3, 5, 4, 12, 6]` is partitioned under the predicate `x % 2 != 0`
1336 /// (all odd numbers are at the start, all even at the end).
1337 ///
1338 /// If this array is not partitioned, the returned result is unspecified and meaningless,
1339 /// as this method performs a kind of binary search.
1340 ///
1341 /// See also [`binary_search`], [`binary_search_by`], and [`binary_search_by_key`].
1342 ///
1343 /// [`binary_search`]: Slice1Ext::binary_search
1344 /// [`binary_search_by`]: Slice1Ext::binary_search_by
1345 /// [`binary_search_by_key`]: Slice1Ext::binary_search_by_key
1346 ///
1347 /// # Examples
1348 ///
1349 /// ```
1350 /// use ndarray_slice::{
1351 /// ndarray::{arr1, s},
1352 /// Slice1Ext,
1353 /// };
1354 ///
1355 /// let v = arr1(&[1, 2, 3, 3, 5, 6, 7]);
1356 /// let i = v.partition_point(|&x| x < 5);
1357 ///
1358 /// assert_eq!(i, 4);
1359 /// assert!(v.slice(s![..i]).iter().all(|&x| x < 5));
1360 /// assert!(v.slice(s![i..]).iter().all(|&x| !(x < 5)));
1361 /// ```
1362 ///
1363 /// If all elements of the array match the predicate, including if the array
1364 /// is empty, then the length of the array will be returned:
1365 ///
1366 /// ```
1367 /// use ndarray_slice::{ndarray::arr1, Slice1Ext};
1368 ///
1369 /// let a = arr1(&[2, 4, 8]);
1370 /// assert_eq!(a.partition_point(|x| x < &100), a.len());
1371 /// let a = arr1(&[0i32; 0]);
1372 /// assert_eq!(a.partition_point(|x| x < &100), 0);
1373 /// ```
1374 ///
1375	/// If you want to insert an item into a sorted vector while maintaining
1376 /// sort order:
1377 ///
1378 /// ```
1379 /// use ndarray_slice::{ndarray::array, Slice1Ext};
1380 ///
1381 /// let mut s = array![0, 1, 1, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55];
1382 /// let num = 42;
1383 /// let idx = s.partition_point(|&x| x < num);
1384 /// let (mut s, off) = s.into_raw_vec_and_offset();
1385 /// let off = off.unwrap_or_default();
1386 /// s.insert(off + idx, num);
1387 /// assert_eq!(s[off..], [0, 1, 1, 1, 1, 2, 3, 5, 8, 13, 21, 34, 42, 55]);
1388 /// ```
1389 #[must_use]
1390 fn partition_point<P>(&self, pred: P) -> usize
1391 where
1392 P: FnMut(&A) -> bool;
1393
1394 /// Returns `true` if the array contains an element with the given value.
1395 ///
1396 /// This operation is *O*(*n*).
1397 ///
1398 /// Note that if you have a sorted array, [`binary_search`] may be faster.
1399 ///
1400 /// [`binary_search`]: Slice1Ext::binary_search
1401 ///
1402 /// # Examples
1403 ///
1404 /// ```
1405 /// use ndarray_slice::{ndarray::arr1, Slice1Ext};
1406 ///
1407 /// let v = arr1(&[10, 40, 30]);
1408 /// assert!(v.contains(&30));
1409 /// assert!(!v.contains(&50));
1410 /// ```
1411 ///
1412 /// If you do not have a `&A`, but some other value that you can compare
1413 /// with one (for example, `String` implements `PartialEq<str>`), you can
1414 /// use `iter().any`:
1415 ///
1416 /// ```
1417 /// use ndarray_slice::{ndarray::arr1, Slice1Ext};
1418 ///
1419 /// let v = arr1(&[String::from("hello"), String::from("world")]); // array of `String`
1420 /// assert!(v.iter().any(|e| e == "hello")); // search with `&str`
1421 /// assert!(!v.iter().any(|e| e == "hi"));
1422 /// ```
1423 #[must_use]
1424 fn contains(&self, x: &A) -> bool
1425 where
1426 A: PartialEq;
1427
1428 /// Binary searches this array for a given element.
1429 /// This behaves similarly to [`contains`] if this array is sorted.
1430 ///
1431 /// If the value is found then [`Result::Ok`] is returned, containing the
1432 /// index of the matching element. If there are multiple matches, then any
1433 /// one of the matches could be returned. The index is chosen
1434 /// deterministically, but is subject to change in future versions of Rust.
1435 /// If the value is not found then [`Result::Err`] is returned, containing
1436 /// the index where a matching element could be inserted while maintaining
1437 /// sorted order.
1438 ///
1439 /// See also [`binary_search_by`], [`binary_search_by_key`], and [`partition_point`].
1440 ///
1441 /// [`contains`]: Slice1Ext::contains
1442 /// [`binary_search_by`]: Slice1Ext::binary_search_by
1443 /// [`binary_search_by_key`]: Slice1Ext::binary_search_by_key
1444 /// [`partition_point`]: Slice1Ext::partition_point
1445 ///
1446 /// # Examples
1447 ///
1448 /// Looks up a series of four elements. The first is found, with a
1449 /// uniquely determined position; the second and third are not
1450 /// found; the fourth could match any position in `[1, 4]`.
1451 ///
1452 /// ```
1453 /// use ndarray_slice::{ndarray::arr1, Slice1Ext};
1454 ///
1455 /// let s = arr1(&[0, 1, 1, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55]);
1456 ///
1457 /// assert_eq!(s.binary_search(&13), Ok(9));
1458 /// assert_eq!(s.binary_search(&4), Err(7));
1459 /// assert_eq!(s.binary_search(&100), Err(13));
1460 /// let r = s.binary_search(&1);
1461 /// assert!(match r { Ok(1..=4) => true, _ => false, });
1462 /// ```
1463 ///
1464 /// If you want to find that whole *range* of matching items, rather than
1465 /// an arbitrary matching one, that can be done using [`partition_point`]:
1466 /// ```
1467 /// use ndarray_slice::{
1468 /// ndarray::{arr1, s},
1469 /// Slice1Ext,
1470 /// };
1471 ///
1472 /// let s = arr1(&[0, 1, 1, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55]);
1473 ///
1474 /// let low = s.partition_point(|x| x < &1);
1475 /// assert_eq!(low, 1);
1476 /// let high = s.partition_point(|x| x <= &1);
1477 /// assert_eq!(high, 5);
1478 /// let r = s.binary_search(&1);
1479 /// assert!((low..high).contains(&r.unwrap()));
1480 ///
1481 /// assert!(s.slice(s![..low]).iter().all(|&x| x < 1));
1482 /// assert!(s.slice(s![low..high]).iter().all(|&x| x == 1));
1483 /// assert!(s.slice(s![high..]).iter().all(|&x| x > 1));
1484 ///
1485 /// // For something not found, the "range" of equal items is empty
1486 /// assert_eq!(s.partition_point(|x| x < &11), 9);
1487 /// assert_eq!(s.partition_point(|x| x <= &11), 9);
1488 /// assert_eq!(s.binary_search(&11), Err(9));
1489 /// ```
1490 ///
1491	/// If you want to insert an item into a sorted vector while maintaining
1492 /// sort order, consider using [`partition_point`]:
1493 ///
1494 /// ```
1495 /// use ndarray_slice::{
1496 /// ndarray::{arr1, array},
1497 /// Slice1Ext,
1498 /// };
1499 ///
1500 /// let mut s = array![0, 1, 1, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55];
1501 /// let num = 42;
1502 /// let idx = s.partition_point(|&x| x < num);
1503 /// // The above is equivalent to `let idx = s.binary_search(&num).unwrap_or_else(|x| x);`
1504 /// let (mut s, off) = s.into_raw_vec_and_offset();
1505 /// let off = off.unwrap_or_default();
1506 /// s.insert(off + idx, num);
1507 /// assert_eq!(s[off..], [0, 1, 1, 1, 1, 2, 3, 5, 8, 13, 21, 34, 42, 55]);
1508 /// ```
1509 fn binary_search(&self, x: &A) -> Result<usize, usize>
1510 where
1511 A: Ord;
1512 /// Binary searches this array with a comparator function.
1513 /// This behaves similarly to [`contains`] if this array is sorted.
1514 ///
1515 /// The comparator function should implement an order consistent
1516 /// with the sort order of the underlying array, returning an
1517 /// order code that indicates whether its argument is `Less`,
1518	/// `Equal` or `Greater` than the desired target.
1519 ///
1520 /// If the value is found then [`Result::Ok`] is returned, containing the
1521 /// index of the matching element. If there are multiple matches, then any
1522 /// one of the matches could be returned. The index is chosen
1523 /// deterministically, but is subject to change in future versions of Rust.
1524 /// If the value is not found then [`Result::Err`] is returned, containing
1525 /// the index where a matching element could be inserted while maintaining
1526 /// sorted order.
1527 ///
1528 /// See also [`binary_search`], [`binary_search_by_key`], and [`partition_point`].
1529 ///
1530 /// [`contains`]: Slice1Ext::contains
1531 /// [`binary_search`]: Slice1Ext::binary_search
1532 /// [`binary_search_by_key`]: Slice1Ext::binary_search_by_key
1533 /// [`partition_point`]: Slice1Ext::partition_point
1534 ///
1535 /// # Examples
1536 ///
1537 /// Looks up a series of four elements. The first is found, with a
1538 /// uniquely determined position; the second and third are not
1539 /// found; the fourth could match any position in `[1, 4]`.
1540 ///
1541 /// ```
1542 /// use ndarray_slice::{ndarray::arr1, Slice1Ext};
1543 ///
1544 /// let s = arr1(&[0, 1, 1, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55]);
1545 ///
1546 /// let seek = 13;
1547 /// assert_eq!(s.binary_search_by(|probe| probe.cmp(&seek)), Ok(9));
1548 /// let seek = 4;
1549 /// assert_eq!(s.binary_search_by(|probe| probe.cmp(&seek)), Err(7));
1550 /// let seek = 100;
1551 /// assert_eq!(s.binary_search_by(|probe| probe.cmp(&seek)), Err(13));
1552 /// let seek = 1;
1553 /// let r = s.binary_search_by(|probe| probe.cmp(&seek));
1554 /// assert!(match r { Ok(1..=4) => true, _ => false, });
1555 /// ```
1556 fn binary_search_by<F>(&self, f: F) -> Result<usize, usize>
1557 where
1558 F: FnMut(&A) -> Ordering;
1559 /// Binary searches this array with a key extraction function.
1560 /// This behaves similarly to [`contains`] if this array is sorted.
1561 ///
1562 #[cfg_attr(
1563 feature = "alloc",
1564 doc = "\
1565 Assumes that the array is sorted by the key, for instance with
1566 [`sort_by_key`] using the same key extraction function."
1567 )]
1568 ///
1569 /// If the value is found then [`Result::Ok`] is returned, containing the
1570 /// index of the matching element. If there are multiple matches, then any
1571 /// one of the matches could be returned. The index is chosen
1572 /// deterministically, but is subject to change in future versions of Rust.
1573 /// If the value is not found then [`Result::Err`] is returned, containing
1574 /// the index where a matching element could be inserted while maintaining
1575 /// sorted order.
1576 ///
1577 /// See also [`binary_search`], [`binary_search_by`], and [`partition_point`].
1578 ///
1579 /// [`contains`]: Slice1Ext::contains
1580 #[cfg_attr(
1581 feature = "alloc",
1582 doc = "\
1583 [`sort_by_key`]: Slice1Ext::sort_by_key"
1584 )]
1585 /// [`binary_search`]: Slice1Ext::binary_search
1586 /// [`binary_search_by`]: Slice1Ext::binary_search_by
1587 /// [`partition_point`]: Slice1Ext::partition_point
1588 ///
1589 /// # Examples
1590 ///
1591	/// Looks up a series of four elements in an array of pairs sorted by
1592 /// their second elements. The first is found, with a uniquely
1593 /// determined position; the second and third are not found; the
1594 /// fourth could match any position in `[1, 4]`.
1595 ///
1596 /// ```
1597 /// use ndarray_slice::{ndarray::arr1, Slice1Ext};
1598 ///
1599 /// let s = arr1(&[(0, 0), (2, 1), (4, 1), (5, 1), (3, 1),
1600 /// (1, 2), (2, 3), (4, 5), (5, 8), (3, 13),
1601 /// (1, 21), (2, 34), (4, 55)]);
1602 ///
1603 /// assert_eq!(s.binary_search_by_key(&13, |&(a, b)| b), Ok(9));
1604 /// assert_eq!(s.binary_search_by_key(&4, |&(a, b)| b), Err(7));
1605 /// assert_eq!(s.binary_search_by_key(&100, |&(a, b)| b), Err(13));
1606 /// let r = s.binary_search_by_key(&1, |&(a, b)| b);
1607 /// assert!(match r { Ok(1..=4) => true, _ => false, });
1608 /// ```
1609 fn binary_search_by_key<B, F>(&self, b: &B, f: F) -> Result<usize, usize>
1610 where
1611 F: FnMut(&A) -> B,
1612 B: Ord;
1613
1614 /// Moves all consecutive repeated elements to the end of the array according to the
1615 /// [`PartialEq`] trait implementation.
1616 ///
1617 /// Returns two arrays. The first contains no consecutive repeated elements.
1618 /// The second contains all the duplicates in no specified order.
1619 ///
1620 /// If the array is sorted, the first returned array contains no duplicates.
1621 ///
1622 /// # Examples
1623 ///
1624 /// ```
1625 /// use ndarray_slice::{ndarray::arr1, Slice1Ext};
1626 ///
1627 /// let mut array = arr1(&[1, 2, 2, 3, 3, 2, 1, 1]);
1628 ///
1629 /// let (dedup, duplicates) = array.partition_dedup();
1630 ///
1631 /// assert_eq!(dedup, arr1(&[1, 2, 3, 2, 1]));
1632 /// assert_eq!(duplicates, arr1(&[2, 3, 1]));
1633 /// ```
1634 fn partition_dedup(&mut self) -> (ArrayViewMut1<'_, A>, ArrayViewMut1<'_, A>)
1635 where
1636 A: PartialEq,
1637 S: DataMut;
1638 /// Moves all but the first of consecutive elements to the end of the array satisfying
1639 /// a given equality relation.
1640 ///
1641 /// Returns two arrays. The first contains no consecutive repeated elements.
1642 /// The second contains all the duplicates in no specified order.
1643 ///
1644 /// The `same_bucket` function is passed references to two elements from the array and
1645 /// must determine if the elements compare equal. The elements are passed in opposite order
1646 /// from their order in the array, so if `same_bucket(a, b)` returns `true`, `a` is moved
1647	/// to the end of the array.
1648 ///
1649 /// If the array is sorted, the first returned array contains no duplicates.
1650 ///
1651 /// # Examples
1652 ///
1653 /// ```
1654 /// use ndarray_slice::{ndarray::arr1, Slice1Ext};
1655 ///
1656 /// let mut array = arr1(&["foo", "Foo", "BAZ", "Bar", "bar", "baz", "BAZ"]);
1657 ///
1658 /// let (dedup, duplicates) = array.partition_dedup_by(|a, b| a.eq_ignore_ascii_case(b));
1659 ///
1660 /// assert_eq!(dedup, arr1(&["foo", "BAZ", "Bar", "baz"]));
1661 /// assert_eq!(duplicates, arr1(&["bar", "Foo", "BAZ"]));
1662 /// ```
1663 fn partition_dedup_by<F>(
1664 &mut self,
1665 same_bucket: F,
1666 ) -> (ArrayViewMut1<'_, A>, ArrayViewMut1<'_, A>)
1667 where
1668 F: FnMut(&mut A, &mut A) -> bool,
1669 S: DataMut;
1670 /// Moves all but the first of consecutive elements to the end of the array that resolve
1671 /// to the same key.
1672 ///
1673 /// Returns two arrays. The first contains no consecutive repeated elements.
1674 /// The second contains all the duplicates in no specified order.
1675 ///
1676 /// If the array is sorted, the first returned array contains no duplicates.
1677 ///
1678 /// # Examples
1679 ///
1680 /// ```
1681 /// use ndarray_slice::{ndarray::arr1, Slice1Ext};
1682 ///
1683 /// let mut array = arr1(&[10, 20, 21, 30, 30, 20, 11, 13]);
1684 ///
1685 /// let (dedup, duplicates) = array.partition_dedup_by_key(|i| *i / 10);
1686 ///
1687 /// assert_eq!(dedup, arr1(&[10, 20, 30, 20, 11]));
1688 /// assert_eq!(duplicates, arr1(&[21, 30, 13]));
1689 /// ```
1690 fn partition_dedup_by_key<K, F>(
1691 &mut self,
1692 key: F,
1693 ) -> (ArrayViewMut1<'_, A>, ArrayViewMut1<'_, A>)
1694 where
1695 F: FnMut(&mut A) -> K,
1696 K: PartialEq,
1697 S: DataMut;
1698
1699 /// Reverses the order of elements in the array, in place.
1700 ///
1701 /// # Examples
1702 ///
1703 /// ```
1704 /// use ndarray_slice::{ndarray::arr1, Slice1Ext};
1705 ///
1706 /// let mut v = arr1(&[1, 2, 3]);
1707 /// v.reverse();
1708 /// assert!(v == arr1(&[3, 2, 1]));
1709 /// ```
1710 fn reverse(&mut self)
1711 where
1712 S: DataMut;
1713}
1714
1715impl<A, S> Slice1Ext<A, S> for ArrayBase<S, Ix1>
1716where
1717 S: Data<Elem = A>,
1718{
1719 #[cfg(feature = "rayon")]
1720 #[inline]
1721 fn par_sort(&mut self)
1722 where
1723 A: Ord + Send,
1724 S: DataMut,
1725 {
1726 par_merge_sort(self.view_mut(), A::lt);
1727 }
1728 #[cfg(feature = "rayon")]
1729 #[inline]
1730 fn par_sort_by<F>(&mut self, compare: F)
1731 where
1732 A: Send,
1733 F: Fn(&A, &A) -> Ordering + Sync,
1734 S: DataMut,
1735 {
1736 par_merge_sort(self.view_mut(), |a: &A, b: &A| compare(a, b) == Less)
1737 }
1738 #[cfg(feature = "rayon")]
1739 #[inline]
1740 fn par_sort_by_key<K, F>(&mut self, f: F)
1741 where
1742 A: Send,
1743 K: Ord,
1744 F: Fn(&A) -> K + Sync,
1745 S: DataMut,
1746 {
1747 par_merge_sort(self.view_mut(), |a: &A, b: &A| f(a).lt(&f(b)))
1748 }
1749 #[cfg(feature = "rayon")]
1750 fn par_sort_by_cached_key<K, F>(&mut self, f: F)
1751 where
1752 A: Send + Sync,
1753 F: Fn(&A) -> K + Sync,
1754 K: Ord + Send,
1755 S: DataMut,
1756 {
1757 use core::mem;
1758 use rayon::{
1759 iter::{ParallelBridge, ParallelIterator},
1760 slice::ParallelSliceMut,
1761 };
1762
1764 let len = self.len();
1765 if len < 2 {
1766 return;
1767 }
1768
1769 // Helper macro for indexing our vector by the smallest possible type, to reduce allocation.
1770 macro_rules! sort_by_key {
1771 ($t:ty) => {{
1772 let mut indices: Vec<_> = self
1773 .iter()
1774 .enumerate()
1775 .par_bridge()
1776 .map(|(i, x)| (f(&*x), i as $t))
1777 .collect();
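				// Note: `par_bridge` does not preserve iteration order, but every pair
				// carries its original index and the vector is sorted below, so the
				// collection order does not matter.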
1778 // The elements of `indices` are unique, as they are indexed, so any sort will be
1779				// stable with respect to the original array. We use `sort_unstable` here because
1780 // it requires less memory allocation.
1781 indices.par_sort_unstable();
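				// Apply the sorted permutation in place: after the sort, `indices[i].1`
				// is the original position of the element with the i-th smallest key.
				// Earlier swaps may already have moved that element, so follow the
				// recorded indices until they point at or past `i` to find where it
				// currently lives, store that location, and swap it into position `i`.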
1782 for i in 0..len {
1783 let mut index = indices[i].1;
1784 while (index as usize) < i {
1785 index = indices[index as usize].1;
1786 }
1787 indices[i].1 = index;
1788 self.swap(i, index as usize);
1789 }
1790 }};
1791 }
1792
1793 let sz_u8 = mem::size_of::<(K, u8)>();
1794 let sz_u16 = mem::size_of::<(K, u16)>();
1795 let sz_u32 = mem::size_of::<(K, u32)>();
1796 let sz_usize = mem::size_of::<(K, usize)>();
1797
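		// Pick the narrowest index type that actually shrinks the `(key, index)` pair;
		// depending on the alignment of `K`, a smaller integer may not reduce its size.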
1798 if sz_u8 < sz_u16 && len <= (u8::MAX as usize) {
1799 return sort_by_key!(u8);
1800 }
1801 if sz_u16 < sz_u32 && len <= (u16::MAX as usize) {
1802 return sort_by_key!(u16);
1803 }
1804 if sz_u32 < sz_usize && len <= (u32::MAX as usize) {
1805 return sort_by_key!(u32);
1806 }
1807 sort_by_key!(usize)
1808 }
1809
1810 #[cfg(feature = "alloc")]
1811 #[inline]
1812 fn sort(&mut self)
1813 where
1814 A: Ord,
1815 S: DataMut,
1816 {
1817 stable_sort(self.view_mut(), A::lt);
1818 }
1819 #[cfg(feature = "alloc")]
1820 #[inline]
1821 fn sort_by<F>(&mut self, mut compare: F)
1822 where
1823 F: FnMut(&A, &A) -> Ordering,
1824 S: DataMut,
1825 {
1826 stable_sort(self.view_mut(), &mut |a: &A, b: &A| compare(a, b) == Less)
1827 }
1828 #[cfg(feature = "alloc")]
1829 #[inline]
1830 fn sort_by_key<K, F>(&mut self, mut f: F)
1831 where
1832 K: Ord,
1833 F: FnMut(&A) -> K,
1834 S: DataMut,
1835 {
1836 stable_sort(self.view_mut(), &mut |a: &A, b: &A| f(a).lt(&f(b)))
1837 }
1838 #[cfg(feature = "std")]
1839 fn sort_by_cached_key<K, F>(&mut self, f: F)
1840 where
1841 F: FnMut(&A) -> K,
1842 K: Ord,
1843 S: DataMut,
1844 {
1845 use core::mem;
1846
1847 // Helper macro for indexing our vector by the smallest possible type, to reduce allocation.
1848 macro_rules! sort_by_key {
1849 ($t:ty, $array:ident, $f:ident) => {{
1850 let mut indices: Vec<_> = $array
1851 .iter()
1852 .map($f)
1853 .enumerate()
1854 .map(|(i, k)| (k, i as $t))
1855 .collect();
1856 // The elements of `indices` are unique, as they are indexed, so any sort will be
1857 // stable with respect to the original array. We use `sort_unstable` here because
1858 // it requires less memory allocation.
1859 indices.sort_unstable();
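				// Same cycle-chasing application of the sorted permutation as in
				// `par_sort_by_cached_key` above.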
1860 for i in 0..$array.len() {
1861 let mut index = indices[i].1;
1862 while (index as usize) < i {
1863 index = indices[index as usize].1;
1864 }
1865 indices[i].1 = index;
1866 $array.swap(i, index as usize);
1867 }
1868 }};
1869 }
1870
1871 let sz_u8 = mem::size_of::<(K, u8)>();
1872 let sz_u16 = mem::size_of::<(K, u16)>();
1873 let sz_u32 = mem::size_of::<(K, u32)>();
1874 let sz_usize = mem::size_of::<(K, usize)>();
1875
1876 let len = self.len();
1877 if len < 2 {
1878 return;
1879 }
1880 if sz_u8 < sz_u16 && len <= (u8::MAX as usize) {
1881 return sort_by_key!(u8, self, f);
1882 }
1883 if sz_u16 < sz_u32 && len <= (u16::MAX as usize) {
1884 return sort_by_key!(u16, self, f);
1885 }
1886 if sz_u32 < sz_usize && len <= (u32::MAX as usize) {
1887 return sort_by_key!(u32, self, f);
1888 }
1889 sort_by_key!(usize, self, f)
1890 }
1891
1892 #[cfg(feature = "rayon")]
1893 #[inline]
1894 fn par_sort_unstable(&mut self)
1895 where
1896 A: Ord + Send,
1897 S: DataMut,
1898 {
1899 par_quick_sort(self.view_mut(), A::lt);
1900 }
1901 #[cfg(feature = "rayon")]
1902 #[inline]
1903 fn par_sort_unstable_by<F>(&mut self, compare: F)
1904 where
1905 A: Send,
1906 F: Fn(&A, &A) -> Ordering + Sync,
1907 S: DataMut,
1908 {
1909 par_quick_sort(self.view_mut(), |a: &A, b: &A| compare(a, b) == Less)
1910 }
1911 #[cfg(feature = "rayon")]
1912 #[inline]
1913 fn par_sort_unstable_by_key<K, F>(&mut self, f: F)
1914 where
1915 A: Send,
1916 K: Ord,
1917 F: Fn(&A) -> K + Sync,
1918 S: DataMut,
1919 {
1920 par_quick_sort(self.view_mut(), |a: &A, b: &A| f(a).lt(&f(b)))
1921 }
1922
1923 #[inline]
1924 fn sort_unstable(&mut self)
1925 where
1926 A: Ord,
1927 S: DataMut,
1928 {
1929 quick_sort(self.view_mut(), A::lt);
1930 }
1931 #[inline]
1932 fn sort_unstable_by<F>(&mut self, mut compare: F)
1933 where
1934 F: FnMut(&A, &A) -> Ordering,
1935 S: DataMut,
1936 {
1937 quick_sort(self.view_mut(), &mut |a: &A, b: &A| compare(a, b) == Less)
1938 }
1939 #[inline]
1940 fn sort_unstable_by_key<K, F>(&mut self, mut f: F)
1941 where
1942 K: Ord,
1943 F: FnMut(&A) -> K,
1944 S: DataMut,
1945 {
1946 quick_sort(self.view_mut(), &mut |a: &A, b: &A| f(a).lt(&f(b)))
1947 }
1948
1949 #[inline]
1950 fn is_sorted(&self) -> bool
1951 where
1952 A: PartialOrd,
1953 {
1954 is_sorted(self.view(), |a, b| a <= b)
1955 }
1956 #[inline]
1957 fn is_sorted_by<F>(&self, compare: F) -> bool
1958 where
1959 F: FnMut(&A, &A) -> bool,
1960 {
1961 is_sorted(self.view(), compare)
1962 }
1963 #[inline]
1964 fn is_sorted_by_key<F, K>(&self, mut f: F) -> bool
1965 where
1966 F: FnMut(&A) -> K,
1967 K: PartialOrd,
1968 {
1969 is_sorted(self.view(), |a, b| f(a) <= f(b))
1970 }
1971
1972 #[cfg(feature = "rayon")]
1973 #[inline]
1974 fn par_select_many_nth_unstable<'a, S2>(
1975 &'a mut self,
1976 indices: &ArrayBase<S2, Ix1>,
1977 collection: &mut Vec<&'a mut A>,
1978 ) where
1979 A: Ord + Send,
1980 S: DataMut,
1981 S2: Data<Elem = usize> + Sync,
1982 {
1983 collection.reserve_exact(indices.len());
1984 let values = collection.spare_capacity_mut();
1985 par_partition_at_indices(self.view_mut(), 0, indices.view(), values, &A::lt);
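		// SAFETY: `reserve_exact` made room for `indices.len()` extra elements and
		// `par_partition_at_indices` is expected to fill exactly that many spare
		// slots in `values`, so the new length only exposes initialized elements.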
1986 unsafe { collection.set_len(collection.len() + indices.len()) };
1987 }
1988 #[cfg(feature = "rayon")]
1989 #[inline]
1990 fn par_select_many_nth_unstable_by<'a, F, S2>(
1991 &'a mut self,
1992 indices: &ArrayBase<S2, Ix1>,
1993 collection: &mut Vec<&'a mut A>,
1994 compare: F,
1995 ) where
1996 A: Send,
1997 F: Fn(&A, &A) -> Ordering + Sync,
1998 S: DataMut,
1999 S2: Data<Elem = usize> + Sync,
2000 {
2001 collection.reserve_exact(indices.len());
2002 let values = collection.spare_capacity_mut();
2003 par_partition_at_indices(
2004 self.view_mut(),
2005 0,
2006 indices.view(),
2007 values,
2008 &|a: &A, b: &A| compare(a, b) == Less,
2009 );
2010 unsafe { collection.set_len(collection.len() + indices.len()) };
2011 }
2012 #[cfg(feature = "rayon")]
2013 #[inline]
2014 fn par_select_many_nth_unstable_by_key<'a, K, F, S2>(
2015 &'a mut self,
2016 indices: &ArrayBase<S2, Ix1>,
2017 collection: &mut Vec<&'a mut A>,
2018 f: F,
2019 ) where
2020 A: Send,
2021 K: Ord,
2022 F: Fn(&A) -> K + Sync,
2023 S: DataMut,
2024 S2: Data<Elem = usize> + Sync,
2025 {
2026 collection.reserve_exact(indices.len());
2027 let values = collection.spare_capacity_mut();
2028 par_partition_at_indices(
2029 self.view_mut(),
2030 0,
2031 indices.view(),
2032 values,
2033 &|a: &A, b: &A| f(a).lt(&f(b)),
2034 );
2035 unsafe { collection.set_len(collection.len() + indices.len()) };
2036 }
2037
2038 #[inline]
2039 fn select_many_nth_unstable<'a, E, S2>(
2040 &'a mut self,
2041 indices: &ArrayBase<S2, Ix1>,
2042 collection: &mut E,
2043 ) where
2044 A: Ord + 'a,
2045 E: Extend<(usize, &'a mut A)>,
2046 S: DataMut,
2047 S2: Data<Elem = usize>,
2048 {
2049 partition_at_indices(self.view_mut(), 0, indices.view(), collection, &mut A::lt);
2050 }
2051 #[inline]
2052 fn select_many_nth_unstable_by<'a, E, F, S2>(
2053 &'a mut self,
2054 indices: &ArrayBase<S2, Ix1>,
2055 collection: &mut E,
2056 mut compare: F,
2057 ) where
2058 A: 'a,
2059 E: Extend<(usize, &'a mut A)>,
2060 F: FnMut(&A, &A) -> Ordering,
2061 S: DataMut,
2062 S2: Data<Elem = usize>,
2063 {
2064 partition_at_indices(
2065 self.view_mut(),
2066 0,
2067 indices.view(),
2068 collection,
2069 &mut |a: &A, b: &A| compare(a, b) == Less,
2070 );
2071 }
2072 #[inline]
2073 fn select_many_nth_unstable_by_key<'a, E, K, F, S2>(
2074 &'a mut self,
2075 indices: &ArrayBase<S2, Ix1>,
2076 collection: &mut E,
2077 mut f: F,
2078 ) where
2079 A: 'a,
2080 E: Extend<(usize, &'a mut A)>,
2081 K: Ord,
2082 F: FnMut(&A) -> K,
2083 S: DataMut,
2084 S2: Data<Elem = usize>,
2085 {
2086 partition_at_indices(
2087 self.view_mut(),
2088 0,
2089 indices.view(),
2090 collection,
2091 &mut |a: &A, b: &A| f(a).lt(&f(b)),
2092 );
2093 }
2094
2095 #[inline]
2096 fn select_nth_unstable(
2097 &mut self,
2098 index: usize,
2099 ) -> (ArrayViewMut1<'_, A>, &mut A, ArrayViewMut1<'_, A>)
2100 where
2101 A: Ord,
2102 S: DataMut,
2103 {
2104 partition_at_index(self.view_mut(), index, &mut A::lt)
2105 }
2106 #[inline]
2107 fn select_nth_unstable_by<F>(
2108 &mut self,
2109 index: usize,
2110 mut compare: F,
2111 ) -> (ArrayViewMut1<'_, A>, &mut A, ArrayViewMut1<'_, A>)
2112 where
2113 F: FnMut(&A, &A) -> Ordering,
2114 S: DataMut,
2115 {
2116 partition_at_index(self.view_mut(), index, &mut |a: &A, b: &A| {
2117 compare(a, b) == Less
2118 })
2119 }
2120 #[inline]
2121 fn select_nth_unstable_by_key<K, F>(
2122 &mut self,
2123 index: usize,
2124 mut f: F,
2125 ) -> (ArrayViewMut1<'_, A>, &mut A, ArrayViewMut1<'_, A>)
2126 where
2127 K: Ord,
2128 F: FnMut(&A) -> K,
2129 S: DataMut,
2130 {
2131 partition_at_index(self.view_mut(), index, &mut |a: &A, b: &A| f(a).lt(&f(b)))
2132 }
2133
2134 #[inline]
2135 fn partition_point<P>(&self, mut pred: P) -> usize
2136 where
2137 P: FnMut(&A) -> bool,
2138 {
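		// Treat the predicate as a comparator that never returns `Equal`: elements
		// satisfying `pred` compare `Less` and the rest `Greater`, so the search always
		// ends in `Err` at the first element of the second partition.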
2139 self.binary_search_by(|x| if pred(x) { Less } else { Greater })
2140 .unwrap_or_else(|i| i)
2141 }
2142
2143 #[inline]
2144 fn contains(&self, x: &A) -> bool
2145 where
2146 A: PartialEq,
2147 {
2148 self.iter().any(|a| a == x)
2149 }
2150
2151 #[inline]
2152 fn binary_search(&self, x: &A) -> Result<usize, usize>
2153 where
2154 A: Ord,
2155 {
2156 self.binary_search_by(|p| p.cmp(x))
2157 }
2158 fn binary_search_by<F>(&self, mut f: F) -> Result<usize, usize>
2159 where
2160 F: FnMut(&A) -> Ordering,
2161 {
2162 // INVARIANTS:
2163 // - 0 <= left <= left + size = right <= self.len()
2164 // - f returns Less for everything in self[..left]
2165 // - f returns Greater for everything in self[right..]
2166 let mut size = self.len();
2167 let mut left = 0;
2168 let mut right = size;
2169 while left < right {
2170 let mid = left + size / 2;
2171
2172 // SAFETY: the while condition means `size` is strictly positive, so
2173 // `size/2 < size`. Thus `left + size/2 < left + size`, which
2174 // coupled with the `left + size <= self.len()` invariant means
2175 // we have `left + size/2 < self.len()`, and this is in-bounds.
2176 let cmp = f(unsafe { self.uget(mid) });
2177
2178 // This control flow produces conditional moves, which results in
2179 // fewer branches and instructions than if/else or matching on
2180 // cmp::Ordering.
2181 // This is x86 asm for u8: https://rust.godbolt.org/z/698eYffTx.
2182 left = if cmp == Less { mid + 1 } else { left };
2183 right = if cmp == Greater { mid } else { right };
2184 if cmp == Equal {
2185				// SAFETY: same as the `uget` above
2186 //unsafe { crate::intrinsics::assume(mid < self.len()) };
2187 debug_assert!(mid < self.len());
2188 return Ok(mid);
2189 }
2190
2191 size = right - left;
2192 }
2193
2194 // SAFETY: directly true from the overall invariant.
2195 // Note that this is `<=`, unlike the assume in the `Ok` path.
2196 //unsafe { crate::intrinsics::assume(left <= self.len()) };
2197 debug_assert!(left <= self.len());
2198 Err(left)
2199 }
2200 #[inline]
2201 fn binary_search_by_key<B, F>(&self, b: &B, mut f: F) -> Result<usize, usize>
2202 where
2203 F: FnMut(&A) -> B,
2204 B: Ord,
2205 {
2206 self.binary_search_by(|k| f(k).cmp(b))
2207 }
2208
2209 #[inline]
2210 fn partition_dedup(&mut self) -> (ArrayViewMut1<'_, A>, ArrayViewMut1<'_, A>)
2211 where
2212 A: PartialEq,
2213 S: DataMut,
2214 {
2215 self.partition_dedup_by(|a, b| a == b)
2216 }
2217 #[inline]
2218 fn partition_dedup_by<F>(
2219 &mut self,
2220 same_bucket: F,
2221 ) -> (ArrayViewMut1<'_, A>, ArrayViewMut1<'_, A>)
2222 where
2223 F: FnMut(&mut A, &mut A) -> bool,
2224 S: DataMut,
2225 {
2226 partition_dedup(self.view_mut(), same_bucket)
2227 }
2228 #[inline]
2229 fn partition_dedup_by_key<K, F>(
2230 &mut self,
2231 mut key: F,
2232 ) -> (ArrayViewMut1<'_, A>, ArrayViewMut1<'_, A>)
2233 where
2234 F: FnMut(&mut A) -> K,
2235 K: PartialEq,
2236 S: DataMut,
2237 {
2238 self.partition_dedup_by(|a, b| key(a) == key(b))
2239 }
2240
2241 #[inline]
2242 fn reverse(&mut self)
2243 where
2244 S: DataMut,
2245 {
2246 reverse(self.view_mut());
2247 }
2248}