use std::iter::{repeat, zip, Cycle, FusedIterator, StepBy, Take};
use std::ops::{Add, Range};
use std::slice;
use crate::index_iterator::DynIndices;
use crate::layout::Layout;
use crate::slice_range::{to_slice_items, SliceItem, SliceRange};
use super::{
AsView, MutLayout, NdTensorView, NdTensorViewMut, TensorBase, TensorView, TensorViewMut,
};
/// Borrowed pair of a tensor's element storage and its layout.
///
/// Immutable element iterators hold one of these so they can walk the
/// elements without owning either the data or the layout.
pub(crate) struct ViewRef<'d, 'l, T, L: Layout> {
    data: &'d [T],
    layout: &'l L,
}

impl<'d, 'l, T, L: Layout> ViewRef<'d, 'l, T, L> {
    /// Bundle `data` and `layout` into a view reference.
    pub(crate) fn new(data: &'d [T], layout: &'l L) -> ViewRef<'d, 'l, T, L> {
        ViewRef { data, layout }
    }

    /// Return the underlying data slice, but only when the layout is
    /// contiguous, in which case storage order matches logical order.
    fn contiguous_data(&self) -> Option<&'d [T]> {
        if self.layout.is_contiguous() {
            Some(self.data)
        } else {
            None
        }
    }

    /// Shape of the viewed tensor, as reported by the layout.
    fn shape(&self) -> L::Index<'_> {
        self.layout.shape()
    }
}

impl<'d, 'l, T, L: Layout> Clone for ViewRef<'d, 'l, T, L> {
    // Manual impl: `#[derive(Clone)]` would add an unwanted `T: Clone`
    // bound even though only the references are copied.
    fn clone(&self) -> ViewRef<'d, 'l, T, L> {
        ViewRef::new(self.data, self.layout)
    }
}
/// Mutable counterpart of [`ViewRef`]: borrowed mutable element storage
/// plus the layout that maps indices to offsets within it.
pub(crate) struct MutViewRef<'d, 'l, T, L: Layout> {
    data: &'d mut [T],
    layout: &'l L,
}

impl<'d, 'l, T, L: Layout> MutViewRef<'d, 'l, T, L> {
    /// Bundle mutable `data` and `layout` into a view reference.
    pub(crate) fn new(data: &'d mut [T], layout: &'l L) -> MutViewRef<'d, 'l, T, L> {
        Self { data, layout }
    }
}
/// Per-dimension cursor used by `IndexingIterBase`.
///
/// Tracks how many steps remain along one dimension and how far the linear
/// data offset moves per step along it.
#[derive(Copy, Clone, Debug)]
struct IterPos {
    // Steps left before this dimension wraps. Starts at `steps - 1`
    // because position 0 counts as already visited.
    steps_remaining: usize,
    // Total number of index positions along this dimension.
    steps: usize,
    // Offset delta (in elements; may be negative) for one step.
    offset_step: isize,
}

impl IterPos {
    fn new(steps: usize, offset_step: isize) -> IterPos {
        IterPos {
            // `saturating_sub` keeps zero-sized dims at 0 instead of wrapping.
            steps_remaining: steps.saturating_sub(1),
            steps,
            offset_step,
        }
    }

    /// Advance one step. Returns `true` if the cursor moved within this
    /// dimension, or `false` if it wrapped back to the start (in which case
    /// the remaining-step count is reset).
    #[inline(always)]
    fn step(&mut self) -> bool {
        match self.steps_remaining {
            0 => {
                self.steps_remaining = self.steps.saturating_sub(1);
                false
            }
            _ => {
                self.steps_remaining -= 1;
                true
            }
        }
    }
}
/// Shared bookkeeping for iterating over the elements of a (possibly
/// non-contiguous, broadcast or sliced) tensor layout.
///
/// Maintains the number of remaining elements, the linear storage offset of
/// the next element, and a per-dimension cursor.
#[derive(Clone, Debug)]
struct IndexingIterBase {
    /// Number of elements remaining to visit.
    len: usize,
    /// Offset (in elements) of the next element within the data buffer.
    offset: isize,
    /// Cursor for each dimension, outermost (dim 0) first.
    pos: Vec<IterPos>,
}

impl IndexingIterBase {
    /// Create an iterator over all elements of `layout`, in logical order.
    fn new<L: Layout>(layout: &L) -> IndexingIterBase {
        let dims = layout
            .shape()
            .as_ref()
            .iter()
            .enumerate()
            // Each dim steps `len` times, moving the offset by that dim's
            // stride per step.
            .map(|(dim, &len)| IterPos::new(len, layout.stride(dim) as isize))
            .collect();
        IndexingIterBase {
            len: layout.len(),
            offset: 0,
            pos: dims,
        }
    }

    /// Create an iterator over `layout`'s elements broadcast to `shape`.
    ///
    /// `shape` must have at least as many dims as `layout`. Dims whose sizes
    /// differ from the layout's are stepped with a zero offset delta, so the
    /// same data is revisited.
    fn broadcast<L: Layout>(layout: &L, shape: &[usize]) -> IndexingIterBase {
        // Number of dims added at the front by broadcasting.
        let added_dims = shape.len() - layout.ndim();
        let layout_shape = layout.shape();
        let layout_shape = layout_shape.as_ref();
        // Left-pad the actual shape with zeros so it aligns with `shape`.
        // NOTE(review): if a padded dim's `broadcast_len` is 0, the two
        // sizes compare equal and `dim - added_dims` underflows below —
        // confirm callers never broadcast to a zero-sized added dim.
        let padded_tensor_shape = repeat(&0).take(added_dims).chain(layout_shape.iter());
        let dims = zip(padded_tensor_shape, shape.iter())
            .enumerate()
            .map(|(dim, (&actual_len, &broadcast_len))| {
                let offset_step = if actual_len == broadcast_len {
                    // Not broadcast along this dim: step by the real stride.
                    layout.stride(dim - added_dims) as isize
                } else {
                    // Broadcast dim: stay at the same data offset.
                    0
                };
                IterPos::new(broadcast_len, offset_step)
            })
            .collect();
        IndexingIterBase {
            len: shape.iter().product(),
            offset: 0,
            pos: dims,
        }
    }

    /// Create an iterator over the elements selected by `range` within
    /// `layout`. `range` must contain one item per dimension.
    fn slice<L: Layout>(layout: &L, range: &[SliceItem]) -> IndexingIterBase {
        assert!(
            range.len() == layout.ndim(),
            "slice dimensions {} do not match tensor dimensions {}",
            range.len(),
            layout.ndim()
        );
        // Storage offset of the first selected element.
        let mut offset = 0;
        let dims: Vec<_> = range
            .iter()
            .enumerate()
            .map(|(dim, range)| {
                let len = layout.size(dim);
                let range = match range {
                    SliceItem::Index(idx) => {
                        let len = len as isize;
                        // Negative indices count from the end.
                        assert!(*idx >= -len && *idx < len, "slice index is invalid");
                        SliceRange::new(*idx, Some(*idx + 1), 1)
                    }
                    SliceItem::Range(range) => range.clamp(len),
                };
                let stride = layout.stride(dim);
                // Resolve a possibly-negative start index to an absolute one.
                let start_index = if range.start >= 0 {
                    range.start
                } else {
                    (len as isize) + range.start
                };
                if start_index >= 0 && start_index < (len as isize) {
                    offset += stride * start_index as usize;
                } else {
                    // An out-of-bounds start is only legal for an empty range.
                    assert!(range.steps(len) == 0);
                }
                IterPos::new(range.steps(len), (stride as isize) * range.step())
            })
            .collect();
        IndexingIterBase {
            len: dims.iter().map(|dim| dim.steps).product(),
            offset: offset as isize,
            pos: dims,
        }
    }

    /// Advance the cursor for dimension `dim` by one step, wrapping and
    /// carrying into outer dimensions as needed. `stride` is the number of
    /// logical elements skipped per step at this dim (1 for the innermost).
    #[inline(always)]
    fn step_dim(&mut self, mut dim: usize, stride: usize) {
        self.len -= stride;
        let mut pos = &mut self.pos[dim];
        while !pos.step() {
            // This dim wrapped: rewind its accumulated offset contribution
            // and carry into the next-outer dimension.
            self.offset -= pos.offset_step * (pos.steps as isize - 1);
            if dim == 0 {
                break;
            }
            dim -= 1;
            pos = &mut self.pos[dim];
        }
        self.offset += pos.offset_step;
    }

    /// Advance to the next element in logical order.
    #[inline(always)]
    fn step(&mut self) {
        self.step_dim(self.pos.len() - 1, 1);
    }

    /// Advance by up to `n` elements (clamped to the remaining length),
    /// taking multi-element steps along outer dims where possible.
    fn step_by(&mut self, n: usize) {
        let mut n = n.min(self.len);
        while n > 0 {
            // Find the outermost dim whose per-step element count still
            // fits within `n`.
            let mut dim = self.pos.len() - 1;
            let mut stride = 1;
            while dim > 0 {
                let next_stride = stride * self.pos[dim].steps;
                if next_stride >= n {
                    break;
                }
                dim -= 1;
                stride = next_stride;
            }
            // Take as many whole strides as fit, then loop to handle the
            // remainder with smaller strides.
            let n_steps = n / stride;
            for _ in 0..n_steps {
                n -= stride;
                self.step_dim(dim, stride);
            }
        }
    }
}
/// Iterator over the elements of a tensor, in logical order.
#[derive(Clone)]
pub struct Iter<'a, T> {
    iter: IterKind<'a, T>,
}

/// Strategy used by [`Iter`]: a plain slice iterator when the data is
/// contiguous, otherwise index-based stepping.
#[derive(Clone)]
enum IterKind<'a, T> {
    Direct(slice::Iter<'a, T>),
    Indexing(IndexingIter<'a, T>),
}

impl<'a, T> Iter<'a, T> {
    pub(super) fn new<L: Layout>(view: ViewRef<'a, '_, T, L>) -> Iter<'a, T> {
        // Prefer the fast path over contiguous storage when possible.
        let iter = match view.contiguous_data() {
            Some(data) => IterKind::Direct(data.iter()),
            None => IterKind::Indexing(IndexingIter::new(view)),
        };
        Iter { iter }
    }

    pub(super) fn slice<L: Layout>(
        view: ViewRef<'a, '_, T, L>,
        range: &[SliceItem],
    ) -> Iter<'a, T> {
        // Slicing always uses the indexing path, even for contiguous layouts.
        Iter {
            iter: IterKind::Indexing(IndexingIter {
                base: IndexingIterBase::slice(view.layout, range),
                data: view.data,
            }),
        }
    }
}

impl<'a, T> Iterator for Iter<'a, T> {
    type Item = &'a T;

    #[inline(always)]
    fn next(&mut self) -> Option<Self::Item> {
        match &mut self.iter {
            IterKind::Direct(inner) => inner.next(),
            IterKind::Indexing(inner) => inner.next(),
        }
    }

    fn size_hint(&self) -> (usize, Option<usize>) {
        match &self.iter {
            IterKind::Direct(inner) => inner.size_hint(),
            IterKind::Indexing(inner) => inner.size_hint(),
        }
    }

    fn nth(&mut self, n: usize) -> Option<Self::Item> {
        match &mut self.iter {
            IterKind::Direct(inner) => inner.nth(n),
            IterKind::Indexing(inner) => {
                // Skip `n` positions, then yield the following element.
                inner.base.step_by(n);
                inner.next()
            }
        }
    }
}

impl<'a, T> ExactSizeIterator for Iter<'a, T> {}

impl<'a, T> FusedIterator for Iter<'a, T> {}
/// Element iterator that walks a layout via `IndexingIterBase`.
#[derive(Clone)]
struct IndexingIter<'a, T> {
    base: IndexingIterBase,
    data: &'a [T],
}

impl<'a, T> IndexingIter<'a, T> {
    /// Iterate over every element of `view` in logical order.
    fn new<L: Layout>(view: ViewRef<'a, '_, T, L>) -> IndexingIter<'a, T> {
        IndexingIter {
            base: IndexingIterBase::new(view.layout),
            data: view.data,
        }
    }

    /// Iterate over `view`'s elements broadcast to `shape`.
    fn broadcast<L: Layout>(view: ViewRef<'a, '_, T, L>, shape: &[usize]) -> IndexingIter<'a, T> {
        IndexingIter {
            base: IndexingIterBase::broadcast(view.layout, shape),
            data: view.data,
        }
    }
}

impl<'a, T> Iterator for IndexingIter<'a, T> {
    type Item = &'a T;

    #[inline(always)]
    fn next(&mut self) -> Option<Self::Item> {
        if self.base.len == 0 {
            None
        } else {
            // Yield the element at the current offset, then advance.
            let element = &self.data[self.base.offset as usize];
            self.base.step();
            Some(element)
        }
    }

    fn size_hint(&self) -> (usize, Option<usize>) {
        let remaining = self.base.len;
        (remaining, Some(remaining))
    }
}

impl<'a, T> ExactSizeIterator for IndexingIter<'a, T> {}

impl<'a, T> FusedIterator for IndexingIter<'a, T> {}
/// Mutable element iterator over a tensor, in logical order.
pub struct IterMut<'a, T> {
    iter: IterMutKind<'a, T>,
}

/// Strategy used by [`IterMut`]: contiguous fast path or index stepping.
enum IterMutKind<'a, T> {
    Direct(slice::IterMut<'a, T>),
    Indexing(IndexingIterMut<'a, T>),
}

impl<'a, T> IterMut<'a, T> {
    pub(super) fn new<L: Layout>(view: MutViewRef<'a, '_, T, L>) -> IterMut<'a, T> {
        // Prefer the fast path over contiguous storage when possible.
        let iter = if view.layout.is_contiguous() {
            IterMutKind::Direct(view.data.iter_mut())
        } else {
            IterMutKind::Indexing(IndexingIterMut::new(view))
        };
        IterMut { iter }
    }
}

impl<'a, T> Iterator for IterMut<'a, T> {
    type Item = &'a mut T;

    #[inline]
    fn next(&mut self) -> Option<Self::Item> {
        match &mut self.iter {
            IterMutKind::Direct(inner) => inner.next(),
            IterMutKind::Indexing(inner) => inner.next(),
        }
    }

    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        match &self.iter {
            IterMutKind::Direct(inner) => inner.size_hint(),
            IterMutKind::Indexing(inner) => inner.size_hint(),
        }
    }

    fn nth(&mut self, n: usize) -> Option<Self::Item> {
        match &mut self.iter {
            IterMutKind::Direct(inner) => inner.nth(n),
            IterMutKind::Indexing(inner) => {
                // Skip `n` positions, then yield the following element.
                inner.base.step_by(n);
                inner.next()
            }
        }
    }
}

impl<'a, T> ExactSizeIterator for IterMut<'a, T> {}

impl<'a, T> FusedIterator for IterMut<'a, T> {}
/// Mutable counterpart of `IndexingIter`.
struct IndexingIterMut<'a, T> {
    base: IndexingIterBase,
    data: &'a mut [T],
}

impl<'a, T> IndexingIterMut<'a, T> {
    fn new<L: Layout>(view: MutViewRef<'a, '_, T, L>) -> IndexingIterMut<'a, T> {
        // A broadcast layout visits the same element more than once, which
        // would hand out aliasing `&mut` references in `next` below.
        assert!(
            !view.layout.is_broadcast(),
            "Cannot mutably iterate over broadcasting view"
        );
        IndexingIterMut {
            base: IndexingIterBase::new(view.layout),
            data: view.data,
        }
    }
}

impl<'a, T> Iterator for IndexingIterMut<'a, T> {
    type Item = &'a mut T;

    fn next(&mut self) -> Option<Self::Item> {
        if self.base.len == 0 {
            return None;
        }
        let element = unsafe {
            let el = &mut self.data[self.base.offset as usize];
            // SAFETY: extends the borrow from `&mut self` to `'a`. Sound
            // only if every offset is yielded at most once; the assert in
            // `new` rules out broadcast layouts — NOTE(review): this relies
            // on non-broadcast layouts mapping distinct indices to distinct
            // offsets; confirm for all layouts used here.
            std::mem::transmute::<&'_ mut T, &'a mut T>(el)
        };
        self.base.step();
        Some(element)
    }

    fn size_hint(&self) -> (usize, Option<usize>) {
        (self.base.len, Some(self.base.len))
    }
}

impl<'a, T> ExactSizeIterator for IndexingIterMut<'a, T> {}

impl<'a, T> FusedIterator for IndexingIterMut<'a, T> {}
/// Iterator over the storage offsets of a layout's elements, in logical
/// order. Lets callers index the underlying data themselves.
pub struct Offsets {
    base: IndexingIterBase,
}

impl Offsets {
    /// Offsets of every element of `layout`.
    pub fn new<L: Layout>(layout: &L) -> Offsets {
        Offsets {
            base: IndexingIterBase::new(layout),
        }
    }

    /// Offsets of `layout`'s elements broadcast to `shape`.
    pub fn broadcast<L: Layout>(layout: &L, shape: &[usize]) -> Offsets {
        Offsets {
            base: IndexingIterBase::broadcast(layout, shape),
        }
    }

    /// Offsets of the elements selected by `range` within `layout`.
    pub fn slice<L: Layout>(layout: &L, range: &[SliceItem]) -> Offsets {
        Offsets {
            base: IndexingIterBase::slice(layout, range),
        }
    }
}

impl Iterator for Offsets {
    type Item = usize;

    fn next(&mut self) -> Option<Self::Item> {
        if self.base.len == 0 {
            None
        } else {
            // Yield the current offset, then advance.
            let current = self.base.offset as usize;
            self.base.step();
            Some(current)
        }
    }

    fn size_hint(&self) -> (usize, Option<usize>) {
        let remaining = self.base.len;
        (remaining, Some(remaining))
    }

    fn nth(&mut self, n: usize) -> Option<Self::Item> {
        // Skip `n` positions, then yield the next offset.
        self.base.step_by(n);
        self.next()
    }
}

impl ExactSizeIterator for Offsets {}

impl FusedIterator for Offsets {}
/// Iterator over the elements of a tensor view broadcast to a larger shape.
pub struct BroadcastIter<'a, T> {
    iter: BroadcastIterKind<'a, T>,
}

/// Strategy used by [`BroadcastIter`].
enum BroadcastIterKind<'a, T> {
    // Fast path: cycle through contiguous data repeatedly. Used when
    // broadcasting can be done by cycling (see `can_broadcast_by_cycling`).
    Direct(Take<Cycle<slice::Iter<'a, T>>>),
    // General path: index stepping with zero offset deltas on broadcast dims.
    Indexing(IndexingIter<'a, T>),
}
/// Return true if broadcasting `from_shape` to `to_shape` can be performed
/// by repeatedly cycling through the source elements in order.
///
/// This holds when, after right-aligning the shapes, every leading
/// `from_shape` dim that does not match the corresponding `to_shape` dim
/// has size 1.
fn can_broadcast_by_cycling(from_shape: &[usize], to_shape: &[usize]) -> bool {
    assert!(to_shape.len() >= from_shape.len());
    let excess_dims = to_shape.len() - from_shape.len();

    // Count trailing dims that match exactly once shapes are right-aligned.
    let trailing_match = from_shape
        .iter()
        .rev()
        .zip(to_shape[excess_dims..].iter().rev())
        .take_while(|(from, to)| from == to)
        .count();

    // All remaining (leading) source dims must have size 1.
    from_shape[..from_shape.len() - trailing_match]
        .iter()
        .all(|&size| size == 1)
}
impl<'a, T> BroadcastIter<'a, T> {
    /// Create an iterator yielding `view`'s elements broadcast to `to_shape`.
    pub(crate) fn new<L: Layout>(
        view: ViewRef<'a, '_, T, L>,
        to_shape: &[usize],
    ) -> BroadcastIter<'a, T> {
        // Keep a second handle for the fallback path, since the fast-path
        // check below borrows `view`.
        let fallback_view = view.clone();
        let iter = if let (Some(data), true) = (
            view.contiguous_data(),
            can_broadcast_by_cycling(view.shape().as_ref(), to_shape),
        ) {
            // Fast path: repeat the contiguous data for the broadcast length.
            let iter_len = to_shape.iter().product();
            BroadcastIterKind::Direct(data.iter().cycle().take(iter_len))
        } else {
            BroadcastIterKind::Indexing(IndexingIter::broadcast(fallback_view, to_shape))
        };
        BroadcastIter { iter }
    }
}

impl<'a, T> Iterator for BroadcastIter<'a, T> {
    type Item = &'a T;

    #[inline]
    fn next(&mut self) -> Option<Self::Item> {
        match &mut self.iter {
            BroadcastIterKind::Direct(inner) => inner.next(),
            BroadcastIterKind::Indexing(inner) => inner.next(),
        }
    }

    fn size_hint(&self) -> (usize, Option<usize>) {
        match &self.iter {
            BroadcastIterKind::Direct(inner) => inner.size_hint(),
            BroadcastIterKind::Indexing(inner) => inner.size_hint(),
        }
    }
}

impl<'a, T> ExactSizeIterator for BroadcastIter<'a, T> {}

impl<'a, T> FusedIterator for BroadcastIter<'a, T> {}
/// Iterator over the storage-offset ranges spanned by each "lane" (1D run
/// of elements along a chosen dimension) of a tensor.
struct LaneRanges {
    // Start offsets of each lane: offsets of all elements whose index
    // along the lane dimension is 0.
    offsets: Offsets,
    // Size of the lane dimension (elements per lane).
    dim_size: usize,
    // Stride of the lane dimension (element gap between lane entries).
    dim_stride: usize,
}

impl LaneRanges {
    fn new<L: Layout>(layout: &L, dim: usize) -> LaneRanges {
        // Select index 0 along `dim` and the full range of every other dim,
        // so `offsets` yields each lane's starting offset.
        let slice_starts: Vec<SliceItem> = (0..layout.ndim())
            .map(|i| {
                if i == dim {
                    (0..1).into()
                } else {
                    (0..(layout.size(i) as isize)).into()
                }
            })
            .collect();
        let offsets = Offsets::slice(layout, &slice_starts);
        LaneRanges {
            offsets,
            dim_size: layout.size(dim),
            dim_stride: layout.stride(dim),
        }
    }
}

impl Iterator for LaneRanges {
    type Item = Range<usize>;

    fn next(&mut self) -> Option<Range<usize>> {
        self.offsets.next().map(|offset| {
            // Range covers the first through last element of the lane.
            // NOTE(review): assumes `offsets` is empty whenever any dim has
            // size 0, so `dim_size - 1` cannot underflow here — the empty
            // tensor tests in this file rely on this; confirm for all
            // `SliceRange::clamp` edge cases.
            offset..offset + (self.dim_size - 1) * self.dim_stride + 1
        })
    }
}
/// Iterator over the 1D lanes of a tensor along a given dimension.
pub struct Lanes<'a, T> {
    data: &'a [T],
    ranges: LaneRanges,
}

/// A single 1D lane: yields references to the lane's elements in order.
pub struct Lane<'a, T> {
    inner: StepBy<std::slice::Iter<'a, T>>,
}

impl<'a, T> Iterator for Lane<'a, T> {
    type Item = &'a T;

    #[inline]
    fn next(&mut self) -> Option<Self::Item> {
        self.inner.next()
    }

    fn size_hint(&self) -> (usize, Option<usize>) {
        self.inner.size_hint()
    }
}

impl<'a, T> ExactSizeIterator for Lane<'a, T> {}

impl<'a, T> Lanes<'a, T> {
    /// Create an iterator over the lanes of `view` along dimension `dim`.
    pub(crate) fn new<L: Layout>(view: ViewRef<'a, '_, T, L>, dim: usize) -> Lanes<'a, T> {
        Lanes {
            data: view.data,
            ranges: LaneRanges::new(view.layout, dim),
        }
    }
}

impl<'a, T> Iterator for Lanes<'a, T> {
    type Item = Lane<'a, T>;

    fn next(&mut self) -> Option<Self::Item> {
        // Within a lane's span, consecutive elements are `dim_stride`
        // apart in storage.
        let step = self.ranges.dim_stride;
        self.ranges.next().map(|range| Lane {
            inner: self.data[range].iter().step_by(step),
        })
    }
}
/// Mutable counterpart of `Lanes`.
pub struct LanesMut<'a, T> {
    data: &'a mut [T],
    ranges: LaneRanges,
}

impl<'a, T> LanesMut<'a, T> {
    /// Create an iterator over the mutable lanes of `view` along `dim`.
    pub(crate) fn new<L: Layout>(view: MutViewRef<'a, '_, T, L>, dim: usize) -> LanesMut<'a, T> {
        // Broadcast layouts repeat elements, which would let different
        // lanes hand out aliasing `&mut` references.
        assert!(
            !view.layout.is_broadcast(),
            "Cannot mutably iterate over broadcasting view"
        );
        LanesMut {
            ranges: LaneRanges::new(view.layout, dim),
            data: view.data,
        }
    }
}

/// A single mutable 1D lane.
pub struct LaneMut<'a, T> {
    inner: StepBy<std::slice::IterMut<'a, T>>,
}

impl<'a, T> Iterator for LaneMut<'a, T> {
    type Item = &'a mut T;

    #[inline]
    fn next(&mut self) -> Option<Self::Item> {
        self.inner.next()
    }

    fn size_hint(&self) -> (usize, Option<usize>) {
        self.inner.size_hint()
    }
}

impl<'a, T> ExactSizeIterator for LaneMut<'a, T> {}

impl<'a, T> Iterator for LanesMut<'a, T> {
    type Item = LaneMut<'a, T>;

    fn next(&mut self) -> Option<LaneMut<'a, T>> {
        self.ranges.next().map(|range| {
            let slice = unsafe {
                let slice = &mut self.data[range];
                // SAFETY: extends the borrow from `&mut self` to `'a` so a
                // lane can outlive this call. Relies on different lanes
                // touching disjoint elements (non-broadcast assert in
                // `new` plus the `step_by` below). NOTE(review): when
                // `dim_stride > 1`, adjacent lanes' *ranges* overlap, so
                // the transmuted `&mut [T]` slices alias even though the
                // stepped elements are disjoint — flagging for review.
                std::mem::transmute::<&mut [T], &'a mut [T]>(slice)
            };
            LaneMut {
                inner: slice.iter_mut().step_by(self.ranges.dim_stride),
            }
        })
    }
}
/// Iterator over the innermost `N`-dimensional sub-views of a tensor.
pub struct InnerIter<'a, T, L: MutLayout, const N: usize> {
    // Indices over the leading (outer) dims; one sub-view per index.
    outer_indices: DynIndices,
    view: TensorBase<T, &'a [T], L>,
}

impl<'a, T, L: MutLayout, const N: usize> InnerIter<'a, T, L, N> {
    /// Create an iterator over the `N`-dim inner views of `view`.
    ///
    /// Panics if `view` has fewer than `N` dims.
    pub fn new(view: TensorBase<T, &'a [T], L>) -> Self {
        assert!(view.ndim() >= N);
        let outer_dims = view.ndim() - N;
        let outer_indices = DynIndices::from_shape(&view.shape().as_ref()[..outer_dims]);
        InnerIter {
            outer_indices,
            view,
        }
    }
}

impl<'a, T, L: MutLayout, const N: usize> Iterator for InnerIter<'a, T, L, N> {
    type Item = NdTensorView<'a, T, N>;

    fn next(&mut self) -> Option<Self::Item> {
        self.outer_indices.next().map(|idx| {
            // Slice away the outer index; the remaining `N` dims form the item.
            let slice_items = to_slice_items(&idx);
            self.view.slice(slice_items.as_slice())
        })
    }

    fn size_hint(&self) -> (usize, Option<usize>) {
        self.outer_indices.size_hint()
    }
}

impl<'a, T, L: MutLayout, const N: usize> ExactSizeIterator for InnerIter<'a, T, L, N> {}
/// Mutable counterpart of [`InnerIter`].
pub struct InnerIterMut<'a, T, L: MutLayout, const N: usize> {
    // Indices over the leading (outer) dims; one sub-view per index.
    outer_indices: DynIndices,
    view: TensorBase<T, &'a mut [T], L>,
}

impl<'a, T, L: MutLayout, const N: usize> InnerIterMut<'a, T, L, N> {
    /// Create an iterator over the mutable `N`-dim inner views of `view`.
    ///
    /// Panics if `view` has fewer than `N` dims.
    pub fn new(view: TensorBase<T, &'a mut [T], L>) -> Self {
        assert!(view.ndim() >= N);
        let outer_dims = view.ndim() - N;
        let outer_indices = DynIndices::from_shape(&view.shape().as_ref()[..outer_dims]);
        InnerIterMut {
            outer_indices,
            view,
        }
    }
}

impl<'a, T, L: MutLayout, const N: usize> Iterator for InnerIterMut<'a, T, L, N> {
    type Item = NdTensorViewMut<'a, T, N>;

    fn next(&mut self) -> Option<Self::Item> {
        self.outer_indices.next().map(|idx| {
            let slice_items = to_slice_items(&idx);
            let view: NdTensorViewMut<'_, T, N> = self.view.slice_mut(slice_items.as_slice());
            // SAFETY: extends the item's lifetime from the `&mut self`
            // borrow to `'a`. Each outer index is produced exactly once —
            // NOTE(review): soundness relies on distinct outer indices
            // selecting non-overlapping sub-views; confirm `slice_mut`
            // guarantees this for the layouts used here.
            unsafe {
                std::mem::transmute::<NdTensorViewMut<'_, T, N>, NdTensorViewMut<'a, T, N>>(view)
            }
        })
    }

    fn size_hint(&self) -> (usize, Option<usize>) {
        self.outer_indices.size_hint()
    }
}

impl<'a, T, L: MutLayout, const N: usize> ExactSizeIterator for InnerIterMut<'a, T, L, N> {}
/// Iterator over slices of a tensor along a chosen axis.
pub struct AxisIter<'a, T, L: MutLayout> {
    // View permuted so that the iteration axis is dim 0.
    view: TensorBase<T, &'a [T], L>,
    index: usize,
}

impl<'a, T, L: MutLayout> AxisIter<'a, T, L> {
    /// Create an iterator over slices of `view` along dimension `dim`.
    pub fn new(view: &TensorBase<T, &'a [T], L>, dim: usize) -> AxisIter<'a, T, L> {
        // Move the target axis to the front so each item is `view[[i]]`.
        let mut permuted = view.clone();
        permuted.move_axis(dim, 0);
        AxisIter {
            view: permuted,
            index: 0,
        }
    }
}

impl<'a, T, L: MutLayout> Iterator for AxisIter<'a, T, L> {
    type Item = TensorView<'a, T>;

    fn next(&mut self) -> Option<Self::Item> {
        if self.index < self.view.size(0) {
            let item = self.view.slice_dyn([self.index]);
            self.index += 1;
            Some(item)
        } else {
            None
        }
    }
}
/// Mutable counterpart of [`AxisIter`].
pub struct AxisIterMut<'a, T, L: MutLayout> {
    // View permuted so that the iteration axis is dim 0.
    view: TensorBase<T, &'a mut [T], L>,
    index: usize,
}

impl<'a, T, L: MutLayout> AxisIterMut<'a, T, L> {
    /// Create an iterator over mutable slices of `view` along `dim`.
    pub fn new(mut view: TensorBase<T, &'a mut [T], L>, dim: usize) -> AxisIterMut<'a, T, L> {
        // A broadcast view repeats elements, so its slices would hand out
        // aliasing mutable references.
        assert!(
            !view.layout().is_broadcast(),
            "Cannot mutably iterate over broadcasting view"
        );
        view.move_axis(dim, 0);
        AxisIterMut { view, index: 0 }
    }
}

impl<'a, T, L: MutLayout> Iterator for AxisIterMut<'a, T, L> {
    type Item = TensorViewMut<'a, T>;

    fn next(&mut self) -> Option<Self::Item> {
        if self.index >= self.view.size(0) {
            None
        } else {
            let index = self.index;
            self.index += 1;
            let view = unsafe {
                let view = self.view.slice_mut_dyn([index]);
                // SAFETY: extends the item's lifetime from the `&mut self`
                // borrow to `'a`. Each index is visited once and the view
                // is not broadcast (asserted in `new`) — NOTE(review):
                // relies on distinct indices along dim 0 selecting disjoint
                // storage.
                std::mem::transmute::<TensorViewMut<'_, T>, TensorViewMut<'a, T>>(view)
            };
            Some(view)
        }
    }
}
/// Iterator over successive chunks of a tensor along a chosen axis.
pub struct AxisChunks<'a, T, L: MutLayout> {
    // View permuted so that the chunked axis is dim 0.
    view: TensorBase<T, &'a [T], L>,
    index: usize,
    chunk_size: usize,
}

impl<'a, T, L: MutLayout> AxisChunks<'a, T, L> {
    /// Create an iterator over `chunk_size`-sized chunks of `view` along
    /// dimension `dim`. The final chunk may be smaller.
    pub fn new(
        view: &TensorBase<T, &'a [T], L>,
        dim: usize,
        chunk_size: usize,
    ) -> AxisChunks<'a, T, L> {
        // Move the target axis to the front so chunks are `view[i..j]`.
        let mut permuted = view.clone();
        permuted.move_axis(dim, 0);
        AxisChunks {
            view: permuted,
            index: 0,
            chunk_size,
        }
    }
}

impl<'a, T, L: MutLayout> Iterator for AxisChunks<'a, T, L> {
    type Item = TensorView<'a, T>;

    fn next(&mut self) -> Option<Self::Item> {
        let size = self.view.size(0);
        if self.index >= size {
            return None;
        }
        // Clamp the chunk end so the final chunk may be short.
        let end = (self.index + self.chunk_size).min(size);
        let chunk = self.view.slice_dyn(self.index..end);
        self.index += self.chunk_size;
        Some(chunk)
    }
}
/// Mutable counterpart of [`AxisChunks`].
pub struct AxisChunksMut<'a, T, L: MutLayout> {
    // View permuted so that the chunked axis is dim 0.
    view: TensorBase<T, &'a mut [T], L>,
    index: usize,
    chunk_size: usize,
}

impl<'a, T, L: MutLayout> AxisChunksMut<'a, T, L> {
    /// Create an iterator over mutable `chunk_size`-sized chunks of `view`
    /// along `dim`. The final chunk may be smaller.
    pub fn new(
        mut view: TensorBase<T, &'a mut [T], L>,
        dim: usize,
        chunk_size: usize,
    ) -> AxisChunksMut<'a, T, L> {
        // A broadcast view repeats elements, so its chunks would hand out
        // aliasing mutable references.
        assert!(
            !view.layout().is_broadcast(),
            "Cannot mutably iterate over broadcasting view"
        );
        view.move_axis(dim, 0);
        AxisChunksMut {
            view,
            chunk_size,
            index: 0,
        }
    }
}

impl<'a, T, L: MutLayout> Iterator for AxisChunksMut<'a, T, L> {
    type Item = TensorViewMut<'a, T>;

    fn next(&mut self) -> Option<Self::Item> {
        let size = self.view.size(0);
        if self.index >= size {
            None
        } else {
            let index = self.index;
            self.index += self.chunk_size;
            let view = unsafe {
                // End is clamped so the final chunk may be short.
                let view = self
                    .view
                    .slice_mut_dyn(index..index.add(self.chunk_size).min(size));
                // SAFETY: extends the chunk's lifetime from the `&mut self`
                // borrow to `'a`. Chunk ranges along dim 0 never overlap and
                // the view is not broadcast (asserted in `new`) —
                // NOTE(review): relies on disjoint dim-0 ranges mapping to
                // disjoint storage.
                std::mem::transmute::<TensorViewMut<'_, T>, TensorViewMut<'a, T>>(view)
            };
            Some(view)
        }
    }
}
#[cfg(test)]
mod tests {
    use crate::{AsView, Lanes, LanesMut, Tensor};

    // Lanes over a tensor with a zero-sized dim must be empty, whichever
    // axis the lanes run along.
    #[test]
    fn test_lanes_empty() {
        let x = Tensor::<i32>::zeros(&[5, 0]);
        assert!(Lanes::new(x.view().view_ref(), 0).next().is_none());
        assert!(Lanes::new(x.view().view_ref(), 1).next().is_none());
    }

    // Same invariant for the mutable variant.
    #[test]
    fn test_lanes_mut_empty() {
        let mut x = Tensor::<i32>::zeros(&[5, 0]);
        assert!(LanesMut::new(x.mut_view_ref(), 0).next().is_none());
        assert!(LanesMut::new(x.mut_view_ref(), 1).next().is_none());
    }
}