#![crate_name="ndarray"]
#![cfg_attr(has_deprecated, feature(deprecated))]
#![doc(html_root_url = "http://bluss.github.io/rust-ndarray/master/")]
#![cfg_attr(feature = "assign_ops", feature(augmented_assignments,
op_assign_traits))]
#[cfg(feature = "serde")]
extern crate serde;
#[cfg(feature = "rustc-serialize")]
extern crate rustc_serialize as serialize;
#[cfg(feature = "rblas")]
extern crate rblas;
extern crate itertools;
extern crate num as libnum;
use libnum::Float;
use libnum::Complex;
use std::cmp;
use std::mem;
use std::ops::{Add, Sub, Mul, Div, Rem, Neg, Not, Shr, Shl,
BitAnd,
BitOr,
BitXor,
};
use std::rc::Rc;
use std::slice::{self, Iter, IterMut};
use std::marker::PhantomData;
use itertools::ZipSlices;
use itertools::free::enumerate;
pub use dimension::{
Dimension,
RemoveAxis,
Axis,
};
use dimension::stride_offset;
pub use dimension::NdIndex;
pub use indexes::Indexes;
pub use shape_error::ShapeError;
pub use stride_error::StrideError;
pub use si::{Si, S};
use iterators::Baseiter;
pub use iterators::{
InnerIter,
InnerIterMut,
OuterIter,
OuterIterMut,
AxisChunksIter,
AxisChunksIterMut,
};
pub use linalg::LinalgScalar;
mod arraytraits;
#[cfg(feature = "serde")]
mod arrayserialize;
mod arrayformat;
#[cfg(feature = "rblas")]
pub mod blas;
mod dimension;
mod indexes;
mod iterators;
mod linalg;
mod linspace;
mod numeric_util;
mod si;
mod shape_error;
mod stride_error;
/// Internal prelude: re-exports of the names that the crate's sibling
/// modules commonly need.
mod imp_prelude {
    pub use {
        ArrayBase,
        ArrayView,
        ArrayViewMut,
        OwnedArray,
        RcArray,
        Ix, Ixs,
        Dimension,
        Data,
        DataMut,
        DataOwned,
    };
    /// Newtype wrapper; used by sibling modules (exact role not visible in
    /// this file — confirm at its use sites).
    #[derive(Copy, Clone, Debug)]
    pub struct Priv<T>(pub T);
}
/// Array index type (unsigned).
pub type Ix = usize;
/// Array index type (signed); used for strides, which may be negative.
pub type Ixs = isize;
/// An *n*-dimensional array, generic over the data storage `S` and the
/// dimensionality `D`.
///
/// `ptr` points at the first logical element inside `data`'s buffer;
/// `dim` and `strides` describe the element layout relative to it.
pub struct ArrayBase<S, D>
    where S: Data
{
    // Data buffer / ownership information.
    data: S,
    // Pointer to the first element.
    ptr: *mut S::Elem,
    // Length of each axis.
    dim: D,
    // Per-axis element stride; stored with the same type as `dim` and
    // reinterpreted as signed in `strides()`.
    strides: D,
}
/// Array representation trait: read access to the underlying buffer.
///
/// Unsafe because implementations must report the real buffer so that the
/// pointer bounds checks elsewhere in this module stay sound.
pub unsafe trait Data {
    type Elem;
    fn slice(&self) -> &[Self::Elem];
}
/// Write access to the underlying buffer, with a copy-on-write hook.
pub unsafe trait DataMut : Data {
    fn slice_mut(&mut self) -> &mut [Self::Elem];
    /// Make sure the array has unique access to its data (no-op by
    /// default; overridden for shared storage such as `Rc<Vec<A>>`).
    #[inline]
    fn ensure_unique<D>(&mut ArrayBase<Self, D>)
        where Self: Sized,
              D: Dimension
    { }
    /// Whether the data is uniquely held (`true` by default).
    #[inline]
    fn is_unique(&mut self) -> bool {
        true
    }
}
/// Cloning the storage while keeping an interior pointer consistent.
pub unsafe trait DataClone : Data {
    /// Clone the storage; `ptr` points into `self`'s buffer, and the
    /// returned pointer must address the corresponding element of the clone.
    unsafe fn clone_with_ptr(&self, ptr: *mut Self::Elem) -> (Self, *mut Self::Elem);
}
unsafe impl<A> Data for Rc<Vec<A>> {
    type Elem = A;
    fn slice(&self) -> &[A] {
        self
    }
}
/// Copy-on-write semantics: mutation first ensures the `Rc` is uniquely
/// held, cloning the data if it is shared.
unsafe impl<A> DataMut for Rc<Vec<A>>
    where A: Clone
{
    fn slice_mut(&mut self) -> &mut [A] {
        &mut Rc::make_mut(self)[..]
    }
    fn ensure_unique<D>(self_: &mut ArrayBase<Self, D>)
        where Self: Sized,
              D: Dimension
    {
        // Already the sole owner: nothing to do.
        if Rc::get_mut(&mut self_.data).is_some() {
            return;
        }
        // If the view covers at most half of the buffer, copy just the
        // viewed elements into a fresh compact array instead of cloning
        // the whole (larger) buffer.
        if self_.dim.size() <= self_.data.len() / 2 {
            unsafe {
                *self_ = ArrayBase::from_vec_dim_unchecked(self_.dim.clone(),
                                                           self_.iter()
                                                                .cloned()
                                                                .collect());
            }
            return;
        }
        // Otherwise clone the whole buffer, then re-point `ptr` at the
        // same element offset (ptr - base) within the new allocation.
        let our_off = (self_.ptr as isize - self_.data.as_ptr() as isize) /
                      mem::size_of::<A>() as isize;
        let rvec = Rc::make_mut(&mut self_.data);
        unsafe {
            self_.ptr = rvec.as_mut_ptr().offset(our_off);
        }
    }
    fn is_unique(&mut self) -> bool {
        Rc::get_mut(self).is_some()
    }
}
unsafe impl<A> DataClone for Rc<Vec<A>> {
    unsafe fn clone_with_ptr(&self, ptr: *mut Self::Elem) -> (Self, *mut Self::Elem) {
        // Cloning an `Rc` only bumps the refcount; the buffer address is
        // unchanged, so the element pointer can be reused as-is.
        (self.clone(), ptr)
    }
}
/// Uniquely owned storage: `Vec<A>` backs `OwnedArray`.
unsafe impl<A> Data for Vec<A> {
    type Elem = A;
    fn slice(&self) -> &[A] {
        self
    }
}
unsafe impl<A> DataMut for Vec<A> {
    // A `Vec` is always uniquely owned, so no copy-on-write is needed.
    fn slice_mut(&mut self) -> &mut [A] {
        self
    }
}
unsafe impl<A> DataClone for Vec<A>
    where A: Clone
{
    /// Clone the vector and translate `ptr` — which points into `self`'s
    /// buffer — to the corresponding element of the cloned buffer.
    unsafe fn clone_with_ptr(&self, ptr: *mut Self::Elem) -> (Self, *mut Self::Elem) {
        let mut u = self.clone();
        let mut new_ptr = u.as_mut_ptr();
        // Guard against division by zero for zero-sized element types.
        if mem::size_of::<A>() != 0 {
            // BUG FIX: the element offset of `ptr` inside the old buffer
            // is `ptr - base`, not `base - ptr`. The previous computation
            // negated the offset, so the clone's pointer landed outside
            // its buffer whenever the array view did not start at the
            // buffer base (e.g. after slicing). This now matches the
            // offset direction used by `ensure_unique` for `Rc<Vec<A>>`.
            let our_off = (ptr as isize - self.as_ptr() as isize) /
                          mem::size_of::<A>() as isize;
            new_ptr = new_ptr.offset(our_off);
        }
        (u, new_ptr)
    }
}
/// View storage: carries no buffer of its own, only a lifetime.
/// `slice()` returns an empty slice, which makes the buffer-based
/// pointer checks (see `pointer_is_inbounds`) vacuously succeed.
unsafe impl<'a, A> Data for ViewRepr<&'a A> {
    type Elem = A;
    fn slice(&self) -> &[A] {
        &[]
    }
}
unsafe impl<'a, A> DataClone for ViewRepr<&'a A> {
    unsafe fn clone_with_ptr(&self, ptr: *mut Self::Elem) -> (Self, *mut Self::Elem) {
        // A view is Copy; the borrowed buffer does not move.
        (*self, ptr)
    }
}
/// Mutable view storage; same zero-buffer representation as above.
unsafe impl<'a, A> Data for ViewRepr<&'a mut A> {
    type Elem = A;
    fn slice(&self) -> &[A] {
        &[]
    }
}
unsafe impl<'a, A> DataMut for ViewRepr<&'a mut A> {
    fn slice_mut(&mut self) -> &mut [A] {
        &mut []
    }
}
/// Storage that owns its elements and can be created from a `Vec`.
pub unsafe trait DataOwned : Data {
    fn new(elements: Vec<Self::Elem>) -> Self;
    /// Convert into reference-counted (shared) storage.
    fn into_shared(self) -> Rc<Vec<Self::Elem>>;
}
/// Marker for storage with shared (cheaply cloneable) ownership.
pub unsafe trait DataShared : Clone + DataClone { }
unsafe impl<A> DataShared for Rc<Vec<A>> {}
unsafe impl<'a, A> DataShared for ViewRepr<&'a A> {}
unsafe impl<A> DataOwned for Vec<A> {
    fn new(elements: Vec<A>) -> Self {
        elements
    }
    fn into_shared(self) -> Rc<Vec<A>> {
        Rc::new(self)
    }
}
unsafe impl<A> DataOwned for Rc<Vec<A>> {
    fn new(elements: Vec<A>) -> Self {
        Rc::new(elements)
    }
    fn into_shared(self) -> Rc<Vec<A>> {
        // Already shared.
        self
    }
}
/// Array with reference-counted (copy-on-write) storage.
pub type RcArray<A, D> = ArrayBase<Rc<Vec<A>>, D>;
/// Old name for `RcArray`; kept for backwards compatibility.
#[cfg_attr(has_deprecated, deprecated(note="`Array` is deprecated! Renamed to `RcArray`."))]
pub type Array<A, D> = ArrayBase<Rc<Vec<A>>, D>;
/// Array with uniquely owned storage.
pub type OwnedArray<A, D> = ArrayBase<Vec<A>, D>;
/// Read-only view of an array (or slice).
pub type ArrayView<'a, A, D> = ArrayBase<ViewRepr<&'a A>, D>;
/// Read-write view of an array (or slice).
pub type ArrayViewMut<'a, A, D> = ArrayBase<ViewRepr<&'a mut A>, D>;
/// Zero-sized view marker; only the lifetime/mutability in `A` matters.
#[derive(Copy, Clone)]
pub struct ViewRepr<A> {
    life: PhantomData<A>,
}
impl<A> ViewRepr<A> {
    #[inline(always)]
    fn new() -> Self {
        ViewRepr { life: PhantomData }
    }
}
impl<S: DataClone, D: Clone> Clone for ArrayBase<S, D> {
    fn clone(&self) -> ArrayBase<S, D> {
        unsafe {
            // The storage's `clone_with_ptr` translates our element
            // pointer into the cloned buffer.
            let (data, ptr) = self.data.clone_with_ptr(self.ptr);
            ArrayBase {
                data: data,
                ptr: ptr,
                dim: self.dim.clone(),
                strides: self.strides.clone(),
            }
        }
    }
}
/// Views (Copy storage) make the whole array Copy.
impl<S: DataClone + Copy, D: Copy> Copy for ArrayBase<S, D> {}
/// Constructors for one-dimensional arrays with owned storage.
impl<S> ArrayBase<S, Ix>
    where S: DataOwned
{
    /// Create a 1-D array directly from a vector (no copy).
    pub fn from_vec(v: Vec<S::Elem>) -> ArrayBase<S, Ix> {
        unsafe { Self::from_vec_dim_unchecked(v.len() as Ix, v) }
    }
    /// Create a 1-D array by collecting an iterator.
    pub fn from_iter<I: IntoIterator<Item=S::Elem>>(iterable: I) -> ArrayBase<S, Ix> {
        Self::from_vec(iterable.into_iter().collect())
    }
    /// Create a 1-D array of `n` evenly spaced floats from `start` to `end`
    /// (inclusive).
    pub fn linspace<F>(start: F, end: F, n: usize) -> ArrayBase<S, Ix>
        where S: Data<Elem=F>,
              F: libnum::Float,
    {
        Self::from_iter(linspace::linspace(start, end, n))
    }
}
impl<S, A> ArrayBase<S, (Ix, Ix)>
    where S: DataOwned<Elem=A>,
{
    /// Create an `n` × `n` identity matrix.
    pub fn eye(n: Ix) -> ArrayBase<S, (Ix, Ix)>
        where S: DataMut,
              A: Clone + libnum::Zero + libnum::One,
    {
        // Start from zeros and set the diagonal to one.
        let mut eye = Self::zeros((n, n));
        for a_ii in eye.diag_mut() {
            *a_ii = A::one();
        }
        eye
    }
}
/// General constructors for arrays of any dimension with owned storage.
impl<S, A, D> ArrayBase<S, D>
    where S: DataOwned<Elem=A>,
          D: Dimension,
{
    /// Create an array with shape `dim`, filled with copies of `elem`
    /// (row-major / C layout).
    ///
    /// **Panics** if the shape's element count overflows `usize`.
    pub fn from_elem(dim: D, elem: A) -> ArrayBase<S, D>
        where A: Clone
    {
        let size = dim.size_checked().expect("Shape too large: overflow in size");
        let v = vec![elem; size];
        unsafe { Self::from_vec_dim_unchecked(dim, v) }
    }
    /// Like `from_elem`, but with column-major (Fortran) memory order.
    pub fn from_elem_f(dim: D, elem: A) -> ArrayBase<S, D>
        where A: Clone
    {
        let size = dim.size_checked().expect("Shape too large: overflow in size");
        let v = vec![elem; size];
        unsafe { Self::from_vec_dim_unchecked_f(dim, v) }
    }
    /// Create an array of zeros with shape `dim` (row-major layout).
    pub fn zeros(dim: D) -> ArrayBase<S, D>
        where A: Clone + libnum::Zero
    {
        Self::from_elem(dim, libnum::zero())
    }
    /// Create an array of zeros with shape `dim` (column-major layout).
    pub fn zeros_f(dim: D) -> ArrayBase<S, D>
        where A: Clone + libnum::Zero
    {
        Self::from_elem_f(dim, libnum::zero())
    }
    /// Create an array with shape `dim` filled with default values.
    pub fn default(dim: D) -> ArrayBase<S, D>
        where A: Default
    {
        let v = (0..dim.size()).map(|_| A::default()).collect();
        unsafe { Self::from_vec_dim_unchecked(dim, v) }
    }
    /// Create an array from a vector, checking that the shape matches the
    /// vector's length.
    pub fn from_vec_dim(dim: D, v: Vec<A>) -> Result<ArrayBase<S, D>, ShapeError> {
        if dim.size_checked() != Some(v.len()) {
            return Err(shape_error::incompatible_shapes(&v.len(), &dim));
        }
        unsafe { Ok(Self::from_vec_dim_unchecked(dim, v)) }
    }
    /// Create an array from a vector without checking the length.
    ///
    /// Unsafe: the caller must guarantee `dim.size() == v.len()`.
    pub unsafe fn from_vec_dim_unchecked(dim: D, mut v: Vec<A>) -> ArrayBase<S, D> {
        debug_assert!(dim.size_checked() == Some(v.len()));
        ArrayBase {
            ptr: v.as_mut_ptr(),
            data: DataOwned::new(v),
            strides: dim.default_strides(),
            dim: dim,
        }
    }
    /// As `from_vec_dim_unchecked`, but with column-major (Fortran) strides.
    ///
    /// Unsafe: the caller must guarantee `dim.size() == v.len()`.
    pub unsafe fn from_vec_dim_unchecked_f(dim: D, mut v: Vec<A>) -> ArrayBase<S, D> {
        debug_assert!(dim.size_checked() == Some(v.len()));
        ArrayBase {
            ptr: v.as_mut_ptr(),
            data: DataOwned::new(v),
            strides: dim.fortran_strides(),
            dim: dim,
        }
    }
    /// Create an array from a vector with explicit custom strides, checking
    /// that every index stays inside the vector and no elements alias.
    pub fn from_vec_dim_stride(dim: D, strides: D, v: Vec<A>)
        -> Result<ArrayBase<S, D>, StrideError>
    {
        dimension::can_index_slice(&v, &dim, &strides).map(|_| {
            unsafe {
                Self::from_vec_dim_stride_unchecked(dim, strides, v)
            }
        })
    }
    /// As `from_vec_dim_stride`, but without the validity check.
    ///
    /// Unsafe: the caller must guarantee that `dim`/`strides` only address
    /// elements inside `v`.
    pub unsafe fn from_vec_dim_stride_unchecked(dim: D, strides: D, mut v: Vec<A>)
        -> ArrayBase<S, D>
    {
        debug_assert!(dimension::can_index_slice(&v, &dim, &strides).is_ok());
        ArrayBase {
            ptr: v.as_mut_ptr(),
            data: DataOwned::new(v),
            strides: strides,
            dim: dim
        }
    }
}
impl<A, S, D> ArrayBase<S, D> where S: Data<Elem=A>, D: Dimension
{
/// Return the total number of elements in the array.
pub fn len(&self) -> usize {
    self.dim.size()
}
/// Return a clone of the dimension value.
pub fn dim(&self) -> D {
    self.dim.clone()
}
/// Return the shape as a slice of axis lengths.
pub fn shape(&self) -> &[Ix] {
    self.dim.slice()
}
/// Return the strides as a slice of signed per-axis offsets.
pub fn strides(&self) -> &[Ixs] {
    let s = self.strides.slice();
    // Strides are stored as `Ix` (usize) but are logically signed;
    // reinterpret the slice as `Ixs` (same size and layout).
    unsafe {
        slice::from_raw_parts(s.as_ptr() as *const _, s.len())
    }
}
/// Return the number of dimensions (axes).
pub fn ndim(&self) -> usize {
    self.dim.ndim()
}
/// Return a read-only view of the whole array.
pub fn view(&self) -> ArrayView<A, D> {
    debug_assert!(self.pointer_is_inbounds());
    unsafe {
        ArrayView::new_(self.ptr, self.dim.clone(), self.strides.clone())
    }
}
/// Return a read-write view; triggers copy-on-write for shared data.
pub fn view_mut(&mut self) -> ArrayViewMut<A, D>
    where S: DataMut,
{
    self.ensure_unique();
    unsafe {
        ArrayViewMut::new_(self.ptr, self.dim.clone(), self.strides.clone())
    }
}
/// Return an uniquely owned copy of the array's elements.
pub fn to_owned(&self) -> OwnedArray<A, D>
    where A: Clone
{
    // Fast path: contiguous data can be copied as one slice.
    let data = if let Some(slc) = self.as_slice() {
        slc.to_vec()
    } else {
        self.iter().cloned().collect()
    };
    unsafe {
        ArrayBase::from_vec_dim_unchecked(self.dim.clone(), data)
    }
}
/// Return a shared (reference-counted) copy of the array's elements.
pub fn to_shared(&self) -> RcArray<A, D>
    where A: Clone
{
    self.to_owned().into_shared()
}
/// Convert owned storage into shared storage without copying elements.
pub fn into_shared(self) -> RcArray<A, D>
    where S: DataOwned,
{
    let data = self.data.into_shared();
    ArrayBase {
        data: data,
        ptr: self.ptr,
        dim: self.dim,
        strides: self.strides,
    }
}
/// Return an iterator over the elements in logical order.
pub fn iter(&self) -> Elements<A, D> {
    debug_assert!(self.pointer_is_inbounds());
    self.view().into_iter_()
}
/// Return a mutable iterator over the elements in logical order.
pub fn iter_mut(&mut self) -> ElementsMut<A, D>
    where S: DataMut,
{
    self.ensure_unique();
    self.view_mut().into_iter_()
}
/// Return an iterator of `(index, &element)` pairs.
pub fn indexed_iter(&self) -> Indexed<A, D> {
    Indexed(self.view().into_elements_base())
}
/// Return an iterator of `(index, &mut element)` pairs.
pub fn indexed_iter_mut(&mut self) -> IndexedMut<A, D>
    where S: DataMut,
{
    IndexedMut(self.view_mut().into_elements_base())
}
/// Return a sliced read-only view of the array.
pub fn slice(&self, indexes: &D::SliceArg) -> ArrayView<A, D> {
    let mut arr = self.view();
    arr.islice(indexes);
    arr
}
/// Return a sliced read-write view of the array.
pub fn slice_mut(&mut self, indexes: &D::SliceArg) -> ArrayViewMut<A, D>
    where S: DataMut
{
    let mut arr = self.view_mut();
    arr.islice(indexes);
    arr
}
/// Slice the array in place, adjusting shape, strides, and the element
/// pointer; no elements are moved.
pub fn islice(&mut self, indexes: &D::SliceArg) {
    let offset = Dimension::do_slices(&mut self.dim, &mut self.strides, indexes);
    unsafe {
        self.ptr = self.ptr.offset(offset);
    }
    debug_assert!(self.pointer_is_inbounds());
}
/// Return a reference to the element at `index`, or `None` if out of bounds.
pub fn get<I>(&self, index: I) -> Option<&A>
    where I: NdIndex<Dim=D>,
{
    let ptr = self.ptr;
    index.index_checked(&self.dim, &self.strides)
         .map(move |offset| unsafe { &*ptr.offset(offset) })
}
/// Return a mutable reference to the element at `index`, or `None` if out
/// of bounds; triggers copy-on-write for shared data.
pub fn get_mut<I>(&mut self, index: I) -> Option<&mut A>
    where S: DataMut,
          I: NdIndex<Dim=D>,
{
    self.ensure_unique();
    let ptr = self.ptr;
    index.index_checked(&self.dim, &self.strides)
         .map(move |offset| unsafe { &mut *ptr.offset(offset) })
}
/// Unchecked indexing.
///
/// Unsafe: the caller must guarantee `index` is in bounds.
#[inline]
pub unsafe fn uget(&self, index: D) -> &A {
    debug_assert!(self.dim
                      .stride_offset_checked(&self.strides, &index)
                      .is_some());
    let off = Dimension::stride_offset(&index, &self.strides);
    &*self.ptr.offset(off)
}
/// Unchecked mutable indexing.
///
/// Unsafe: the caller must guarantee `index` is in bounds; the data must
/// already be uniquely held (asserted in debug builds).
#[inline]
pub unsafe fn uget_mut(&mut self, index: D) -> &mut A
    where S: DataMut
{
    debug_assert!(self.data.is_unique());
    debug_assert!(self.dim
                      .stride_offset_checked(&self.strides, &index)
                      .is_some());
    let off = Dimension::stride_offset(&index, &self.strides);
    &mut *self.ptr.offset(off)
}
/// Return a view with one dimension removed, fixed at `index` along `axis`.
pub fn subview(&self, axis: Axis, index: Ix)
    -> ArrayView<A, <D as RemoveAxis>::Smaller>
    where D: RemoveAxis,
{
    self.view().into_subview(axis, index)
}
/// Mutable variant of `subview`.
pub fn subview_mut(&mut self, axis: Axis, index: Ix)
    -> ArrayViewMut<A, D::Smaller>
    where S: DataMut,
          D: RemoveAxis,
{
    self.view_mut().into_subview(axis, index)
}
/// Collapse `axis` in place to length 1 at position `index`
/// (the dimension count stays the same).
pub fn isubview(&mut self, axis: Axis, index: Ix) {
    dimension::do_sub(&mut self.dim, &mut self.ptr, &self.strides,
                      axis.axis(), index)
}
/// Consume the array and return it with `axis` removed, fixed at `index`.
pub fn into_subview(mut self, axis: Axis, index: Ix)
    -> ArrayBase<S, <D as RemoveAxis>::Smaller>
    where D: RemoveAxis,
{
    self.isubview(axis, index);
    let axis = axis.axis();
    ArrayBase {
        data: self.data,
        ptr: self.ptr,
        dim: self.dim.remove_axis(axis),
        strides: self.strides.remove_axis(axis),
    }
}
/// Iterate over the innermost rows (1-D lanes) of the array.
pub fn inner_iter(&self) -> InnerIter<A, D> {
    iterators::new_inner_iter(self.view())
}
/// Mutable variant of `inner_iter`.
pub fn inner_iter_mut(&mut self) -> InnerIterMut<A, D>
    where S: DataMut
{
    iterators::new_inner_iter_mut(self.view_mut())
}
/// Iterate over the subviews along the first axis.
#[allow(deprecated)]
pub fn outer_iter(&self) -> OuterIter<A, D::Smaller>
    where D: RemoveAxis,
{
    self.view().into_outer_iter()
}
/// Mutable variant of `outer_iter`.
#[allow(deprecated)]
pub fn outer_iter_mut(&mut self) -> OuterIterMut<A, D::Smaller>
    where S: DataMut,
          D: RemoveAxis,
{
    self.view_mut().into_outer_iter()
}
/// Iterate over the subviews along the given axis.
pub fn axis_iter(&self, axis: Axis) -> OuterIter<A, D::Smaller>
    where D: RemoveAxis,
{
    iterators::new_axis_iter(self.view(), axis.axis())
}
/// Mutable variant of `axis_iter`.
pub fn axis_iter_mut(&mut self, axis: Axis) -> OuterIterMut<A, D::Smaller>
    where S: DataMut,
          D: RemoveAxis,
{
    iterators::new_axis_iter_mut(self.view_mut(), axis.axis())
}
/// Iterate over chunks of `size` subviews along `axis`.
pub fn axis_chunks_iter(&self, axis: Axis, size: usize) -> AxisChunksIter<A, D> {
    iterators::new_chunk_iter(self.view(), axis.axis(), size)
}
/// Mutable variant of `axis_chunks_iter`.
pub fn axis_chunks_iter_mut(&mut self, axis: Axis, size: usize)
    -> AxisChunksIterMut<A, D>
    where S: DataMut
{
    iterators::new_chunk_iter_mut(self.view_mut(), axis.axis(), size)
}
/// Compute the diagonal's length (shortest axis) and its step
/// (sum of all strides: one step along every axis at once).
fn diag_params(&self) -> (Ix, Ixs) {
    let len = self.dim.slice().iter().cloned().min().unwrap_or(1);
    let stride = self.strides()
                     .iter()
                     .fold(0, |sum, s| sum + s);
    (len, stride)
}
/// Return a 1-D view of the main diagonal.
pub fn diag(&self) -> ArrayView<A, Ix> {
    self.view().into_diag()
}
/// Return a mutable 1-D view of the main diagonal.
pub fn diag_mut(&mut self) -> ArrayViewMut<A, Ix>
    where S: DataMut,
{
    self.view_mut().into_diag()
}
/// Consume the array and return its main diagonal as a 1-D array.
pub fn into_diag(self) -> ArrayBase<S, Ix> {
    let (len, stride) = self.diag_params();
    ArrayBase {
        data: self.data,
        ptr: self.ptr,
        dim: len,
        // The signed stride is stored back as `Ix` (see `strides()`).
        strides: stride as Ix,
    }
}
/// Delegate copy-on-write to the storage type.
fn ensure_unique(&mut self)
    where S: DataMut
{
    debug_assert!(self.pointer_is_inbounds());
    S::ensure_unique(self);
    debug_assert!(self.pointer_is_inbounds());
}
/// Rewrite the data into contiguous row-major order if it is not already.
#[cfg(feature = "rblas")]
fn ensure_standard_layout(&mut self)
    where S: DataOwned,
          A: Clone
{
    if !self.is_standard_layout() {
        let mut v: Vec<A> = self.iter().cloned().collect();
        self.ptr = v.as_mut_ptr();
        self.data = DataOwned::new(v);
        self.strides = self.dim.default_strides();
    }
}
/// Return `true` if the element layout is contiguous row-major
/// ("C order"); length-1 axes are allowed to have any stride.
pub fn is_standard_layout(&self) -> bool {
    let defaults = self.dim.default_strides();
    if self.strides == defaults {
        return true;
    }
    // Axes of length 1 never move the pointer, so their stride is
    // irrelevant to the layout.
    for (&dim, (&s, &ds)) in zipsl(self.dim.slice(),
                                   zipsl(self.strides(), defaults.slice()))
    {
        if dim != 1 && s != (ds as Ixs) {
            return false;
        }
    }
    true
}
/// Return `true` if the innermost axis is contiguous (stride 1) or
/// trivially so (length 0 or 1).
#[cfg(feature = "rblas")]
fn is_inner_contiguous(&self) -> bool {
    let ndim = self.ndim();
    if ndim == 0 {
        return true;
    }
    self.shape()[ndim - 1] <= 1 || self.strides()[ndim - 1] == 1
}
/// Return the data as one contiguous slice, if the layout permits.
pub fn as_slice(&self) -> Option<&[A]> {
    if self.is_standard_layout() {
        unsafe {
            Some(slice::from_raw_parts(self.ptr, self.len()))
        }
    } else {
        None
    }
}
/// Mutable variant of `as_slice`; triggers copy-on-write first.
pub fn as_slice_mut(&mut self) -> Option<&mut [A]>
    where S: DataMut
{
    if self.is_standard_layout() {
        self.ensure_unique();
        unsafe {
            Some(slice::from_raw_parts_mut(self.ptr, self.len()))
        }
    } else {
        None
    }
}
/// Return a reshaped array with the same number of elements.
///
/// Shares the data if the layout is standard (cheap clone of shared
/// storage), otherwise copies the elements into a new buffer.
///
/// **Panics** if the element counts of the two shapes differ.
pub fn reshape<E>(&self, shape: E) -> ArrayBase<S, E>
    where S: DataShared + DataOwned,
          A: Clone,
          E: Dimension,
{
    if shape.size_checked() != Some(self.dim.size()) {
        panic!("Incompatible shapes in reshape, attempted from: {:?}, to: {:?}",
               self.dim.slice(),
               shape.slice())
    }
    if self.is_standard_layout() {
        // Contiguous: keep the same buffer, just swap in new dim/strides.
        let cl = self.clone();
        ArrayBase {
            data: cl.data,
            ptr: cl.ptr,
            strides: shape.default_strides(),
            dim: shape,
        }
    } else {
        // Non-contiguous: gather the elements in logical order.
        let v = self.iter().map(|x| x.clone()).collect::<Vec<A>>();
        unsafe {
            ArrayBase::from_vec_dim_unchecked(shape, v)
        }
    }
}
/// Consume the array and reshape it without copying; only possible when
/// the layout is standard (row-major contiguous).
pub fn into_shape<E>(self, shape: E) -> Result<ArrayBase<S, E>, ShapeError>
    where E: Dimension
{
    if shape.size_checked() != Some(self.dim.size()) {
        return Err(shape_error::incompatible_shapes(&self.dim, &shape));
    }
    if self.is_standard_layout() {
        Ok(ArrayBase {
            data: self.data,
            ptr: self.ptr,
            strides: shape.default_strides(),
            dim: shape,
        })
    } else {
        Err(ShapeError::IncompatibleLayout)
    }
}
/// Return a view broadcast to shape `dim`, or `None` if the shapes are
/// not broadcast-compatible.
pub fn broadcast<E>(&self, dim: E) -> Option<ArrayView<A, E>>
    where E: Dimension
{
    /// Compute the broadcast strides: matching axes keep their stride,
    /// length-1 axes and new leading axes get stride 0 (the element is
    /// repeated without moving the pointer).
    fn upcast<D: Dimension, E: Dimension>(to: &D, from: &E, stride: &E) -> Option<D> {
        let mut new_stride = to.clone();
        // Cannot broadcast to fewer dimensions.
        if to.ndim() < from.ndim() {
            return None;
        }
        {
            // Walk the axes right-to-left so trailing axes align.
            let mut new_stride_iter = new_stride.slice_mut().iter_mut().rev();
            for ((er, es), dr) in from.slice().iter().rev()
                .zip(stride.slice().iter().rev())
                .zip(new_stride_iter.by_ref())
            {
                if *dr == *er {
                    // Axis lengths agree: keep the existing stride.
                    *dr = *es;
                } else if *er == 1 {
                    // Length-1 source axis: repeat via stride 0.
                    *dr = 0
                } else {
                    return None;
                }
            }
            // Any remaining (new leading) axes repeat the whole array.
            for dr in new_stride_iter {
                *dr = 0;
            }
        }
        Some(new_stride)
    }
    let broadcast_strides = match upcast(&dim, &self.dim, &self.strides) {
        Some(st) => st,
        None => return None,
    };
    unsafe { Some(ArrayView::new_(self.ptr, dim, broadcast_strides)) }
}
/// As `broadcast`, but panics (with a cold path) on incompatible shapes.
#[inline]
fn broadcast_unwrap<E>(&self, dim: E) -> ArrayView<A, E>
    where E: Dimension,
{
    #[cold]
    #[inline(never)]
    fn broadcast_panic<D, E>(from: &D, to: &E) -> !
        where D: Dimension,
              E: Dimension,
    {
        panic!("Could not broadcast array from shape: {:?} to: {:?}",
               from.slice(), to.slice())
    }
    match self.broadcast(dim.clone()) {
        Some(it) => it,
        None => broadcast_panic(&self.dim, &dim),
    }
}
/// Swap two axes in place (metadata only; no elements move).
pub fn swap_axes(&mut self, ax: usize, bx: usize) {
    self.dim.slice_mut().swap(ax, bx);
    self.strides.slice_mut().swap(ax, bx);
}
/// Consume the array and return it with the axis order reversed
/// (the transpose, for 2-D).
pub fn reversed_axes(mut self) -> ArrayBase<S, D> {
    self.dim.slice_mut().reverse();
    self.strides.slice_mut().reverse();
    self
}
/// Access the underlying buffer directly, in memory (not logical) order.
pub fn raw_data(&self) -> &[A] {
    self.data.slice()
}
/// Mutable variant of `raw_data`; triggers copy-on-write first.
pub fn raw_data_mut(&mut self) -> &mut [A]
    where S: DataMut,
{
    self.ensure_unique();
    self.data.slice_mut()
}
/// Debug check: the element pointer lies inside the storage buffer.
/// Views report an empty buffer, so they always pass.
fn pointer_is_inbounds(&self) -> bool {
    let slc = self.data.slice();
    if slc.is_empty() {
        return true;
    }
    let ptr = slc.as_ptr() as *mut _;
    let end = unsafe {
        ptr.offset(slc.len() as isize)
    };
    self.ptr >= ptr && self.ptr <= end
}
/// Assign the elements of `rhs` (broadcast if needed) into `self`.
pub fn assign<E: Dimension, S2>(&mut self, rhs: &ArrayBase<S2, E>)
    where S: DataMut,
          A: Clone,
          S2: Data<Elem=A>,
{
    self.zip_mut_with(rhs, |x, y| *x = y.clone());
}
/// Assign a single value to every element.
pub fn assign_scalar(&mut self, x: &A)
    where S: DataMut, A: Clone,
{
    self.unordered_foreach_mut(move |elt| *elt = x.clone());
}
/// Apply `f` to every element, in no particular order; uses the
/// contiguous-slice fast path when the layout permits.
fn unordered_foreach_mut<F>(&mut self, mut f: F)
    where S: DataMut,
          F: FnMut(&mut A)
{
    if let Some(slc) = self.as_slice_mut() {
        for elt in slc {
            f(elt);
        }
        return;
    }
    for row in self.inner_iter_mut() {
        for elt in row {
            f(elt);
        }
    }
}
/// Apply `f` pairwise when both arrays have the same shape; prefers the
/// both-contiguous fast path, otherwise iterates row by row.
fn zip_with_mut_same_shape<B, S2, E, F>(&mut self, rhs: &ArrayBase<S2, E>, mut f: F)
    where S: DataMut,
          S2: Data<Elem=B>,
          E: Dimension,
          F: FnMut(&mut A, &B)
{
    debug_assert_eq!(self.shape(), rhs.shape());
    if let Some(self_s) = self.as_slice_mut() {
        if let Some(rhs_s) = rhs.as_slice() {
            // Bound by the shorter slice so indexing below is in bounds.
            let len = cmp::min(self_s.len(), rhs_s.len());
            let s = &mut self_s[..len];
            let r = &rhs_s[..len];
            for i in 0..len {
                f(&mut s[i], &r[i]);
            }
            return;
        }
    }
    self.zip_with_mut_outer_iter(rhs, f);
}
/// Apply `f` pairwise by iterating matching inner rows of both arrays;
/// tries contiguous row slices first, falls back to unchecked indexing.
#[inline(always)]
fn zip_with_mut_outer_iter<B, S2, E, F>(&mut self, rhs: &ArrayBase<S2, E>, mut f: F)
    where S: DataMut,
          S2: Data<Elem=B>,
          E: Dimension,
          F: FnMut(&mut A, &B)
{
    debug_assert_eq!(self.shape(), rhs.shape());
    // Once a row fails the slice fast path, stop retrying it: every row
    // of an array shares the same stride pattern.
    let mut try_slices = true;
    let mut rows = self.inner_iter_mut().zip(rhs.inner_iter());
    for (mut s_row, r_row) in &mut rows {
        if try_slices {
            if let Some(self_s) = s_row.as_slice_mut() {
                if let Some(rhs_s) = r_row.as_slice() {
                    let len = cmp::min(self_s.len(), rhs_s.len());
                    let s = &mut self_s[..len];
                    let r = &rhs_s[..len];
                    for i in 0..len {
                        f(&mut s[i], &r[i]);
                    }
                    continue;
                }
            }
            try_slices = false;
        }
        unsafe {
            // Rows have equal length (equal shapes asserted above).
            for i in 0..s_row.len() {
                f(s_row.uget_mut(i), r_row.uget(i))
            }
        }
    }
}
/// Apply `f` with a single broadcast element as the right-hand side.
fn zip_mut_with_elem<B, F>(&mut self, rhs_elem: &B, mut f: F)
    where S: DataMut,
          F: FnMut(&mut A, &B)
{
    self.unordered_foreach_mut(move |elt| f(elt, rhs_elem));
}
/// Apply `f` elementwise between `self` and `rhs`, broadcasting `rhs`
/// to `self`'s shape when the shapes differ.
///
/// **Panics** (via `broadcast_unwrap`) if `rhs` cannot be broadcast.
#[inline]
pub fn zip_mut_with<B, S2, E, F>(&mut self, rhs: &ArrayBase<S2, E>, f: F)
    where S: DataMut,
          S2: Data<Elem=B>,
          E: Dimension,
          F: FnMut(&mut A, &B)
{
    if rhs.dim.ndim() == 0 {
        // 0-dimensional rhs: one element, broadcast everywhere.
        unsafe {
            let rhs_elem = &*rhs.ptr;
            self.zip_mut_with_elem(rhs_elem, f);
        }
    } else if self.dim.ndim() == rhs.dim.ndim() && self.shape() == rhs.shape() {
        self.zip_with_mut_same_shape(rhs, f);
    } else {
        let rhs_broadcast = rhs.broadcast_unwrap(self.dim());
        self.zip_with_mut_outer_iter(&rhs_broadcast, f);
    }
}
/// Fold all elements into an accumulator, using the contiguous-slice
/// fast path when the layout permits.
pub fn fold<'a, F, B>(&'a self, mut init: B, mut f: F) -> B
    where F: FnMut(B, &'a A) -> B, A: 'a
{
    if let Some(slc) = self.as_slice() {
        for elt in slc {
            init = f(init, elt);
        }
        return init;
    }
    for row in self.inner_iter() {
        for elt in row {
            init = f(init, elt);
        }
    }
    init
}
/// Apply `f` to every element in logical order and collect the results
/// into a new owned array with the same shape.
pub fn map<'a, B, F>(&'a self, mut f: F) -> OwnedArray<B, D>
    where F: FnMut(&'a A) -> B,
          A: 'a,
{
    let mut mapped = Vec::with_capacity(self.dim.size());
    mapped.extend(self.iter().map(|element| f(element)));
    unsafe {
        // `mapped.len()` equals `self.dim.size()`, so the shape is valid.
        ArrayBase::from_vec_dim_unchecked(self.dim.clone(), mapped)
    }
}
}
/// Free-function form of `ArrayBase::zeros`; kept for compatibility.
#[cfg_attr(has_deprecated, deprecated(note="Use `ArrayBase::zeros` instead."))]
pub fn zeros<A, D>(dim: D) -> OwnedArray<A, D>
    where A: Clone + libnum::Zero, D: Dimension,
{
    ArrayBase::zeros(dim)
}
/// Create a zero-dimensional (single-element) owned array.
pub fn arr0<A>(x: A) -> OwnedArray<A, ()>
{
    unsafe { ArrayBase::from_vec_dim_unchecked((), vec![x]) }
}
/// Create a one-dimensional owned array from a slice (clones elements).
pub fn arr1<A: Clone>(xs: &[A]) -> OwnedArray<A, Ix> {
    ArrayBase::from_vec(xs.to_vec())
}
/// Create a one-dimensional shared (reference-counted) array from a slice.
pub fn rcarr1<A: Clone>(xs: &[A]) -> RcArray<A, Ix> {
    arr1(xs).into_shared()
}
/// Create a zero-dimensional view of a single value.
pub fn aview0<A>(x: &A) -> ArrayView<A, ()> {
    unsafe { ArrayView::new_(x, (), ()) }
}
/// Create a one-dimensional view of a slice (no copy).
pub fn aview1<A>(xs: &[A]) -> ArrayView<A, Ix> {
    ArrayView::from_slice(xs)
}
/// Create a two-dimensional view of a slice of fixed-size rows (no copy).
pub fn aview2<A, V: FixedInitializer<Elem=A>>(xs: &[V]) -> ArrayView<A, (Ix, Ix)> {
    let cols = V::len();
    let rows = xs.len();
    // Reinterpret the `[V]` slice as a flat `[A]` slice: each `V` is an
    // array `[A; N]`, so the elements are laid out contiguously.
    let data = unsafe {
        std::slice::from_raw_parts(xs.as_ptr() as *const A, cols * rows)
    };
    let dim = (rows as Ix, cols as Ix);
    unsafe {
        let strides = dim.default_strides();
        ArrayView::new_(data.as_ptr(), dim, strides)
    }
}
/// Create a mutable one-dimensional view of a slice (no copy).
pub fn aview_mut1<A>(xs: &mut [A]) -> ArrayViewMut<A, Ix> {
    ArrayViewMut::from_slice(xs)
}
/// Fixed-size rows (arrays `[T; N]`) usable as input to `arr2`/`arr3`
/// and `aview2`.
///
/// Unsafe: implementations must be true fixed-size arrays of `Elem`
/// (`aview2` relies on their contiguous layout).
pub unsafe trait FixedInitializer {
    type Elem;
    fn as_init_slice(&self) -> &[Self::Elem];
    fn len() -> usize;
}
// Implement `FixedInitializer` for `[T; N]` for each listed N
// (pre-const-generics recursion over the length list).
macro_rules! impl_arr_init {
    (__impl $n: expr) => (
        unsafe impl<T> FixedInitializer for [T; $n] {
            type Elem = T;
            fn as_init_slice(&self) -> &[T] { self }
            fn len() -> usize { $n }
        }
    );
    () => ();
    ($n: expr, $($m:expr,)*) => (
        impl_arr_init!(__impl $n);
        impl_arr_init!($($m,)*);
    )
}
impl_arr_init!(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,);
/// Create a two-dimensional owned array from a slice of fixed-size rows.
pub fn arr2<A: Clone, V: FixedInitializer<Elem = A>>(xs: &[V]) -> OwnedArray<A, (Ix, Ix)> {
    let rows = xs.len() as Ix;
    let cols = match xs.first() {
        Some(row) => row.as_init_slice().len() as Ix,
        None => 0,
    };
    let dim = (rows, cols);
    let mut elements = Vec::<A>::with_capacity(dim.size());
    for row in xs {
        elements.extend(row.as_init_slice().iter().cloned());
    }
    unsafe {
        // `elements.len() == rows * cols` by construction: every row has
        // the fixed length `cols`.
        ArrayBase::from_vec_dim_unchecked(dim, elements)
    }
}
/// Create a two-dimensional shared (reference-counted) array from rows.
pub fn rcarr2<A: Clone, V: FixedInitializer<Elem = A>>(xs: &[V]) -> RcArray<A, (Ix, Ix)> {
    arr2(xs).into_shared()
}
/// Create a three-dimensional owned array from nested fixed-size rows.
pub fn arr3<A: Clone, V: FixedInitializer<Elem=U>, U: FixedInitializer<Elem=A>>(xs: &[V])
    -> OwnedArray<A, (Ix, Ix, Ix)>
{
    let m = xs.len() as Ix;
    // Derive the inner axis lengths from the first row/plane; empty
    // input yields shape (0, 0, 0).
    let fst = xs.get(0).map(|snd| snd.as_init_slice());
    let thr = fst.and_then(|elt| elt.get(0).map(|elt2| elt2.as_init_slice()));
    let n = fst.map_or(0, |v| v.len() as Ix);
    let o = thr.map_or(0, |v| v.len() as Ix);
    let dim = (m, n, o);
    let mut result = Vec::<A>::with_capacity(dim.size());
    for snd in xs {
        let snd = snd.as_init_slice();
        for thr in snd.iter() {
            let thr = thr.as_init_slice();
            result.extend(thr.iter().cloned());
        }
    }
    unsafe {
        ArrayBase::from_vec_dim_unchecked(dim, result)
    }
}
/// Create a three-dimensional shared (reference-counted) array.
pub fn rcarr3<A: Clone, V: FixedInitializer<Elem=U>, U: FixedInitializer<Elem=A>>(xs: &[V])
    -> RcArray<A, (Ix, Ix, Ix)>
{
    arr3(xs).into_shared()
}
/// Numeric reductions.
impl<A, S, D> ArrayBase<S, D>
    where S: Data<Elem=A>,
          D: Dimension,
{
    /// Sum along `axis`, returning an array with that axis removed.
    pub fn sum(&self, axis: Axis) -> OwnedArray<A, <D as RemoveAxis>::Smaller>
        where A: Clone + Add<Output=A>,
              D: RemoveAxis,
    {
        let n = self.shape()[axis.axis()];
        // Seed with the first subview, then accumulate the rest in place.
        let mut res = self.subview(axis, 0).to_owned();
        for i in 1..n {
            let view = self.subview(axis, i);
            res.iadd(&view);
        }
        res
    }
    /// Sum of all elements.
    pub fn scalar_sum(&self) -> A
        where A: Clone + Add<Output=A> + libnum::Zero,
    {
        // Unrolled slice summation when contiguous, per-row otherwise.
        if let Some(slc) = self.as_slice() {
            return numeric_util::unrolled_sum(slc);
        }
        let mut sum = A::zero();
        for row in self.inner_iter() {
            if let Some(slc) = row.as_slice() {
                sum = sum + numeric_util::unrolled_sum(slc);
            } else {
                sum = sum + row.fold(A::zero(), |acc, elt| acc + elt.clone());
            }
        }
        sum
    }
    /// Mean along `axis`, returning an array with that axis removed.
    pub fn mean(&self, axis: Axis) -> OwnedArray<A, <D as RemoveAxis>::Smaller>
        where A: LinalgScalar,
              D: RemoveAxis,
    {
        let n = self.shape()[axis.axis()];
        let mut sum = self.sum(axis);
        // Build the element-typed count n by repeated addition of one
        // (A has no conversion from usize).
        let one = libnum::one::<A>();
        let mut cnt = one;
        for _ in 1..n {
            cnt = cnt + one;
        }
        sum.idiv_scalar(&cnt);
        sum
    }
    /// `true` if the arrays have the same shape and all elements differ
    /// by at most `tol`.
    pub fn allclose<S2>(&self, rhs: &ArrayBase<S2, D>, tol: A) -> bool
        where A: Float,
              S2: Data<Elem=A>,
    {
        self.shape() == rhs.shape() &&
        self.iter().zip(rhs.iter()).all(|(x, y)| (*x - *y).abs() <= tol)
    }
}
/// Dot product for one-dimensional arrays.
impl<A, S> ArrayBase<S, Ix>
    where S: Data<Elem=A>,
{
    /// Compute the dot product of two 1-D arrays.
    ///
    /// **Panics** if the lengths differ (asserted in the implementations).
    pub fn dot<S2>(&self, rhs: &ArrayBase<S2, Ix>) -> A
        where S2: Data<Elem=A>,
              A: LinalgScalar,
    {
        self.dot_impl(rhs)
    }
    /// Portable fallback: unrolled slice dot when both are contiguous,
    /// element-by-element with unchecked indexing otherwise.
    fn dot_generic<S2>(&self, rhs: &ArrayBase<S2, Ix>) -> A
        where S2: Data<Elem=A>,
              A: LinalgScalar,
    {
        assert_eq!(self.len(), rhs.len());
        if let Some(self_s) = self.as_slice() {
            if let Some(rhs_s) = rhs.as_slice() {
                return numeric_util::unrolled_dot(self_s, rhs_s);
            }
        }
        let mut sum = A::zero();
        for i in 0..self.len() {
            unsafe {
                // In bounds: equal lengths asserted above.
                sum = sum.clone() + self.uget(i).clone() * rhs.uget(i).clone();
            }
        }
        sum
    }
    /// Without the rblas feature, always use the generic path.
    #[cfg(not(feature="rblas"))]
    fn dot_impl<S2>(&self, rhs: &ArrayBase<S2, Ix>) -> A
        where S2: Data<Elem=A>,
              A: LinalgScalar,
    {
        self.dot_generic(rhs)
    }
    /// With the rblas feature: dispatch to BLAS for f32/f64 when the
    /// arrays are long enough to amortize the call overhead.
    #[cfg(feature="rblas")]
    fn dot_impl<S2>(&self, rhs: &ArrayBase<S2, Ix>) -> A
        where S2: Data<Elem=A>,
              A: LinalgScalar,
    {
        use std::any::{Any, TypeId};
        use rblas::vector::ops::Dot;
        use linalg::AsBlasAny;
        // Reinterpret a value of type A as B; only valid when A and B are
        // actually the same type (checked at runtime via TypeId).
        fn cast_as<A: Any + Copy, B: Any + Copy>(a: &A) -> B {
            assert_eq!(TypeId::of::<A>(), TypeId::of::<B>());
            unsafe {
                ::std::ptr::read(a as *const _ as *const B)
            }
        }
        if self.len() >= 32 {
            assert_eq!(self.len(), rhs.len());
            if let Ok(self_v) = self.blas_view_as_type::<f32>() {
                if let Ok(rhs_v) = rhs.blas_view_as_type::<f32>() {
                    let f_ret = f32::dot(&self_v, &rhs_v);
                    return cast_as::<f32, A>(&f_ret);
                }
            }
            if let Ok(self_v) = self.blas_view_as_type::<f64>() {
                if let Ok(rhs_v) = rhs.blas_view_as_type::<f64>() {
                    let f_ret = f64::dot(&self_v, &rhs_v);
                    return cast_as::<f64, A>(&f_ret);
                }
            }
        }
        self.dot_generic(rhs)
    }
}
/// Row/column access and matrix multiplication for 2-D arrays.
impl<A, S> ArrayBase<S, (Ix, Ix)>
    where S: Data<Elem=A>,
{
    /// View of row `index`.
    pub fn row(&self, index: Ix) -> ArrayView<A, Ix>
    {
        self.subview(Axis(0), index)
    }
    /// Mutable view of row `index`.
    pub fn row_mut(&mut self, index: Ix) -> ArrayViewMut<A, Ix>
        where S: DataMut
    {
        self.subview_mut(Axis(0), index)
    }
    /// View of column `index`.
    pub fn column(&self, index: Ix) -> ArrayView<A, Ix>
    {
        self.subview(Axis(1), index)
    }
    /// Mutable view of column `index`.
    pub fn column_mut(&mut self, index: Ix) -> ArrayViewMut<A, Ix>
        where S: DataMut
    {
        self.subview_mut(Axis(1), index)
    }
    /// Matrix product `self (m × a)` × `rhs (a × n)` → `(m × n)`.
    ///
    /// **Panics** if the inner dimensions do not agree.
    pub fn mat_mul(&self, rhs: &ArrayBase<S, (Ix, Ix)>) -> OwnedArray<A, (Ix, Ix)>
        where A: LinalgScalar,
    {
        let ((m, a), (b, n)) = (self.dim, rhs.dim);
        let (self_columns, other_rows) = (a, b);
        assert!(self_columns == other_rows);
        let mut res_elems = Vec::<A>::with_capacity(m as usize * n as usize);
        unsafe {
            // NOTE(review): every slot is overwritten by `*rr = ...` below
            // before the vec is used; relies on `A: LinalgScalar` elements
            // tolerating uninitialized writes — confirm A is Copy-like.
            res_elems.set_len(m as usize * n as usize);
        }
        // (i, j) walk the output in row-major order, in step with `rr`.
        let mut i = 0;
        let mut j = 0;
        for rr in &mut res_elems {
            unsafe {
                *rr = (0..a).fold(libnum::zero::<A>(),
                    move |s, k| s + *self.uget((i, k)) * *rhs.uget((k, j))
                );
            }
            j += 1;
            if j == n {
                j = 0;
                i += 1;
            }
        }
        unsafe {
            ArrayBase::from_vec_dim_unchecked((m, n), res_elems)
        }
    }
    /// Matrix–vector product `self (m × a)` × `rhs (a)` → `(m)`.
    ///
    /// **Panics** if the inner dimensions do not agree.
    pub fn mat_mul_col(&self, rhs: &ArrayBase<S, Ix>) -> OwnedArray<A, Ix>
        where A: LinalgScalar,
    {
        let ((m, a), n) = (self.dim, rhs.dim);
        let (self_columns, other_rows) = (a, n);
        assert!(self_columns == other_rows);
        let mut res_elems = Vec::<A>::with_capacity(m as usize);
        unsafe {
            // See the note in `mat_mul` about writing into set_len'd space.
            res_elems.set_len(m as usize);
        }
        for (i, rr) in enumerate(&mut res_elems) {
            unsafe {
                *rr = (0..a).fold(libnum::zero::<A>(),
                    move |s, k| s + *self.uget((i, k)) * *rhs.uget(k)
                );
            }
        }
        unsafe {
            ArrayBase::from_vec_dim_unchecked(m, res_elems)
        }
    }
}
// Generate a pair of in-place arithmetic methods: one taking an array
// right-hand side (with broadcasting via `zip_mut_with`) and one taking
// a scalar.
macro_rules! impl_binary_op_inherent(
    ($trt:ident, $mth:ident, $imethod:ident, $imth_scalar:ident, $doc:expr) => (
    #[doc=$doc]
    pub fn $imethod <E: Dimension, S2> (&mut self, rhs: &ArrayBase<S2, E>)
        where A: Clone + $trt<A, Output=A>,
              S2: Data<Elem=A>,
    {
        self.zip_mut_with(rhs, |x, y| {
            *x = x.clone().$mth(y.clone());
        });
    }
    #[doc=$doc]
    pub fn $imth_scalar (&mut self, x: &A)
        where A: Clone + $trt<A, Output=A>,
    {
        self.unordered_foreach_mut(move |elt| {
            *elt = elt.clone(). $mth (x.clone());
        });
    }
    );
);
/// In-place arithmetic methods (`iadd`, `iadd_scalar`, ...).
impl<A, S, D> ArrayBase<S, D>
    where S: DataMut<Elem=A>,
          D: Dimension,
{
    impl_binary_op_inherent!(Add, add, iadd, iadd_scalar, "addition");
    impl_binary_op_inherent!(Sub, sub, isub, isub_scalar, "subtraction");
    impl_binary_op_inherent!(Mul, mul, imul, imul_scalar, "multiplication");
    impl_binary_op_inherent!(Div, div, idiv, idiv_scalar, "division");
    impl_binary_op_inherent!(Rem, rem, irem, irem_scalar, "remainder");
    impl_binary_op_inherent!(BitAnd, bitand, ibitand, ibitand_scalar, "bit and");
    impl_binary_op_inherent!(BitOr, bitor, ibitor, ibitor_scalar, "bit or");
    impl_binary_op_inherent!(BitXor, bitxor, ibitxor, ibitxor_scalar, "bit xor");
    impl_binary_op_inherent!(Shl, shl, ishl, ishl_scalar, "left shift");
    impl_binary_op_inherent!(Shr, shr, ishr, ishr_scalar, "right shift");
    /// Negate every element in place.
    pub fn ineg(&mut self)
        where A: Clone + Neg<Output=A>,
    {
        self.unordered_foreach_mut(|elt| {
            *elt = elt.clone().neg()
        });
    }
    /// Apply logical/bitwise NOT to every element in place.
    pub fn inot(&mut self)
        where A: Clone + Not<Output=A>,
    {
        self.unordered_foreach_mut(|elt| {
            *elt = elt.clone().not()
        });
    }
}
/// Marker trait for element types accepted as the scalar right-hand
/// operand of the array arithmetic impls below (bound `B: Clone + Scalar`).
pub trait Scalar { }
impl Scalar for bool { }
impl Scalar for i8 { }
impl Scalar for u8 { }
impl Scalar for i16 { }
impl Scalar for u16 { }
impl Scalar for i32 { }
impl Scalar for u32 { }
impl Scalar for i64 { }
impl Scalar for u64 { }
impl Scalar for f32 { }
impl Scalar for f64 { }
impl Scalar for Complex<f32> { }
impl Scalar for Complex<f64> { }
// Generate the operator-trait impls for one binary operation:
// array (op) array by value and by reference, reference (op) reference
// producing an owned result, and array (op) scalar variants.
macro_rules! impl_binary_op(
    ($trt:ident, $mth:ident, $imth:ident, $imth_scalar:ident, $doc:expr) => (
    // array op array (consumes both; rhs taken by value, forwarded by ref)
    #[doc=$doc]
    impl<A, S, S2, D, E> $trt<ArrayBase<S2, E>> for ArrayBase<S, D>
        where A: Clone + $trt<A, Output=A>,
              S: DataOwned<Elem=A> + DataMut,
              S2: Data<Elem=A>,
              D: Dimension,
              E: Dimension,
    {
        type Output = ArrayBase<S, D>;
        fn $mth(self, rhs: ArrayBase<S2, E>) -> ArrayBase<S, D>
        {
            self.$mth(&rhs)
        }
    }
    // array op &array (mutates self in place via the inherent i-method)
    #[doc=$doc]
    impl<'a, A, S, S2, D, E> $trt<&'a ArrayBase<S2, E>> for ArrayBase<S, D>
        where A: Clone + $trt<A, Output=A>,
              S: DataMut<Elem=A>,
              S2: Data<Elem=A>,
              D: Dimension,
              E: Dimension,
    {
        type Output = ArrayBase<S, D>;
        fn $mth (mut self, rhs: &ArrayBase<S2, E>) -> ArrayBase<S, D>
        {
            self.$imth(rhs);
            self
        }
    }
    // &array op &array (allocates a new owned result)
    #[doc=$doc]
    impl<'a, A, S, S2, D, E> $trt<&'a ArrayBase<S2, E>> for &'a ArrayBase<S, D>
        where A: Clone + $trt<A, Output=A>,
              S: Data<Elem=A>,
              S2: Data<Elem=A>,
              D: Dimension,
              E: Dimension,
    {
        type Output = OwnedArray<A, D>;
        fn $mth (self, rhs: &'a ArrayBase<S2, E>) -> OwnedArray<A, D>
        {
            self.to_owned().$mth(rhs)
        }
    }
    // array op scalar (in place)
    #[doc=$doc]
    impl<A, S, D, B> $trt<B> for ArrayBase<S, D>
        where A: Clone + $trt<B, Output=A>,
              S: DataOwned<Elem=A> + DataMut,
              D: Dimension,
              B: Clone + Scalar,
    {
        type Output = ArrayBase<S, D>;
        fn $mth (mut self, x: B) -> ArrayBase<S, D>
        {
            self.unordered_foreach_mut(move |elt| {
                *elt = elt.clone().$mth(x.clone());
            });
            self
        }
    }
    // &array op scalar (allocates a new owned result)
    #[doc=$doc]
    impl<'a, A, S, D, B> $trt<B> for &'a ArrayBase<S, D>
        where A: Clone + $trt<B, Output=A>,
              S: Data<Elem=A>,
              D: Dimension,
              B: Clone + Scalar,
    {
        type Output = OwnedArray<A, D>;
        fn $mth(self, x: B) -> OwnedArray<A, D>
        {
            self.to_owned().$mth(x)
        }
    }
    );
);
// Generate `scalar op array` impls (scalar on the left-hand side) for a
// concrete scalar type: one consuming the array, one taking a reference
// and producing an owned result.
macro_rules! impl_scalar_op {
    ($scalar:ty, $trt:ident, $mth:ident, $doc:expr) => (
    impl<S, D> $trt<ArrayBase<S, D>> for $scalar
        where S: DataMut<Elem=$scalar>,
              D: Dimension,
    {
        type Output = ArrayBase<S, D>;
        fn $mth (self, mut rhs: ArrayBase<S, D>) -> ArrayBase<S, D>
        {
            // Note the operand order: scalar op element.
            rhs.unordered_foreach_mut(move |elt| {
                *elt = self.$mth(*elt);
            });
            rhs
        }
    }
    impl<'a, S, D> $trt<&'a ArrayBase<S, D>> for $scalar
        where S: Data<Elem=$scalar>,
              D: Dimension,
    {
        type Output = OwnedArray<$scalar, D>;
        fn $mth (self, rhs: &ArrayBase<S, D>) -> OwnedArray<$scalar, D>
        {
            self.$mth(rhs.to_owned())
        }
    }
    );
}
/// Elementwise operator overloads for arrays: `+ - * / %`, bitwise ops,
/// shifts, and the unary `-` and `!`, generated by the macros defined above.
mod arithmetic_ops {
use super::*;
use std::ops::*;
use libnum::Complex;
// `array OP array` and `array OP scalar` impls; the 3rd and 4th macro
// arguments name the in-place methods (`iadd`, `iadd_scalar`, ...) that
// the generated impls call.
impl_binary_op!(Add, add, iadd, iadd_scalar, "addition");
impl_binary_op!(Sub, sub, isub, isub_scalar, "subtraction");
impl_binary_op!(Mul, mul, imul, imul_scalar, "multiplication");
impl_binary_op!(Div, div, idiv, idiv_scalar, "division");
impl_binary_op!(Rem, rem, irem, irem_scalar, "remainder");
impl_binary_op!(BitAnd, bitand, ibitand, ibitand_scalar, "bit and");
impl_binary_op!(BitOr, bitor, ibitor, ibitor_scalar, "bit or");
impl_binary_op!(BitXor, bitxor, ibitxor, ibitxor_scalar, "bit xor");
impl_binary_op!(Shl, shl, ishl, ishl_scalar, "left shift");
impl_binary_op!(Shr, shr, ishr, ishr_scalar, "right shift");
// `scalar OP array` (scalar on the left) needs one `impl_scalar_op!`
// invocation per concrete scalar type: a blanket impl over all scalar
// types would violate Rust's coherence rules. This helper expands all
// ten operators for one integer type.
macro_rules! all_scalar_ops {
($int_scalar:ty) => (
impl_scalar_op!($int_scalar, Add, add, "addition");
impl_scalar_op!($int_scalar, Sub, sub, "subtraction");
impl_scalar_op!($int_scalar, Mul, mul, "multiplication");
impl_scalar_op!($int_scalar, Div, div, "division");
impl_scalar_op!($int_scalar, Rem, rem, "remainder");
impl_scalar_op!($int_scalar, BitAnd, bitand, "bit and");
impl_scalar_op!($int_scalar, BitOr, bitor, "bit or");
impl_scalar_op!($int_scalar, BitXor, bitxor, "bit xor");
impl_scalar_op!($int_scalar, Shl, shl, "left shift");
impl_scalar_op!($int_scalar, Shr, shr, "right shift");
);
}
all_scalar_ops!(i8);
all_scalar_ops!(u8);
all_scalar_ops!(i16);
all_scalar_ops!(u16);
all_scalar_ops!(i32);
all_scalar_ops!(u32);
all_scalar_ops!(i64);
all_scalar_ops!(u64);
// Non-integer scalars only get the operators that exist for them:
// logical bit ops for `bool`; arithmetic (no shifts/bit ops) for floats;
// arithmetic without `Rem` for complex numbers.
impl_scalar_op!(bool, BitAnd, bitand, "bit and");
impl_scalar_op!(bool, BitOr, bitor, "bit or");
impl_scalar_op!(bool, BitXor, bitxor, "bit xor");
impl_scalar_op!(f32, Add, add, "addition");
impl_scalar_op!(f32, Sub, sub, "subtraction");
impl_scalar_op!(f32, Mul, mul, "multiplication");
impl_scalar_op!(f32, Div, div, "division");
impl_scalar_op!(f32, Rem, rem, "remainder");
impl_scalar_op!(f64, Add, add, "addition");
impl_scalar_op!(f64, Sub, sub, "subtraction");
impl_scalar_op!(f64, Mul, mul, "multiplication");
impl_scalar_op!(f64, Div, div, "division");
impl_scalar_op!(f64, Rem, rem, "remainder");
impl_scalar_op!(Complex<f32>, Add, add, "addition");
impl_scalar_op!(Complex<f32>, Sub, sub, "subtraction");
impl_scalar_op!(Complex<f32>, Mul, mul, "multiplication");
impl_scalar_op!(Complex<f32>, Div, div, "division");
impl_scalar_op!(Complex<f64>, Add, add, "addition");
impl_scalar_op!(Complex<f64>, Sub, sub, "subtraction");
impl_scalar_op!(Complex<f64>, Mul, mul, "multiplication");
impl_scalar_op!(Complex<f64>, Div, div, "division");
// Unary `-`: consume the array and negate every element in place via
// `ineg` (defined elsewhere in this crate).
impl<A, S, D> Neg for ArrayBase<S, D>
where A: Clone + Neg<Output=A>,
S: DataMut<Elem=A>,
D: Dimension
{
type Output = Self;
fn neg(mut self) -> Self {
self.ineg();
self
}
}
// Unary `!`: same pattern, delegating to the in-place `inot`.
impl<A, S, D> Not for ArrayBase<S, D>
where A: Clone + Not<Output=A>,
S: DataMut<Elem=A>,
D: Dimension
{
type Output = Self;
fn not(mut self) -> Self {
self.inot();
self
}
}
}
// Compound-assignment operators (`+=`, `-=`, ...). Gated on the
// `assign_ops` cargo feature because the `*Assign` traits were unstable
// behind the `augmented_assignments` / `op_assign_traits` feature gates
// when this was written (see the crate attributes at the top of the file).
#[cfg(feature = "assign_ops")]
mod assign_ops {
use super::*;
/// Implements one `std::ops::*Assign` trait, for both
/// `array OP= &array` and `array OP= scalar`.
macro_rules! impl_assign_op {
($trt:ident, $method:ident, $doc:expr) => {
// Each invocation brings its own trait into scope; the names are
// distinct per invocation so the repeated `use` items don't collide.
use std::ops::$trt;
#[doc=$doc]
// `array OP= &array`, elementwise via `zip_mut_with`. The rhs has
// its own dimension type `E` — NOTE(review): this presumably allows
// broadcasting; confirm against `zip_mut_with`'s definition.
impl<'a, A, S, S2, D, E> $trt<&'a ArrayBase<S2, E>> for ArrayBase<S, D>
where A: Clone + $trt<A>,
S: DataMut<Elem=A>,
S2: Data<Elem=A>,
D: Dimension,
E: Dimension,
{
fn $method(&mut self, rhs: &ArrayBase<S2, E>) {
self.zip_mut_with(rhs, |x, y| {
x.$method(y.clone());
});
}
}
#[doc=$doc]
// `array OP= scalar`: apply the scalar to every element; traversal
// order is irrelevant, so the unordered walk is used.
impl<A, S, D, B> $trt<B> for ArrayBase<S, D>
where A: $trt<B>,
S: DataMut<Elem=A>,
D: Dimension,
B: Clone + Scalar,
{
fn $method(&mut self, rhs: B) {
self.unordered_foreach_mut(move |elt| {
elt.$method(rhs.clone());
});
}
}
};
}
impl_assign_op!(AddAssign, add_assign,
"Perform `self += rhs` as elementwise addition (in place).\n");
impl_assign_op!(SubAssign, sub_assign,
"Perform `self -= rhs` as elementwise subtraction (in place).\n");
impl_assign_op!(MulAssign, mul_assign,
"Perform `self *= rhs` as elementwise multiplication (in place).\n");
impl_assign_op!(DivAssign, div_assign,
"Perform `self /= rhs` as elementwise division (in place).\n");
impl_assign_op!(RemAssign, rem_assign,
"Perform `self %= rhs` as elementwise remainder (in place).\n");
impl_assign_op!(BitAndAssign, bitand_assign,
"Perform `self &= rhs` as elementwise bit and (in place).\n");
impl_assign_op!(BitOrAssign, bitor_assign,
"Perform `self |= rhs` as elementwise bit or (in place).\n");
impl_assign_op!(BitXorAssign, bitxor_assign,
"Perform `self ^= rhs` as elementwise bit xor (in place).\n");
}
// Construction of a one-dimensional read-only view from a slice.
impl<'a, A> ArrayBase<ViewRepr<&'a A>, Ix> {
    /// Create a one-dimensional, contiguous (stride 1) view of `xs`.
    ///
    /// The pointer is stored as `*mut A` because that is what `ArrayBase`
    /// holds, but an immutable view never writes through it.
    #[inline]
    pub fn from_slice(xs: &'a [A]) -> Self {
        let len = xs.len();
        let base = xs.as_ptr() as *mut A;
        ArrayView {
            data: ViewRepr::new(),
            ptr: base,
            dim: len,
            strides: 1,
        }
    }
}
// Methods for read-only array views of any dimension.
impl<'a, A, D> ArrayBase<ViewRepr<&'a A>, D>
where D: Dimension,
{
/// Create a view from raw parts with no validity checks.
///
/// Unsafe: the caller must ensure `ptr`, `dim` and `strides` describe
/// memory that is valid to read for the lifetime `'a` (the checked
/// entry point is `from_slice_dim_stride`).
#[inline(always)]
unsafe fn new_(ptr: *const A, dim: D, strides: D) -> Self {
ArrayView {
data: ViewRepr::new(),
ptr: ptr as *mut A,
dim: dim,
strides: strides,
}
}
/// Create a view of `xs` with the given shape and strides, after
/// checking that every in-bounds index maps inside the slice; the
/// `StrideError` describes why the combination is invalid.
pub fn from_slice_dim_stride(dim: D, strides: D, xs: &'a [A])
-> Result<Self, StrideError>
{
dimension::can_index_slice(xs, &dim, &strides).map(|_| {
// SAFETY: `can_index_slice` just validated dim/strides against `xs`.
unsafe {
Self::new_(xs.as_ptr(), dim, strides)
}
})
}
/// Convert into the low-level counting iterator over elements.
#[inline]
fn into_base_iter(self) -> Baseiter<'a, A, D> {
unsafe {
Baseiter::new(self.ptr, self.dim.clone(), self.strides.clone())
}
}
/// Convert into the strided fallback element iterator.
#[inline]
fn into_elements_base(self) -> ElementsBase<'a, A, D> {
ElementsBase { inner: self.into_base_iter() }
}
/// Convert into the public element iterator, using the fast slice
/// iterator when the view is contiguous and the counted fallback
/// otherwise.
fn into_iter_(self) -> Elements<'a, A, D> {
Elements {
inner: if let Some(slc) = self.into_slice() {
ElementsRepr::Slice(slc.iter())
} else {
ElementsRepr::Counted(self.into_elements_base())
},
}
}
/// Return the view's data as a slice with lifetime `'a` if it is in
/// standard (contiguous) layout, else `None`.
/// NOTE(review): named `into_` but takes `&self`; the returned slice
/// borrows for `'a`, not from `self`.
fn into_slice(&self) -> Option<&'a [A]> {
if self.is_standard_layout() {
// SAFETY: standard layout means `len()` contiguous elements at `ptr`.
unsafe {
Some(slice::from_raw_parts(self.ptr, self.len()))
}
} else {
None
}
}
/// Iterate over the outermost axis, yielding views one dimension smaller.
#[doc(hidden)] #[cfg_attr(has_deprecated, deprecated(note="This method will be replaced."))]
pub fn into_outer_iter(self) -> OuterIter<'a, A, D::Smaller>
where D: RemoveAxis,
{
iterators::new_outer_iter(self)
}
/// Split the view into two disjoint halves along `axis` at `index`
/// (`index` goes to the right half). Panics if `index` exceeds the
/// axis length; `index == len` yields an empty right half.
pub fn split_at(self, axis: Axis, index: Ix)
-> (Self, Self)
{
let axis = axis.axis();
assert!(index <= self.shape()[axis]);
let left_ptr = self.ptr;
let right_ptr = if index == self.shape()[axis] {
// Right half is empty: reuse the base pointer instead of
// offsetting, so no pointer past the data is formed.
self.ptr
} else {
let offset = stride_offset(index, self.strides.slice()[axis]);
unsafe {
self.ptr.offset(offset)
}
};
// Both halves keep the original strides; only the length of `axis`
// changes (left gets `index`, right gets the remainder).
let mut dim_left = self.dim.clone();
dim_left.slice_mut()[axis] = index;
let left = unsafe {
Self::new_(left_ptr, dim_left, self.strides.clone())
};
let mut dim_right = self.dim;
let right_len = dim_right.slice()[axis] - index;
dim_right.slice_mut()[axis] = right_len;
let right = unsafe {
Self::new_(right_ptr, dim_right, self.strides)
};
(left, right)
}
}
// Construction of a one-dimensional read-write view from a mutable slice.
impl<'a, A> ArrayBase<ViewRepr<&'a mut A>, Ix> {
    /// Create a one-dimensional, contiguous (stride 1) mutable view of `xs`.
    #[inline]
    pub fn from_slice(xs: &'a mut [A]) -> Self {
        let len = xs.len();
        ArrayViewMut {
            data: ViewRepr::new(),
            ptr: xs.as_mut_ptr(),
            dim: len,
            strides: 1,
        }
    }
}
// Methods for read-write array views of any dimension; mirrors the
// immutable-view impl above.
impl<'a, A, D> ArrayBase<ViewRepr<&'a mut A>, D>
where D: Dimension,
{
/// Create a mutable view from raw parts with no validity checks.
///
/// Unsafe: the caller must ensure `ptr`, `dim` and `strides` describe
/// memory valid to read and write for `'a`, and that no element is
/// reachable twice (no aliasing within the view).
#[inline(always)]
unsafe fn new_(ptr: *mut A, dim: D, strides: D) -> Self {
ArrayViewMut {
data: ViewRepr::new(),
ptr: ptr,
dim: dim,
strides: strides,
}
}
/// Create a mutable view of `xs` with the given shape and strides,
/// after checking that every in-bounds index maps inside the slice.
pub fn from_slice_dim_stride(dim: D, strides: D, xs: &'a mut [A])
-> Result<Self, StrideError>
{
dimension::can_index_slice(xs, &dim, &strides).map(|_| {
// SAFETY: `can_index_slice` just validated dim/strides against `xs`.
unsafe {
Self::new_(xs.as_mut_ptr(), dim, strides)
}
})
}
/// Convert into the low-level counting iterator over elements.
#[inline]
fn into_base_iter(self) -> Baseiter<'a, A, D> {
unsafe {
Baseiter::new(self.ptr, self.dim.clone(), self.strides.clone())
}
}
/// Convert into the strided fallback element iterator.
#[inline]
fn into_elements_base(self) -> ElementsBaseMut<'a, A, D> {
ElementsBaseMut { inner: self.into_base_iter() }
}
/// Convert into the public mutable element iterator: fast slice
/// iteration for contiguous (standard layout) views, counted fallback
/// otherwise.
fn into_iter_(self) -> ElementsMut<'a, A, D> {
ElementsMut {
inner:
if self.is_standard_layout() {
// SAFETY: standard layout means `len()` contiguous elements
// at `ptr`, exclusively borrowed for `'a`.
let slc = unsafe {
slice::from_raw_parts_mut(self.ptr, self.len())
};
ElementsRepr::Slice(slc.iter_mut())
} else {
ElementsRepr::Counted(self.into_elements_base())
}
}
}
/// Return the data as a mutable slice with lifetime `'a` if the view is
/// in standard (contiguous) layout, else `None`.
/// NOTE(review): leading underscore — apparently not used yet.
fn _into_slice_mut(self) -> Option<&'a mut [A]>
{
if self.is_standard_layout() {
unsafe {
Some(slice::from_raw_parts_mut(self.ptr, self.len()))
}
} else {
None
}
}
/// Iterate over the outermost axis, yielding mutable views one
/// dimension smaller.
#[doc(hidden)] #[cfg_attr(has_deprecated, deprecated(note="This method will be replaced."))]
pub fn into_outer_iter(self) -> OuterIterMut<'a, A, D::Smaller>
where D: RemoveAxis,
{
iterators::new_outer_iter_mut(self)
}
/// Split the view into two disjoint mutable halves along `axis` at
/// `index` (`index` goes to the right half). Panics if `index` exceeds
/// the axis length; `index == len` yields an empty right half. The
/// halves never overlap, so handing out both is sound.
pub fn split_at(self, axis: Axis, index: Ix)
-> (Self, Self)
{
let axis = axis.axis();
assert!(index <= self.shape()[axis]);
let left_ptr = self.ptr;
let right_ptr = if index == self.shape()[axis] {
// Right half is empty: reuse the base pointer instead of
// offsetting, so no pointer past the data is formed.
self.ptr
}
else {
let offset = stride_offset(index, self.strides.slice()[axis]);
unsafe {
self.ptr.offset(offset)
}
};
// Both halves keep the original strides; only the length of `axis`
// changes (left gets `index`, right gets the remainder).
let mut dim_left = self.dim.clone();
dim_left.slice_mut()[axis] = index;
let left = unsafe {
Self::new_(left_ptr, dim_left, self.strides.clone())
};
let mut dim_right = self.dim;
let right_len = dim_right.slice()[axis] - index;
dim_right.slice_mut()[axis] = right_len;
let right = unsafe {
Self::new_(right_ptr, dim_right, self.strides)
};
(left, right)
}
}
/// An iterator over the elements of an array view.
///
/// Holds either a plain slice iterator (used when the view is contiguous)
/// or the counted strided fallback — see `ArrayView::into_iter_`.
pub struct Elements<'a, A: 'a, D> {
inner: ElementsRepr<Iter<'a, A>, ElementsBase<'a, A, D>>,
}
/// Counted, stride-aware fallback iterator over the elements of a view
/// that is not contiguous in memory.
struct ElementsBase<'a, A: 'a, D> {
inner: Baseiter<'a, A, D>,
}
/// A mutable iterator over the elements of an array view.
///
/// Mutable counterpart of `Elements`: slice fast path for contiguous
/// views, counted strided fallback otherwise.
pub struct ElementsMut<'a, A: 'a, D> {
inner: ElementsRepr<IterMut<'a, A>, ElementsBaseMut<'a, A, D>>,
}
/// Counted, stride-aware fallback iterator yielding mutable references;
/// mutable counterpart of `ElementsBase`.
struct ElementsBaseMut<'a, A: 'a, D> {
inner: Baseiter<'a, A, D>,
}
/// An iterator over element references paired with their indexes.
/// NOTE(review): the `Iterator` impl is not in this part of the file —
/// presumably it yields `(index, &elem)`; confirm in the iterators module.
#[derive(Clone)]
pub struct Indexed<'a, A: 'a, D>(ElementsBase<'a, A, D>);
/// Mutable counterpart of `Indexed` (indexes with mutable element references).
pub struct IndexedMut<'a, A: 'a, D>(ElementsBaseMut<'a, A, D>);
/// Pair two slice-like sequences for lockstep iteration.
///
/// Thin crate-internal convenience wrapper over
/// `itertools::ZipSlices::from_slices`.
fn zipsl<A, B>(xs: A, ys: B) -> ZipSlices<A, B>
    where A: itertools::misc::Slice,
          B: itertools::misc::Slice,
{
    ZipSlices::from_slices(xs, ys)
}
/// Internal representation of an element iterator: either a fast slice
/// iterator (`S`, for contiguous memory) or a counted strided iterator
/// (`C`, the general fallback).
enum ElementsRepr<S, C> {
Slice(S),
Counted(C),
}