#![crate_name="ndarray"]
#![crate_type="dylib"]
#![cfg_attr(feature = "assign_ops", feature(augmented_assignments,
op_assign_traits))]
#[cfg(feature = "serde")]
extern crate serde;
#[cfg(feature = "rustc-serialize")]
extern crate rustc_serialize as serialize;
extern crate itertools as it;
#[cfg(not(nocomplex))]
extern crate num as libnum;
use std::mem;
use std::rc::Rc;
use libnum::Float;
use std::ops::{Add, Sub, Mul, Div, Rem, Neg, Not, Shr, Shl,
BitAnd,
BitOr,
BitXor,
};
pub use dimension::{Dimension, RemoveAxis};
pub use si::{Si, S, SliceRange};
use dimension::stride_offset;
pub use indexes::Indexes;
use iterators::Baseiter;
pub mod linalg;
mod arraytraits;
#[cfg(feature = "serde")]
mod arrayserialize;
mod arrayformat;
mod dimension;
mod indexes;
mod iterators;
mod si;
/// Unsigned type used for array indices and per-axis lengths.
pub type Ix = u32;
/// Signed counterpart of `Ix`, used for strides and pointer offsets.
pub type Ixs = i32;
/// An N-dimensional array with shared, copy-on-write storage.
///
/// The element buffer is shared through `Rc`; mutation goes through
/// `ensure_unique` which clones the buffer when it is aliased.
/// `ptr` points at the first element of this particular view, and
/// `dim` / `strides` describe the view's shape and memory layout.
pub struct Array<A, D> {
// Shared backing storage; cloning an Array only bumps the refcount.
data: Rc<Vec<A>>,
// First element of this view; may point into the middle of `data`
// (e.g. after slicing).
ptr: *mut A,
// Shape: length of each axis.
dim: D,
// Stride (in elements) for each axis; same dimensionality as `dim`.
strides: D,
}
impl<A, D: Clone> Clone for Array<A, D>
{
    /// Shallow copy: the backing buffer is shared (refcount bump);
    /// only the view metadata (shape and strides) is duplicated.
    fn clone(&self) -> Array<A, D> {
        Array {
            dim: self.dim.clone(),
            strides: self.strides.clone(),
            data: self.data.clone(),
            ptr: self.ptr,
        }
    }
}
impl<A> Array<A, Ix>
{
    /// Create a one-dimensional array from a vector (takes ownership,
    /// no element copies).
    pub fn from_vec(v: Vec<A>) -> Array<A, Ix> {
        let len = v.len() as Ix;
        // Safe: the dimension is exactly the vector's length.
        unsafe { Array::from_vec_dim(len, v) }
    }

    /// Create a one-dimensional array by collecting an iterator.
    pub fn from_iter<I: Iterator<Item=A>>(it: I) -> Array<A, Ix> {
        let elems: Vec<A> = it.collect();
        Array::from_vec(elems)
    }
}
impl Array<f32, Ix>
{
    /// Create a 1-D array of `f32` covering `[begin, end)` in steps
    /// of 1.0 (the element count is `(end - begin)` truncated).
    pub fn range(begin: f32, end: f32) -> Array<f32, Ix>
    {
        let n = (end - begin) as usize;
        // linspace is inclusive of both endpoints, so the last sample
        // sits at begin + (n - 1).
        let span = match n {
            0 => 0.,
            _ => (n - 1) as f32,
        };
        Array::from_iter(it::linspace(begin, begin + span, n))
    }
}
impl<A, D> Array<A, D> where D: Dimension
{
/// Create an array of zeros with shape `dim`.
pub fn zeros(dim: D) -> Array<A, D> where A: Clone + libnum::Zero
{
Array::from_elem(dim, libnum::zero())
}
/// Create an array with shape `dim`, every element a clone of `elem`.
pub fn from_elem(dim: D, elem: A) -> Array<A, D> where A: Clone
{
let v = std::iter::repeat(elem).take(dim.size()).collect();
unsafe {
Array::from_vec_dim(dim, v)
}
}
/// Create an array directly from a vector and a shape, without copying.
///
/// Unsafe because the caller must guarantee `v.len() == dim.size()`;
/// this is only verified in debug builds.
pub unsafe fn from_vec_dim(dim: D, mut v: Vec<A>) -> Array<A, D>
{
debug_assert!(dim.size() == v.len());
Array {
// Moving the Vec into the Rc does not relocate its heap buffer,
// so the raw pointer taken here stays valid.
ptr: v.as_mut_ptr(),
data: std::rc::Rc::new(v),
strides: dim.default_strides(),
dim: dim
}
}
/// Total number of elements in the array.
pub fn len(&self) -> usize
{
self.dim.size()
}
/// The shape of the array, as a dimension value.
pub fn dim(&self) -> D {
self.dim.clone()
}
/// The shape of the array, as a slice of per-axis lengths.
pub fn shape(&self) -> &[Ix] {
self.dim.slice()
}
/// True if the elements are laid out contiguously in the default
/// (row-major) order.
pub fn is_standard_layout(&self) -> bool
{
self.strides == self.dim.default_strides()
}
/// The entire backing buffer, ignoring this view's offset, shape
/// and strides.
pub fn raw_data<'a>(&'a self) -> &'a [A]
{
&self.data
}
/// Return a sliced copy of this view (data remains shared).
pub fn slice(&self, indexes: &[Si]) -> Array<A, D>
{
let mut arr = self.clone();
arr.islice(indexes);
arr
}
/// Slice this view in place.
pub fn islice(&mut self, indexes: &[Si])
{
let offset = Dimension::do_slices(&mut self.dim, &mut self.strides, indexes);
unsafe {
self.ptr = self.ptr.offset(offset);
}
}
/// Iterator over the elements of the sliced view, without
/// materializing a new array.
pub fn slice_iter<'a>(&'a self, indexes: &[Si]) -> Elements<'a, A, D>
{
let mut it = self.iter();
// Apply the slice directly to the iterator's private dim/strides.
let offset = Dimension::do_slices(&mut it.inner.dim, &mut it.inner.strides, indexes);
unsafe {
it.inner.ptr = it.inner.ptr.offset(offset);
}
it
}
/// Reference to the element at `index`, or `None` if out of bounds.
pub fn at<'a>(&'a self, index: D) -> Option<&'a A> {
self.dim.stride_offset_checked(&self.strides, &index)
.map(|offset| unsafe {
&*self.ptr.offset(offset)
})
}
/// Unchecked indexed access.
///
/// Unsafe: `index` must be in bounds (verified only in debug builds).
#[inline]
pub unsafe fn uchk_at<'a>(&'a self, index: D) -> &'a A {
debug_assert!(self.dim.stride_offset_checked(&self.strides, &index).is_some());
let off = Dimension::stride_offset(&index, &self.strides);
&*self.ptr.offset(off)
}
/// Unchecked mutable indexed access.
///
/// Unsafe: `index` must be in bounds and the data must be uniquely
/// owned (both verified only in debug builds).
#[inline]
pub unsafe fn uchk_at_mut(&mut self, index: D) -> &mut A {
debug_assert!(Rc::get_mut(&mut self.data).is_some());
debug_assert!(self.dim.stride_offset_checked(&self.strides, &index).is_some());
let off = Dimension::stride_offset(&index, &self.strides);
&mut *self.ptr.offset(off)
}
// Shared construction of the low-level element iterator used by
// both iter() and iter_mut().
#[inline]
fn base_iter<'a>(&'a self) -> Baseiter<'a, A, D>
{
unsafe {
Baseiter::new(self.ptr, self.dim.clone(), self.strides.clone())
}
}
/// Iterator over references to every element.
pub fn iter<'a>(&'a self) -> Elements<'a, A, D>
{
Elements { inner: self.base_iter() }
}
/// Like `iter`, but also yields each element's index.
pub fn indexed_iter<'a>(&'a self) -> Indexed<Elements<'a, A, D>>
{
self.iter().indexed()
}
/// Restrict this view in place to the subview at `index` along
/// `axis` (the number of axes is unchanged).
pub fn isubview(&mut self, axis: usize, index: Ix)
{
dimension::do_sub(&mut self.dim, &mut self.ptr, &self.strides, axis, index)
}
/// Iterator over this array broadcast to shape `dim`, or `None` if
/// the shapes are not broadcast-compatible.
pub fn broadcast_iter<'a, E: Dimension>(&'a self, dim: E)
-> Option<Elements<'a, A, E>>
{
// Compute strides for iterating `from`-shaped data as shape `to`:
// matching axes keep their stride, length-1 axes get stride 0
// (so the single element is repeated), anything else fails.
fn upcast<D: Dimension, E: Dimension>(to: &D, from: &E, stride: &E) -> Option<D> {
let mut new_stride = to.clone();
// Cannot broadcast into fewer axes than we already have.
if to.ndim() < from.ndim() {
return None
}
{
let mut new_stride_iter = new_stride.slice_mut().iter_mut().rev();
// Align shapes at the trailing axis, numpy-style.
for ((er, es), dr) in from.slice().iter().rev()
.zip(stride.slice().iter().rev())
.zip(new_stride_iter.by_ref())
{
if *dr == *er {
*dr = *es;
} else if *er == 1 {
// Length-1 axis: stride 0 repeats the element.
*dr = 0
} else {
return None;
}
}
// Extra leading axes of the target also repeat (stride 0).
for dr in new_stride_iter {
*dr = 0;
}
}
Some(new_stride)
}
let broadcast_strides =
match upcast(&dim, &self.dim, &self.strides) {
Some(st) => st,
None => return None,
};
Some(Elements {
inner:
unsafe {
Baseiter::new(self.ptr, dim, broadcast_strides)
}
})
}
// Like `broadcast_iter`, but panics with a descriptive message when
// broadcasting is impossible. inline(never) keeps the panic
// formatting machinery out of callers.
#[inline(never)]
fn broadcast_iter_unwrap<'a, E: Dimension>(&'a self, dim: E)
-> Elements<'a, A, E>
{
match self.broadcast_iter(dim.clone()) {
Some(it) => it,
None => panic!("Could not broadcast array from shape {:?} into: {:?}",
self.shape(), dim.slice())
}
}
/// Swap axes `ax` and `bx` in place (a transpose for 2-D arrays).
///
/// Panics if either axis index is out of bounds.
pub fn swap_axes(&mut self, ax: usize, bx: usize)
{
self.dim.slice_mut().swap(ax, bx);
self.strides.slice_mut().swap(ax, bx);
}
// Length and stride of the main diagonal: as many elements as the
// shortest axis, stepping by the sum of all axis strides.
fn diag_params(&self) -> (Ix, Ixs)
{
let len = self.dim.slice().iter().map(|x| *x).min().unwrap_or(1);
let stride = self.strides.slice().iter()
.map(|x| *x as Ixs)
.fold(0, |sum, s| sum + s);
return (len, stride)
}
/// Iterator over the elements of the main diagonal.
pub fn diag_iter<'a>(&'a self) -> Elements<'a, A, Ix>
{
let (len, stride) = self.diag_params();
unsafe {
Elements { inner:
Baseiter::new(self.ptr, len, stride as Ix)
}
}
}
/// The main diagonal as a 1-D view sharing this array's data.
pub fn diag(&self) -> Array<A, Ix> {
let (len, stride) = self.diag_params();
Array {
data: self.data.clone(),
ptr: self.ptr,
dim: len,
// The signed stride is stored in the unsigned stride slot;
// negative values round-trip through the cast.
strides: stride as Ix,
}
}
/// Apply `f` to each element and collect the results into a new
/// array of the same shape.
pub fn map<'a, B, F>(&'a self, mut f: F) -> Array<B, D> where
F: FnMut(&'a A) -> B
{
let mut res = Vec::<B>::with_capacity(self.dim.size());
for elt in self.iter() {
res.push(f(elt))
}
unsafe {
// Safe: exactly dim.size() elements were pushed above.
Array::from_vec_dim(self.dim.clone(), res)
}
}
/// The subview at `index` along `axis`, with that axis removed
/// (result has one dimension fewer); data stays shared.
pub fn subview(&self, axis: usize, index: Ix) -> Array<A, <D as RemoveAxis>::Smaller> where
D: RemoveAxis
{
let mut res = self.clone();
res.isubview(axis, index);
Array{
data: res.data,
ptr: res.ptr,
dim: res.dim.remove_axis(axis),
strides: res.strides.remove_axis(axis),
}
}
/// Make the backing data uniquely owned — the copy-on-write step
/// performed before any mutation.
pub fn ensure_unique(&mut self) where A: Clone
{
// Sole owner already: nothing to do.
if Rc::get_mut(&mut self.data).is_some() {
return
}
// Small view into a big shared buffer: copy just the viewed
// elements into a fresh, compact array.
if self.dim.size() <= self.data.len() / 2 {
unsafe {
*self = Array::from_vec_dim(self.dim.clone(),
self.iter().map(|x| x.clone()).collect());
}
return;
}
// Otherwise clone the whole buffer and re-point `ptr` at the
// same element offset inside the new allocation.
let our_off = (self.ptr as isize - self.data.as_ptr() as isize)
/ mem::size_of::<A>() as isize;
let rvec = Rc::make_mut(&mut self.data);
unsafe {
self.ptr = rvec.as_mut_ptr().offset(our_off);
}
}
/// Mutable reference to the element at `index`, or `None` if out
/// of bounds. Triggers copy-on-write.
pub fn at_mut<'a>(&'a mut self, index: D) -> Option<&'a mut A> where A: Clone
{
self.ensure_unique();
self.dim.stride_offset_checked(&self.strides, &index)
.map(|offset| unsafe {
&mut *self.ptr.offset(offset)
})
}
/// Iterator over mutable references to every element. Triggers
/// copy-on-write.
pub fn iter_mut<'a>(&'a mut self) -> ElementsMut<'a, A, D> where A: Clone
{
self.ensure_unique();
ElementsMut { inner: self.base_iter() }
}
/// Like `iter_mut`, but also yields each element's index.
pub fn indexed_iter_mut<'a>(&'a mut self) -> Indexed<ElementsMut<'a, A, D>> where A: Clone
{
self.iter_mut().indexed()
}
/// Mutable iterator over the sliced view. Triggers copy-on-write.
pub fn slice_iter_mut<'a>(&'a mut self, indexes: &[Si]) -> ElementsMut<'a, A, D> where A: Clone
{
let mut it = self.iter_mut();
let offset = Dimension::do_slices(&mut it.inner.dim, &mut it.inner.strides, indexes);
unsafe {
it.inner.ptr = it.inner.ptr.offset(offset);
}
it
}
/// Mutable iterator over the subview at `index` along `axis`.
/// Triggers copy-on-write.
pub fn sub_iter_mut<'a>(&'a mut self, axis: usize, index: Ix)
-> ElementsMut<'a, A, D> where A: Clone
{
let mut it = self.iter_mut();
dimension::do_sub(&mut it.inner.dim, &mut it.inner.ptr, &it.inner.strides, axis, index);
it
}
/// Mutable iterator over the main diagonal. Triggers copy-on-write.
pub fn diag_iter_mut<'a>(&'a mut self) -> ElementsMut<'a, A, Ix> where A: Clone
{
self.ensure_unique();
let (len, stride) = self.diag_params();
unsafe {
ElementsMut { inner:
Baseiter::new(self.ptr, len, stride as Ix),
}
}
}
/// Mutable access to the whole backing buffer, ignoring view
/// offset/shape/strides. Triggers copy-on-write.
pub fn raw_data_mut<'a>(&'a mut self) -> &'a mut [A]
where A: Clone
{
&mut Rc::make_mut(&mut self.data)[..]
}
/// Return a copy with shape `shape`. Data is shared when this view
/// is contiguous; otherwise the elements are copied.
///
/// Panics if the total element counts differ.
pub fn reshape<E: Dimension>(&self, shape: E) -> Array<A, E> where A: Clone
{
if shape.size() != self.dim.size() {
panic!("Incompatible sizes in reshape, attempted from: {:?}, to: {:?}",
self.dim.slice(), shape.slice())
}
if self.is_standard_layout() {
// Contiguous layout: reuse the data, only dim/strides change.
let cl = self.clone();
Array{
data: cl.data,
ptr: cl.ptr,
strides: shape.default_strides(),
dim: shape,
}
} else {
// Non-contiguous: gather elements into a fresh buffer.
let v = self.iter().map(|x| x.clone()).collect::<Vec<A>>();
unsafe {
Array::from_vec_dim(shape, v)
}
}
}
/// Copy the elements of `other` into `self`, broadcasting `other`
/// when the shapes differ.
///
/// Panics if `other` cannot be broadcast to this shape.
pub fn assign<E: Dimension>(&mut self, other: &Array<A, E>) where A: Clone
{
if self.shape() == other.shape() {
for (x, y) in self.iter_mut().zip(other.iter()) {
*x = y.clone();
}
} else {
let other_iter = other.broadcast_iter_unwrap(self.dim());
for (x, y) in self.iter_mut().zip(other_iter) {
*x = y.clone();
}
}
}
/// Set every element of the backing buffer to a clone of `x`.
///
/// NOTE(review): this writes through `raw_data_mut`, i.e. the whole
/// owned buffer rather than only the current view — confirm that is
/// intended for sliced views.
pub fn assign_scalar(&mut self, x: &A) where A: Clone
{
for elt in self.raw_data_mut().iter_mut() {
*elt = x.clone();
}
}
}
/// Create a zero-dimensional (scalar) array holding `x`.
pub fn arr0<A>(x: A) -> Array<A, ()>
{
    // Safe: a single element matches the size of the () dimension.
    unsafe { Array::from_vec_dim((), vec![x]) }
}
/// Create a one-dimensional array from a slice, cloning its elements.
pub fn arr1<A: Clone>(xs: &[A]) -> Array<A, Ix>
{
    let owned: Vec<A> = xs.to_vec();
    Array::from_vec(owned)
}
/// Helper trait that lets both slices and fixed-size arrays serve as
/// rows in the `arr2` / `arr3` constructors.
///
/// Unsafe contract: `as_init_slice` must expose all of `self`'s
/// elements, and `is_fixed_size` must be truthful — `arr2`/`arr3`
/// use it to skip per-row length assertions.
pub unsafe trait ArrInit<T> {
fn as_init_slice(&self) -> &[T];
// Default: not fixed-size; the [T; N] impls override this to true.
fn is_fixed_size() -> bool { false }
}
// A slice is already a flat view of its elements. `is_fixed_size`
// keeps its default of false, so arr2/arr3 will length-check each row.
unsafe impl<T> ArrInit<T> for [T]
{
fn as_init_slice(&self) -> &[T]
{
self
}
}
// Implement ArrInit for fixed-size arrays [T; N], N = 0..=16
// (pre-const-generics, each length needs its own impl).
macro_rules! impl_arr_init {
(__impl $n: expr) => (
unsafe impl<T> ArrInit<T> for [T; $n] {
fn as_init_slice(&self) -> &[T] { self }
// Fixed-size rows are uniform by construction, so arr2/arr3
// can skip their per-row length assertions.
fn is_fixed_size() -> bool { true }
}
);
() => ();
// Recursive case: emit an impl for the head, recurse on the tail.
($n: expr, $($m:expr,)*) => (
impl_arr_init!(__impl $n);
impl_arr_init!($($m,)*);
)
}
impl_arr_init!(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,);
/// Create a two-dimensional array from a slice of rows.
///
/// The column count is taken from the first row; for slice-typed rows
/// every subsequent row is asserted to have the same length
/// (fixed-size rows are uniform by construction).
pub fn arr2<A: Clone, V: ArrInit<A>>(xs: &[V]) -> Array<A, (Ix, Ix)>
{
    let rows = xs.len() as Ix;
    let cols = match xs.first() {
        Some(first) => first.as_init_slice().len() as Ix,
        None => 0,
    };
    let dim = (rows, cols);
    let mut elems = Vec::<A>::with_capacity(dim.size());
    for row in xs.iter() {
        let row = row.as_init_slice();
        assert!(<V as ArrInit<A>>::is_fixed_size() || row.len() as Ix == cols);
        elems.extend(row.iter().map(|x| x.clone()));
    }
    // Safe: rows * cols elements were collected (enforced above).
    unsafe {
        Array::from_vec_dim(dim, elems)
    }
}
/// Create a three-dimensional array from a slice of planes, each plane
/// being a collection of rows.
///
/// Inner dimensions are taken from the first plane/row; slice-typed
/// levels are asserted to be uniform (fixed-size levels already are).
pub fn arr3<A: Clone, V: ArrInit<U>, U: ArrInit<A>>(xs: &[V]) -> Array<A, (Ix, Ix, Ix)>
{
    let planes = xs.len() as Ix;
    let first_plane = xs.get(0).map(|p| p.as_init_slice());
    let first_row = first_plane.and_then(|p| p.get(0).map(|r| r.as_init_slice()));
    let rows = first_plane.map_or(0, |p| p.len() as Ix);
    let cols = first_row.map_or(0, |r| r.len() as Ix);
    let dim = (planes, rows, cols);
    let mut elems = Vec::<A>::with_capacity(dim.size());
    for plane in xs.iter() {
        let plane = plane.as_init_slice();
        assert!(<V as ArrInit<U>>::is_fixed_size() || plane.len() as Ix == rows);
        for row in plane.iter() {
            let row = row.as_init_slice();
            assert!(<U as ArrInit<A>>::is_fixed_size() || row.len() as Ix == cols);
            elems.extend(row.iter().map(|x| x.clone()));
        }
    }
    // Safe: planes * rows * cols elements were collected (enforced above).
    unsafe {
        Array::from_vec_dim(dim, elems)
    }
}
impl<A, D> Array<A, D> where
    A: Clone + Add<Output=A>,
    D: RemoveAxis,
{
    /// Sum along `axis`, producing an array with one dimension fewer.
    ///
    /// Panics if `axis` is out of bounds (empty axes panic in subview).
    pub fn sum(&self, axis: usize) -> Array<A, <D as RemoveAxis>::Smaller>
    {
        let count = self.shape()[axis];
        // Seed the accumulator with the first subview, then fold in
        // the remaining ones element-wise.
        let mut acc = self.subview(axis, 0);
        for index in 1..count {
            acc.iadd(&self.subview(axis, index));
        }
        acc
    }
}
impl<A, D> Array<A, D> where
    A: Copy + linalg::Field,
    D: RemoveAxis,
{
    /// Arithmetic mean along `axis`: the sum divided by the axis length.
    pub fn mean(&self, axis: usize) -> Array<A, <D as RemoveAxis>::Smaller>
    {
        let count = self.shape()[axis];
        let mut total = self.sum(axis);
        let one = libnum::one::<A>();
        // Build the divisor `count` as a value of type A by repeated
        // addition, since no numeric conversion trait is required.
        let divisor = (1..count).fold(one, |acc, _| acc + one);
        for elt in total.iter_mut() {
            *elt = *elt / divisor;
        }
        total
    }
}
impl<A> Array<A, (Ix, Ix)>
{
    /// Iterate over the elements of row `index`.
    ///
    /// Panics if `index` is out of bounds.
    pub fn row_iter<'a>(&'a self, index: Ix) -> Elements<'a, A, Ix>
    {
        let (rows, cols) = self.dim;
        let (row_stride, col_stride) = self.strides;
        assert!(index < rows);
        let start = stride_offset(index, row_stride);
        unsafe {
            Elements {
                inner: Baseiter::new(self.ptr.offset(start), cols, col_stride)
            }
        }
    }

    /// Iterate over the elements of column `index`.
    ///
    /// Panics if `index` is out of bounds.
    pub fn col_iter<'a>(&'a self, index: Ix) -> Elements<'a, A, Ix>
    {
        let (rows, cols) = self.dim;
        let (row_stride, col_stride) = self.strides;
        assert!(index < cols);
        let start = stride_offset(index, col_stride);
        unsafe {
            Elements {
                inner: Baseiter::new(self.ptr.offset(start), rows, row_stride)
            }
        }
    }
}
impl<'a, A: Copy + linalg::Ring> Array<A, (Ix, Ix)>
{
    /// Matrix product: `self` (m × a) times `other` (b × n), returning
    /// an m × n array in row-major order.
    ///
    /// Panics if the inner dimensions disagree (a != b).
    pub fn mat_mul(&self, other: &Array<A, (Ix, Ix)>) -> Array<A, (Ix, Ix)>
    {
        let ((m, a), (b, n)) = (self.dim, other.dim);
        let (self_columns, other_rows) = (a, b);
        assert!(self_columns == other_rows);
        // Push each dot product in row-major order. The previous code
        // called `set_len` on an uninitialized Vec and then took
        // `iter_mut()` references into it, which is undefined behavior
        // per Vec::set_len's safety contract; pushing avoids that
        // entirely with the same element order and no extra cost.
        let mut res_elems = Vec::<A>::with_capacity(m as usize * n as usize);
        for i in 0..m {
            for j in 0..n {
                // Dot product of row i of self with column j of other.
                // Safety: i < m, j < n and k < a, so every index is in
                // bounds for the asserted shapes.
                let dot = (0..a).fold(libnum::zero::<A>(), |s, k| unsafe {
                    s + *self.uchk_at((i, k)) * *other.uchk_at((k, j))
                });
                res_elems.push(dot);
            }
        }
        // Safe: exactly m * n elements were pushed.
        unsafe {
            Array::from_vec_dim((m, n), res_elems)
        }
    }

    /// Matrix-vector product: `self` (m × a) times `other` (length a),
    /// returning a length-m array.
    ///
    /// Panics if the dimensions disagree.
    pub fn mat_mul_col(&self, other: &Array<A, Ix>) -> Array<A, Ix>
    {
        let ((m, a), n) = (self.dim, other.dim);
        let (self_columns, other_rows) = (a, n);
        assert!(self_columns == other_rows);
        // Same fix as mat_mul: build the result by pushing instead of
        // writing through references into uninitialized memory.
        let mut res_elems = Vec::<A>::with_capacity(m as usize);
        for i in 0..m {
            // Safety: i < m and k < a are in bounds per the assert.
            let dot = (0..a).fold(libnum::zero::<A>(), |s, k| unsafe {
                s + *self.uchk_at((i, k)) * *other.uchk_at(k)
            });
            res_elems.push(dot);
        }
        // Safe: exactly m elements were pushed.
        unsafe {
            Array::from_vec_dim(m, res_elems)
        }
    }
}
impl<A: Float + PartialOrd, D: Dimension> Array<A, D>
{
    /// True if both arrays have identical shapes and every pair of
    /// corresponding elements differs by at most `tol` in magnitude.
    pub fn allclose(&self, other: &Array<A, D>, tol: A) -> bool
    {
        if self.shape() != other.shape() {
            return false;
        }
        self.iter()
            .zip(other.iter())
            .all(|(x, y)| (*x - *y).abs() <= tol)
    }
}
// For each binary-operator trait ($trt, method $mth) generate three
// things:
//  * in-place element-wise methods: $imethod (array rhs, broadcast if
//    shapes differ) and $imth_scalar (scalar rhs),
//  * a consuming `array (op) array` impl reusing self's storage,
//  * a `&array (op) &array` impl producing a fresh array.
macro_rules! impl_binary_op(
($trt:ident, $mth:ident, $imethod:ident, $imth_scalar:ident) => (
impl<A, D> Array<A, D> where
A: Clone + $trt<A, Output=A>,
D: Dimension,
{
/// Apply the operation element-wise in place, broadcasting
/// `other` to `self`'s shape when the shapes differ
/// (panics if broadcasting is impossible).
pub fn $imethod <E: Dimension> (&mut self, other: &Array<A, E>)
{
if self.dim.ndim() == other.dim.ndim() &&
self.shape() == other.shape() {
for (x, y) in self.iter_mut().zip(other.iter()) {
*x = (x.clone()). $mth (y.clone());
}
} else {
let other_iter = other.broadcast_iter_unwrap(self.dim());
for (x, y) in self.iter_mut().zip(other_iter) {
*x = (x.clone()). $mth (y.clone());
}
}
}
/// Apply the operation with scalar `x` to every element in place.
pub fn $imth_scalar (&mut self, x: &A)
{
for elt in self.iter_mut() {
*elt = elt.clone(). $mth (x.clone());
}
}
}
// Consuming operator impl: mutates and returns self.
impl<'a, A, D, E> $trt<Array<A, E>> for Array<A, D>
where A: Clone + $trt<A, Output=A>,
D: Dimension,
E: Dimension,
{
type Output = Array<A, D>;
fn $mth (mut self, other: Array<A, E>) -> Array<A, D>
{
if self.shape() == other.shape() {
for (x, y) in self.iter_mut().zip(other.iter()) {
*x = x.clone(). $mth (y.clone());
}
} else {
let other_iter = other.broadcast_iter_unwrap(self.dim());
for (x, y) in self.iter_mut().zip(other_iter) {
*x = x.clone(). $mth (y.clone());
}
}
self
}
}
// By-reference operator impl: collects results into a new array.
impl<'a, A, D, E> $trt<&'a Array<A, E>> for &'a Array<A, D>
where A: Clone + $trt<A, Output=A>,
D: Dimension,
E: Dimension,
{
type Output = Array<A, D>;
fn $mth (self, other: &'a Array<A, E>) -> Array<A, D>
{
let mut result = Vec::<A>::with_capacity(self.dim.size());
if self.shape() == other.shape() {
for (x, y) in self.iter().zip(other.iter()) {
result.push((x.clone()). $mth (y.clone()));
}
} else {
let other_iter = other.broadcast_iter_unwrap(self.dim());
for (x, y) in self.iter().zip(other_iter) {
result.push((x.clone()). $mth (y.clone()));
}
}
unsafe {
// Safe: exactly dim.size() elements were pushed.
Array::from_vec_dim(self.dim.clone(), result)
}
}
}
);
);
impl_binary_op!(Add, add, iadd, iadd_scalar);
impl_binary_op!(Sub, sub, isub, isub_scalar);
impl_binary_op!(Mul, mul, imul, imul_scalar);
impl_binary_op!(Div, div, idiv, idiv_scalar);
impl_binary_op!(Rem, rem, irem, irem_scalar);
impl_binary_op!(BitAnd, bitand, ibitand, ibitand_scalar);
impl_binary_op!(BitOr, bitor, ibitor, ibitor_scalar);
impl_binary_op!(BitXor, bitxor, ibitxor, ibitxor_scalar);
impl_binary_op!(Shl, shl, ishl, ishl_scalar);
impl_binary_op!(Shr, shr, ishr, ishr_scalar);
// Compound-assignment operator impls (`a += &b`, etc.), gated behind
// the nightly-only "assign_ops" feature declared in the crate
// attributes at the top of the file.
#[cfg(feature = "assign_ops")]
mod assign_ops {
use super::*;
use std::ops::{
AddAssign,
SubAssign,
MulAssign,
DivAssign,
RemAssign,
BitAndAssign,
BitOrAssign,
BitXorAssign,
};
// Generate `impl $trt<&Array<A, E>> for Array<A, D>` applying
// $method element-wise, broadcasting the right-hand side when the
// shapes differ (panics if broadcasting is impossible).
macro_rules! impl_assign_op {
($trt:ident, $method:ident) => {
impl<'a, A, D, E> $trt<&'a Array<A, E>> for Array<A, D>
where A: Clone + $trt<A>,
D: Dimension,
E: Dimension,
{
fn $method(&mut self, other: &Array<A, E>) {
if self.shape() == other.shape() {
for (x, y) in self.iter_mut().zip(other.iter()) {
x.$method(y.clone());
}
} else {
let other_iter = other.broadcast_iter_unwrap(self.dim());
for (x, y) in self.iter_mut().zip(other_iter) {
x.$method(y.clone());
}
}
}
}
};
}
impl_assign_op!(AddAssign, add_assign);
impl_assign_op!(SubAssign, sub_assign);
impl_assign_op!(MulAssign, mul_assign);
impl_assign_op!(DivAssign, div_assign);
impl_assign_op!(RemAssign, rem_assign);
impl_assign_op!(BitAndAssign, bitand_assign);
impl_assign_op!(BitOrAssign, bitor_assign);
impl_assign_op!(BitXorAssign, bitxor_assign);
}
impl<A: Clone + Neg<Output=A>, D: Dimension>
    Array<A, D>
{
    /// Negate every element in place.
    pub fn ineg(&mut self)
    {
        for elt in self.iter_mut() {
            let negated = elt.clone().neg();
            *elt = negated;
        }
    }
}
impl<A: Clone + Neg<Output=A>, D: Dimension>
    Neg for Array<A, D>
{
    type Output = Self;
    /// Element-wise negation, reusing this array's storage where it
    /// is uniquely owned.
    fn neg(mut self) -> Array<A, D>
    {
        self.ineg();
        self
    }
}
impl<A: Clone + Not<Output=A>, D: Dimension>
    Array<A, D>
{
    /// Apply logical/bitwise NOT to every element in place.
    pub fn inot(&mut self)
    {
        for elt in self.iter_mut() {
            let inverted = elt.clone().not();
            *elt = inverted;
        }
    }
}
impl<A: Clone + Not<Output=A>, D: Dimension>
    Not for Array<A, D>
{
    type Output = Self;
    /// Element-wise NOT, reusing this array's storage where it is
    /// uniquely owned.
    fn not(mut self) -> Array<A, D>
    {
        self.inot();
        self
    }
}
/// Iterator yielding shared references to an array's elements.
pub struct Elements<'a, A: 'a, D> {
    inner: Baseiter<'a, A, D>,
}

impl<'a, A, D> Elements<'a, A, D> where D: Clone
{
    /// Shape of the view being iterated (possibly a broadcast shape).
    pub fn dim(&self) -> D
    {
        self.inner.dim.clone()
    }

    /// Wrap this iterator so it also yields each element's index.
    pub fn indexed(self) -> Indexed<Elements<'a, A, D>>
    {
        Indexed { inner: self }
    }
}
/// Iterator yielding mutable references to an array's elements.
pub struct ElementsMut<'a, A: 'a, D> {
    inner: Baseiter<'a, A, D>,
}

impl<'a, A, D> ElementsMut<'a, A, D> where D: Clone
{
    /// Shape of the view being iterated.
    pub fn dim(&self) -> D
    {
        self.inner.dim.clone()
    }

    /// Wrap this iterator so it also yields each element's index.
    pub fn indexed(self) -> Indexed<ElementsMut<'a, A, D>>
    {
        Indexed { inner: self }
    }
}
/// Adaptor over an element iterator that pairs each yielded element
/// with its index (produced by the `indexed()` methods above).
#[derive(Clone)]
pub struct Indexed<I> {
inner: I,
}