use std::ops::Add;
use libnum::{self, Zero, Float};
use itertools::free::enumerate;
use imp_prelude::*;
use numeric_util;
use {
LinalgScalar,
};
impl<A, S, D> ArrayBase<S, D>
    where S: Data<Elem = A>,
          D: Dimension,
{
    /// Return the sum of every element in the array.
    pub fn scalar_sum(&self) -> A
        where A: Clone + Add<Output = A> + libnum::Zero,
    {
        // Fast path: the whole array is a single contiguous slice in
        // memory order, so one unrolled pass suffices.
        if let Some(contiguous) = self.as_slice_memory_order() {
            return numeric_util::unrolled_sum(contiguous);
        }
        // General path: accumulate lane by lane, still using the unrolled
        // kernel whenever an individual lane happens to be contiguous.
        // Each lane is folded from its own zero before joining the running
        // total, preserving the association order of the additions.
        let mut total = A::zero();
        for lane in self.inner_iter() {
            let lane_sum = match lane.as_slice() {
                Some(s) => numeric_util::unrolled_sum(s),
                None => lane.iter().fold(A::zero(), |acc, elt| acc + elt.clone()),
            };
            total = total + lane_sum;
        }
        total
    }

    /// Return an array holding the sums along `axis`; the result has one
    /// dimension fewer than `self`.
    ///
    /// **Panics** if `axis` is out of bounds (the stride lookup below
    /// indexes by axis) or if the axis has length 0 (`subview(axis, 0)`).
    pub fn sum(&self, axis: Axis) -> Array<A, <D as RemoveAxis>::Smaller>
        where A: Clone + Zero + Add<Output = A>,
              D: RemoveAxis,
    {
        let len = self.shape().axis(axis);
        let mut acc = self.subview(axis, 0).to_owned();
        let axis_stride = self.strides()[axis.axis()];
        if self.ndim() == 2 && axis_stride == 1 {
            // 2-D fast path: unit stride along `axis` means each lane along
            // that axis is contiguous, so sum every lane directly.
            let ax = axis.axis();
            for (i, slot) in enumerate(&mut acc) {
                *slot = self.subview(Axis(1 - ax), i).scalar_sum();
            }
        } else {
            // General path: fold the remaining subviews onto the first.
            for i in 1..len {
                acc = acc + &self.subview(axis, i);
            }
        }
        acc
    }

    /// Return an array holding the means along `axis`.
    ///
    /// **Panics** under the same conditions as [`sum`], which it calls.
    pub fn mean(&self, axis: Axis) -> Array<A, <D as RemoveAxis>::Smaller>
        where A: LinalgScalar,
              D: RemoveAxis,
    {
        let len = self.shape().axis(axis);
        let sum = self.sum(axis);
        // Build the element count inside `A` by repeated addition of one;
        // the LinalgScalar bound offers no direct conversion from usize.
        let mut count = A::one();
        for _ in 1..len {
            count = count + A::one();
        }
        sum / &aview0(&count)
    }

    /// Return `true` if `rhs`, broadcast to `self`'s shape, matches `self`
    /// elementwise to within absolute tolerance `tol`.
    ///
    /// NOTE(review): `broadcast_unwrap` presumably panics when the shapes
    /// cannot be broadcast together — confirm against its definition.
    pub fn all_close<S2, E>(&self, rhs: &ArrayBase<S2, E>, tol: A) -> bool
        where A: Float,
              S2: Data<Elem = A>,
              E: Dimension,
    {
        let rhs = rhs.broadcast_unwrap(self.raw_dimension());
        self.iter()
            .zip(rhs.iter())
            .all(|(a, b)| (*a - *b).abs() <= tol)
    }
}