use crate::Float;
use ndarray;
use std::sync::{Arc, RwLock};
pub type NdArray<T> = ndarray::Array<T, ndarray::IxDyn>;
pub type NdArrayView<'a, T> = ndarray::ArrayView<'a, T, ndarray::IxDyn>;
pub type NdArrayViewMut<'a, T> = ndarray::ArrayViewMut<'a, T, ndarray::IxDyn>;
pub use crate::array_gen::*;
/// Internal representation of an array value: either an array that owns its
/// buffer, or a view borrowing another array's buffer for lifetime `'v`.
#[derive(Clone)]
pub(crate) enum ArrRepr<'v, T: Float> {
    /// Owns its data.
    Owned(NdArray<T>),
    /// Borrows data from elsewhere; valid for `'v`.
    View(NdArrayView<'v, T>),
}
/// Erases the array's static dimensionality and wraps it for shared,
/// lock-guarded access across threads.
#[inline]
pub fn into_shared<F: Float, D: ndarray::Dimension>(
    arr: ndarray::Array<F, D>,
) -> Arc<RwLock<NdArray<F>>> {
    let dynamic = arr.into_dyn();
    Arc::new(RwLock::new(dynamic))
}
/// Interprets the elements of `x` as dimension sizes, converting each to
/// `usize`. Panics (via `unwrap`) when an element is not convertible.
#[inline]
pub(crate) fn as_shape<T: Float>(x: &NdArrayView<T>) -> Vec<usize> {
    x.iter().map(|&dim| dim.to_usize().unwrap()).collect()
}
/// Returns `x` reshaped with a new axis of length 1 inserted at `axis`.
#[inline]
pub(crate) fn expand_dims<T: Float>(x: NdArray<T>, axis: usize) -> NdArray<T> {
    let mut expanded = x.shape().to_vec();
    expanded.insert(axis, 1);
    // Element count is unchanged, so the reshape cannot fail.
    x.into_shape(expanded).unwrap()
}
/// Moves the axis at position `from` to position `to`, shifting the axes in
/// between by one (in the spirit of NumPy's `rollaxis`). Operates in place via
/// a sequence of `swap_axes` calls — only strides change, no data moves.
#[inline]
pub(crate) fn roll_axis<T: Float>(arg: &mut NdArray<T>, to: ndarray::Axis, from: ndarray::Axis) {
    let i = to.index();
    let mut j = from.index();
    if j > i {
        // Walk `j` down toward `i`; each swap bubbles the target axis left
        // while shifting the displaced axis one position right.
        while i != j {
            arg.swap_axes(i, j);
            j -= 1;
        }
    } else {
        // Symmetric case: walk `j` up toward `i`.
        while i != j {
            arg.swap_axes(i, j);
            j += 1;
        }
    }
}
/// Converts a possibly-negative `axis` into a non-negative axis index for an
/// array of rank `ndim` (e.g. `-1` becomes `ndim - 1`).
///
/// # Panics
///
/// Panics when a negative `axis` is out of range (`axis < -ndim`). The
/// previous unchecked `as usize` cast silently wrapped such values into a
/// huge bogus index.
#[inline]
pub(crate) fn normalize_negative_axis(axis: isize, ndim: usize) -> usize {
    if axis < 0 {
        let normalized = ndim as isize + axis;
        assert!(
            normalized >= 0,
            "axis {} is out of range for rank {}",
            axis,
            ndim
        );
        normalized as usize
    } else {
        axis as usize
    }
}
/// Converts each (possibly negative) element of `axes` into a non-negative
/// axis index for an array of rank `ndim`.
#[inline]
pub(crate) fn normalize_negative_axes<T: Float>(axes: &NdArrayView<T>, ndim: usize) -> Vec<usize> {
    axes.iter()
        .map(|&axis| {
            let resolved = if axis < T::zero() {
                T::from(ndim).unwrap() + axis
            } else {
                axis
            };
            resolved.to_usize().expect("Invalid index value")
        })
        .collect()
}
/// Converts a 0/1 mask into the list of positions holding a one: returns the
/// index of every element equal to `T::one()`, in order.
#[inline]
pub(crate) fn sparse_to_dense<T: Float>(arr: &NdArrayView<T>) -> Vec<usize> {
    arr.iter()
        .enumerate()
        .filter_map(|(idx, &v)| if v == T::one() { Some(idx) } else { None })
        .collect()
}
/// Returns `true` when `strides` is non-decreasing (every adjacent pair
/// satisfies `w[0] <= w[1]`); trivially `true` for empty or length-1 input.
#[allow(unused)]
#[inline]
pub(crate) fn is_fully_transposed(strides: &[ndarray::Ixs]) -> bool {
    strides.windows(2).all(|pair| pair[0] <= pair[1])
}
/// Returns a contiguous copy of `x` when it is not already in standard
/// (row-major) layout; `None` when no copy is needed.
#[inline]
pub(crate) fn copy_if_not_standard<T: Float>(x: &NdArrayView<T>) -> Option<NdArray<T>> {
    if x.is_standard_layout() {
        None
    } else {
        Some(deep_copy(x))
    }
}
/// Copies every element of the view, in logical iteration order, into a
/// freshly allocated array of the same shape.
#[inline]
pub(crate) fn deep_copy<T: Float>(x: &NdArrayView<T>) -> NdArray<T> {
    let mut buf = Vec::with_capacity(x.len());
    buf.extend(x.iter().cloned());
    // SAFETY: `buf` holds exactly `x.len()` elements, which is the element
    // count implied by `x.shape()`.
    unsafe { NdArray::from_shape_vec_unchecked(x.shape(), buf) }
}
/// Builds the sentinel "shape of a scalar": a rank-1 array of length 0.
#[inline]
pub(crate) fn scalar_shape<T: Float>() -> NdArray<T> {
    // An empty vec always matches shape [0], so the checked constructor
    // cannot fail — no reason to reach for the unsafe unchecked variant.
    NdArray::from_shape_vec(ndarray::IxDyn(&[0]), Vec::new())
        .expect("a [0]-shaped array from an empty vec is always valid")
}
/// A scalar is represented either by the empty shape `[]` (rank 0) or by the
/// sentinel shape `[0]`.
#[inline]
pub(crate) fn is_scalar_shape(shape: &[usize]) -> bool {
    shape.is_empty() || shape == [0]
}
/// Returns a rank-1 array listing the dimensions of the view `x`, with each
/// dimension size converted to `T`.
#[inline]
pub(crate) fn shape_of_view<T: Float>(x: &NdArrayView<T>) -> NdArray<T> {
    let dims: Vec<T> = x.shape().iter().map(|&d| T::from(d).unwrap()).collect();
    let len = dims.len();
    NdArray::from_shape_vec(ndarray::IxDyn(&[len]), dims).unwrap()
}
/// Returns a rank-1 array listing the dimensions of `x`, with each dimension
/// size converted to `T`.
#[inline]
pub(crate) fn shape_of<T: Float>(x: &NdArray<T>) -> NdArray<T> {
    let dims: Vec<T> = x.shape().iter().map(|&d| T::from(d).unwrap()).collect();
    let len = dims.len();
    NdArray::from_shape_vec(ndarray::IxDyn(&[len]), dims).unwrap()
}
/// Splits a batched buffer of `whole_size` elements starting at `head` into
/// one mutable per-sample pointer, reinterpreted as `*mut B`.
///
/// Returns an empty vec for `batch_size == 0` instead of panicking on the
/// `whole_size / batch_size` division.
#[cfg(feature = "mkl")]
#[inline]
pub(crate) fn get_batch_ptrs_mut<A: Float, B>(
    batch_size: usize,
    head: *mut A,
    whole_size: usize,
) -> Vec<*mut B> {
    if batch_size == 0 {
        return Vec::new();
    }
    let size_per_sample = whole_size / batch_size;
    (0..batch_size)
        .map(|i| {
            // SAFETY(review): assumes the caller's buffer really spans
            // `whole_size` elements of `A` so every offset stays in bounds —
            // same contract the original code relied on.
            unsafe { head.offset((i * size_per_sample) as isize) as *mut B }
        })
        .collect()
}
/// Splits a batched buffer of `whole_size` elements starting at `head` into
/// one const per-sample pointer, reinterpreted as `*const B`.
///
/// Returns an empty vec for `batch_size == 0` instead of panicking on the
/// `whole_size / batch_size` division.
#[cfg(feature = "mkl")]
#[inline]
pub(crate) fn get_batch_ptrs<A: Float, B>(
    batch_size: usize,
    head: *const A,
    whole_size: usize,
) -> Vec<*const B> {
    if batch_size == 0 {
        return Vec::new();
    }
    let size_per_sample = whole_size / batch_size;
    (0..batch_size)
        .map(|i| {
            // SAFETY(review): assumes the caller's buffer really spans
            // `whole_size` elements of `A` so every offset stays in bounds —
            // same contract the original code relied on.
            unsafe { head.offset((i * size_per_sample) as isize) as *const B }
        })
        .collect()
}
pub mod array_gen {
use super::*;
use rand::distributions::Distribution;
use rand::rngs::ThreadRng;
use rand::{self, Rng};
use rand_distr;
use std::marker::PhantomData;
use std::sync::Mutex;
/// Random-array generator pairing an element type `T` with an RNG `R`
/// (thread-local RNG by default).
pub struct ArrayRng<T: Float, R: Rng = ThreadRng> {
    // Marks the element type; no `T` value is stored.
    phantom: PhantomData<T>,
    // Locked so the sampling methods can mutate RNG state through `&self`.
    rng: Mutex<R>,
}
impl<T: Float> Default for ArrayRng<T> {
    /// Builds a generator backed by the thread-local RNG.
    fn default() -> Self {
        Self {
            phantom: PhantomData,
            rng: Mutex::new(rand::thread_rng()),
        }
    }
}
impl<T: Float, R: Rng> ArrayRng<T, R> {
pub fn new(rng: R) -> Self {
ArrayRng {
phantom: PhantomData,
rng: Mutex::new(rng),
}
}
fn gen_random_array<I>(&self, shape: &[usize], dist: I) -> NdArray<T>
where
I: Distribution<f64>,
{
let size: usize = shape.iter().cloned().product();
let mut rng = self.rng.lock().unwrap();
unsafe {
let mut buf = Vec::with_capacity(size);
for i in 0..size {
*buf.get_unchecked_mut(i) = T::from(dist.sample(&mut *rng)).unwrap();
}
buf.set_len(size);
NdArray::from_shape_vec_unchecked(shape, buf)
}
}
pub fn random_normal(
&self,
shape: &[usize],
mean: f64,
stddev: f64,
) -> ndarray::Array<T, ndarray::IxDyn> {
let normal = rand_distr::Normal::new(mean, stddev).unwrap();
self.gen_random_array(shape, normal)
}
pub fn random_uniform(
&self,
shape: &[usize],
min: f64,
max: f64,
) -> ndarray::Array<T, ndarray::IxDyn> {
let range = rand_distr::Uniform::new(min, max);
self.gen_random_array(shape, range)
}
pub fn standard_normal(&self, shape: &[usize]) -> ndarray::Array<T, ndarray::IxDyn> {
let normal = rand_distr::Normal::new(0., 1.).unwrap();
self.gen_random_array(shape, normal)
}
pub fn standard_uniform(&self, shape: &[usize]) -> ndarray::Array<T, ndarray::IxDyn> {
let dist = rand_distr::Uniform::new(0., 1.);
self.gen_random_array(shape, dist)
}
pub fn glorot_normal(&self, shape: &[usize]) -> ndarray::Array<T, ndarray::IxDyn> {
assert_eq!(shape.len(), 2);
let s = 1. / (shape[0] as f64).sqrt();
let normal = rand_distr::Normal::new(0., s).unwrap();
self.gen_random_array(shape, normal)
}
pub fn glorot_uniform(&self, shape: &[usize]) -> ndarray::Array<T, ndarray::IxDyn> {
assert_eq!(shape.len(), 2);
let s = (6. / shape[0] as f64).sqrt();
let uniform = rand_distr::Uniform::new(-s, s);
self.gen_random_array(shape, uniform)
}
pub fn bernoulli(&self, shape: &[usize], p: f64) -> ndarray::Array<T, ndarray::IxDyn> {
let dist = rand_distr::Uniform::new(0., 1.);
let mut rng = self.rng.lock().unwrap();
let size: usize = shape.iter().cloned().product();
unsafe {
let mut buf = Vec::with_capacity(size);
for i in 0..size {
let val = dist.sample(&mut *rng);
*buf.get_unchecked_mut(i) = T::from(i32::from(val < p)).unwrap();
}
buf.set_len(size);
NdArray::from_shape_vec(shape, buf).unwrap()
}
}
pub fn exponential(
&self,
shape: &[usize],
lambda: f64,
) -> ndarray::Array<T, ndarray::IxDyn> {
let dist = rand_distr::Exp::new(lambda).unwrap();
self.gen_random_array(shape, dist)
}
pub fn log_normal(
&self,
shape: &[usize],
mean: f64,
stddev: f64,
) -> ndarray::Array<T, ndarray::IxDyn> {
let dist = rand_distr::LogNormal::new(mean, stddev).unwrap();
self.gen_random_array(shape, dist)
}
pub fn gamma(
&self,
shape: &[usize],
shape_param: f64,
scale: f64,
) -> ndarray::Array<T, ndarray::IxDyn> {
let dist = rand_distr::Gamma::new(shape_param, scale).unwrap();
self.gen_random_array(shape, dist)
}
}
/// Returns a new array of the given `shape` with every element set to zero.
#[inline]
pub fn zeros<T: Float>(shape: &[usize]) -> ndarray::Array<T, ndarray::IxDyn> {
    let zero = T::zero();
    NdArray::from_elem(shape, zero)
}
/// Returns a new array of the given `shape` with every element set to one.
#[inline]
pub fn ones<T: Float>(shape: &[usize]) -> ndarray::Array<T, ndarray::IxDyn> {
    let one = T::one();
    NdArray::from_elem(shape, one)
}
/// Wraps a single value in a rank-0 (scalar) dynamic-dimension array.
#[inline]
pub fn from_scalar<T: Float>(val: T) -> ndarray::Array<T, ndarray::IxDyn> {
    let scalar_ix = ndarray::IxDyn(&[]);
    NdArray::from_elem(scalar_ix, val)
}
}