use num::Zero;
use simba::scalar::ClosedAdd;
use std::iter;
use std::marker::PhantomData;
use std::ops::Range;
use std::slice;
use crate::allocator::Allocator;
use crate::sparse::cs_utils;
use crate::{Const, DefaultAllocator, Dim, Dynamic, OVector, Scalar, Vector, U1};
/// An iterator over the `(row_index, value)` pairs of one column of a
/// compressed sparse matrix, reading from two parallel slices.
pub struct ColumnEntries<'a, T> {
    // Position of the next entry to yield.
    curr: usize,
    // Row indices; `new` enforces `i.len() == v.len()`.
    i: &'a [usize],
    // Values, parallel to `i`.
    v: &'a [T],
}

impl<'a, T> ColumnEntries<'a, T> {
    /// Creates an iterator over the parallel slices `i` (row indices) and `v` (values).
    ///
    /// # Panics
    /// Panics if `i` and `v` have different lengths.
    #[inline]
    pub fn new(i: &'a [usize], v: &'a [T]) -> Self {
        assert_eq!(i.len(), v.len());
        Self { curr: 0, i, v }
    }
}

impl<'a, T: Clone> Iterator for ColumnEntries<'a, T> {
    type Item = (usize, T);

    #[inline]
    fn next(&mut self) -> Option<Self::Item> {
        // Safe accessors replace the previous `get_unchecked` calls: `get`
        // performs the end-of-iteration check itself, and `v` is indexable at
        // the same position because `new` guarantees equal slice lengths.
        let row = *self.i.get(self.curr)?;
        let value = self.v[self.curr].clone();
        self.curr += 1;
        Some((row, value))
    }

    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        // The exact number of remaining entries is known up front.
        let remaining = self.i.len() - self.curr;
        (remaining, Some(remaining))
    }
}
/// Read-only, per-column iteration over a compressed sparse matrix storage.
pub trait CsStorageIter<'a, T, R, C = U1> {
    /// Iterator over the `(row_index, value)` pairs of a single column.
    type ColumnEntries: Iterator<Item = (usize, T)>;
    /// Iterator over the row indices of a single column.
    type ColumnRowIndices: Iterator<Item = usize>;
    /// Iterates over the row indices of the `j`-th column.
    fn column_row_indices(&'a self, j: usize) -> Self::ColumnRowIndices;
    /// Iterates over the `(row_index, value)` pairs of the `j`-th column.
    fn column_entries(&'a self, j: usize) -> Self::ColumnEntries;
}
/// Mutable, per-column iteration over a compressed sparse matrix storage.
pub trait CsStorageIterMut<'a, T: 'a, R, C = U1> {
    /// Mutable iterator over every explicitly stored value.
    type ValuesMut: Iterator<Item = &'a mut T>;
    /// Mutable iterator over the `(row_index, &mut value)` pairs of a single column.
    type ColumnEntriesMut: Iterator<Item = (usize, &'a mut T)>;
    /// Mutably iterates over all the explicitly stored values.
    fn values_mut(&'a mut self) -> Self::ValuesMut;
    /// Mutably iterates over the `(row_index, &mut value)` pairs of the `j`-th column.
    fn column_entries_mut(&'a mut self, j: usize) -> Self::ColumnEntriesMut;
}
/// The common storage interface of compressed sparse matrices.
pub trait CsStorage<T, R, C = U1>: for<'a> CsStorageIter<'a, T, R, C> {
    /// The dimensions of the stored matrix.
    fn shape(&self) -> (R, C);

    /// The row index of the `i`-th explicitly stored entry, without bounds checking.
    ///
    /// # Safety
    /// `i` must be in `0..self.len()`.
    unsafe fn row_index_unchecked(&self, i: usize) -> usize;

    /// A reference to the `i`-th explicitly stored value, without bounds checking.
    ///
    /// # Safety
    /// `i` must be in `0..self.len()`.
    unsafe fn get_value_unchecked(&self, i: usize) -> &T;

    /// A reference to the `i`-th explicitly stored value.
    fn get_value(&self, i: usize) -> &T;

    /// The row index of the `i`-th explicitly stored entry.
    fn row_index(&self, i: usize) -> usize;

    /// The range of flat indices covering the entries of the `i`-th column.
    fn column_range(&self, i: usize) -> Range<usize>;

    /// The number of explicitly stored entries.
    fn len(&self) -> usize;

    /// Returns `true` if no entry is explicitly stored.
    ///
    /// Default method added so implementors satisfy the standard
    /// `len`/`is_empty` pairing convention; fully backward-compatible.
    fn is_empty(&self) -> bool {
        self.len() == 0
    }
}
/// A compressed sparse matrix storage that can be mutated in place.
pub trait CsStorageMut<T, R, C = U1>:
    CsStorage<T, R, C> + for<'a> CsStorageIterMut<'a, T, R, C>
{
}
/// A `Vec`-backed compressed sparse (column) storage: row indices and values
/// are kept in flat arrays, with one start offset per column in `p`.
#[derive(Clone, Debug, PartialEq)]
pub struct CsVecStorage<T: Scalar, R: Dim, C: Dim>
where
    DefaultAllocator: Allocator<usize, C>,
{
    // The (rows, columns) dimensions of the matrix.
    pub(crate) shape: (R, C),
    // Start offset of each column inside `i`/`vals` (see `column_range`).
    pub(crate) p: OVector<usize, C>,
    // Row index of each explicitly stored entry.
    pub(crate) i: Vec<usize>,
    // Value of each explicitly stored entry, column by column.
    pub(crate) vals: Vec<T>,
}
impl<T: Scalar, R: Dim, C: Dim> CsVecStorage<T, R, C>
where
    DefaultAllocator: Allocator<usize, C>,
{
    /// The explicitly stored values, column by column.
    pub fn values(&self) -> &[T] {
        self.vals.as_slice()
    }

    /// The per-column start offsets into `i()`/`values()`.
    pub fn p(&self) -> &[usize] {
        self.p.as_slice()
    }

    /// The row index of each explicitly stored value.
    pub fn i(&self) -> &[usize] {
        self.i.as_slice()
    }
}
impl<T: Scalar, R: Dim, C: Dim> CsVecStorage<T, R, C> where DefaultAllocator: Allocator<usize, C> {}
impl<'a, T: Scalar, R: Dim, C: Dim> CsStorageIter<'a, T, R, C> for CsVecStorage<T, R, C>
where
    DefaultAllocator: Allocator<usize, C>,
{
    type ColumnEntries = ColumnEntries<'a, T>;
    type ColumnRowIndices = iter::Cloned<slice::Iter<'a, usize>>;

    /// Iterates over the `(row_index, value)` pairs of column `j`.
    #[inline]
    fn column_entries(&'a self, j: usize) -> Self::ColumnEntries {
        let rng = self.column_range(j);
        // `rng` is used to index two slices, hence the clone for the first.
        ColumnEntries::new(&self.i[rng.clone()], &self.vals[rng])
    }

    /// Iterates over the row indices of column `j`.
    #[inline]
    fn column_row_indices(&'a self, j: usize) -> Self::ColumnRowIndices {
        let rng = self.column_range(j);
        // Fix: the range is consumed only once here, so the previous
        // `rng.clone()` was redundant.
        self.i[rng].iter().cloned()
    }
}
impl<T: Scalar, R: Dim, C: Dim> CsStorage<T, R, C> for CsVecStorage<T, R, C>
where
    DefaultAllocator: Allocator<usize, C>,
{
    /// The (rows, columns) dimensions of the stored matrix.
    #[inline]
    fn shape(&self) -> (R, C) {
        self.shape
    }
    /// The number of explicitly stored entries.
    #[inline]
    fn len(&self) -> usize {
        self.vals.len()
    }
    /// The row index of the `i`-th explicitly stored entry (panics if out of bounds).
    #[inline]
    fn row_index(&self, i: usize) -> usize {
        self.i[i]
    }
    #[inline]
    unsafe fn row_index_unchecked(&self, i: usize) -> usize {
        // SAFETY: the caller must guarantee `i < self.i.len()`.
        *self.i.get_unchecked(i)
    }
    #[inline]
    unsafe fn get_value_unchecked(&self, i: usize) -> &T {
        // SAFETY: the caller must guarantee `i < self.vals.len()`.
        self.vals.get_unchecked(i)
    }
    /// A reference to the `i`-th explicitly stored value (panics if out of bounds).
    #[inline]
    fn get_value(&self, i: usize) -> &T {
        &self.vals[i]
    }
    /// The flat index range `p[j]..p[j + 1]` covering the entries of column `j`.
    /// The last column ends at `len()` since `p` holds one start offset per column.
    #[inline]
    fn column_range(&self, j: usize) -> Range<usize> {
        let end = if j + 1 == self.p.len() {
            self.len()
        } else {
            self.p[j + 1]
        };
        self.p[j]..end
    }
}
impl<'a, T: Scalar, R: Dim, C: Dim> CsStorageIterMut<'a, T, R, C> for CsVecStorage<T, R, C>
where
DefaultAllocator: Allocator<usize, C>,
{
type ValuesMut = slice::IterMut<'a, T>;
type ColumnEntriesMut = iter::Zip<iter::Cloned<slice::Iter<'a, usize>>, slice::IterMut<'a, T>>;
#[inline]
fn values_mut(&'a mut self) -> Self::ValuesMut {
self.vals.iter_mut()
}
#[inline]
fn column_entries_mut(&'a mut self, j: usize) -> Self::ColumnEntriesMut {
let rng = self.column_range(j);
self.i[rng.clone()]
.iter()
.cloned()
.zip(self.vals[rng].iter_mut())
}
}
// `CsVecStorage` supports in-place mutation, so it implements the mutable
// storage marker trait (all methods come from the supertraits).
impl<T: Scalar, R: Dim, C: Dim> CsStorageMut<T, R, C> for CsVecStorage<T, R, C> where
    DefaultAllocator: Allocator<usize, C>
{
}
/// A compressed sparse matrix with dimensions `R × C`, generic over its
/// storage `S` (by default a column-compressed `CsVecStorage`).
#[derive(Clone, Debug, PartialEq)]
pub struct CsMatrix<
    T: Scalar,
    R: Dim = Dynamic,
    C: Dim = Dynamic,
    S: CsStorage<T, R, C> = CsVecStorage<T, R, C>,
> {
    // The underlying sparse storage.
    pub(crate) data: S,
    // Carries the `T`, `R`, `C` type parameters without storing them.
    _phantoms: PhantomData<(T, R, C)>,
}
pub type CsVector<T, R = Dynamic, S = CsVecStorage<T, R, U1>> = CsMatrix<T, R, U1, S>;
impl<T: Scalar, R: Dim, C: Dim> CsMatrix<T, R, C>
where
    DefaultAllocator: Allocator<usize, C>,
{
    /// Creates a new `nrows × ncols` compressed sparse matrix with room for
    /// `nvals` explicit entries whose row indices and values are left
    /// uninitialized; the column pointers `p` are zero-initialized.
    ///
    /// Callers must overwrite `p`, `i` and `vals` before reading them
    /// (see e.g. `transpose`).
    pub fn new_uninitialized_generic(nrows: R, ncols: C, nvals: usize) -> Self {
        let mut i = Vec::with_capacity(nvals);
        unsafe {
            // NOTE(review): `set_len` on a freshly allocated `Vec` exposes
            // uninitialized memory, which is technically UB even for `usize`.
            // A `MaybeUninit`-based design would be sounder — TODO confirm.
            i.set_len(nvals);
        }
        i.shrink_to_fit();
        let mut vals = Vec::with_capacity(nvals);
        unsafe {
            // NOTE(review): same soundness caveat as above; for a non-trivial
            // `T` this additionally risks dropping uninitialized values if the
            // buffer is not fully overwritten before the matrix is dropped.
            vals.set_len(nvals);
        }
        vals.shrink_to_fit();
        CsMatrix {
            data: CsVecStorage {
                shape: (nrows, ncols),
                // Zeroed column pointers: every column range is empty for now.
                p: OVector::zeros_generic(ncols, Const::<1>),
                i,
                vals,
            },
            _phantoms: PhantomData,
        }
    }
}
impl<T: Scalar, R: Dim, C: Dim, S: CsStorage<T, R, C>> CsMatrix<T, R, C, S> {
    /// Wraps the given storage into a matrix.
    pub(crate) fn from_data(data: S) -> Self {
        CsMatrix {
            data,
            _phantoms: PhantomData,
        }
    }

    /// The number of explicitly stored entries.
    pub fn len(&self) -> usize {
        self.data.len()
    }

    /// Returns `true` if this matrix stores no explicit entry.
    ///
    /// Added to pair with `len()` per the standard Rust convention;
    /// backward-compatible.
    pub fn is_empty(&self) -> bool {
        self.data.len() == 0
    }

    /// The number of rows.
    pub fn nrows(&self) -> usize {
        self.data.shape().0.value()
    }

    /// The number of columns.
    pub fn ncols(&self) -> usize {
        self.data.shape().1.value()
    }

    /// The (rows, columns) dimensions as runtime values.
    pub fn shape(&self) -> (usize, usize) {
        let (nrows, ncols) = self.data.shape();
        (nrows.value(), ncols.value())
    }

    /// Returns `true` if this matrix has as many rows as columns.
    pub fn is_square(&self) -> bool {
        let (nrows, ncols) = self.data.shape();
        nrows.value() == ncols.value()
    }

    /// Returns `true` if the row indices of every column are strictly
    /// increasing (implying no duplicate row index within a column).
    pub fn is_sorted(&self) -> bool {
        for j in 0..self.ncols() {
            // Row index of the previous entry of this column, if any.
            let mut curr = None;
            for idx in self.data.column_row_indices(j) {
                if let Some(curr) = curr {
                    if idx <= curr {
                        // Out of order, or duplicate row index.
                        return false;
                    }
                }
                curr = Some(idx);
            }
        }
        true
    }

    /// Computes the transpose of this matrix.
    pub fn transpose(&self) -> CsMatrix<T, C, R>
    where
        DefaultAllocator: Allocator<usize, R>,
    {
        let (nrows, ncols) = self.data.shape();
        let nvals = self.len();
        let mut res = CsMatrix::new_uninitialized_generic(ncols, nrows, nvals);

        // Count the entries of each row of `self`, i.e. of each column of the
        // transpose.
        let mut workspace = Vector::zeros_generic(nrows, Const::<1>);
        for i in 0..nvals {
            let row_id = self.data.row_index(i);
            workspace[row_id] += 1;
        }

        // Cumulative sum writes the column pointers of the transpose into
        // `res.data.p`; afterwards `workspace[r]` is the next free slot of
        // transposed column `r`.
        let _ = cs_utils::cumsum(&mut workspace, &mut res.data.p);

        // Scatter every entry `(row_id, j, value)` of `self` to position
        // `(j, row_id, value)` of the transpose.
        for j in 0..ncols.value() {
            for (row_id, value) in self.data.column_entries(j) {
                let shift = workspace[row_id];
                res.data.vals[shift] = value;
                res.data.i[shift] = j;
                workspace[row_id] += 1;
            }
        }

        res
    }
}
impl<T: Scalar, R: Dim, C: Dim, S: CsStorageMut<T, R, C>> CsMatrix<T, R, C, S> {
    /// Mutably iterates over all the explicitly stored values of this matrix.
    #[inline]
    pub fn values_mut(&mut self) -> impl Iterator<Item = &mut T> {
        self.data.values_mut()
    }
}
impl<T: Scalar, R: Dim, C: Dim> CsMatrix<T, R, C>
where
    DefaultAllocator: Allocator<usize, C>,
{
    /// Sorts the row indices of every column in increasing order, allocating
    /// a temporary workspace of `nrows` elements.
    pub(crate) fn sort(&mut self)
    where
        DefaultAllocator: Allocator<T, R>,
    {
        let nrows = self.data.shape().0;
        // NOTE(review): the workspace may start uninitialized (macro name
        // suggests so); `sort_with_workspace` only reads slots it previously
        // wrote for the current column, but this still relies on `T`
        // tolerating uninitialized storage — TODO confirm soundness.
        let mut workspace =
            unsafe { crate::unimplemented_or_uninitialized_generic!(nrows, Const::<1>) };
        self.sort_with_workspace(workspace.as_mut_slice());
    }
    /// Sorts the row indices of every column using `workspace` (at least
    /// `self.nrows()` elements) as scatter/gather scratch space: each column's
    /// values are parked at their row position, the indices sorted, then the
    /// values gathered back in the new order.
    pub(crate) fn sort_with_workspace(&mut self, workspace: &mut [T]) {
        assert!(
            workspace.len() >= self.nrows(),
            "Workspace must be able to hold at least self.nrows() elements."
        );
        for j in 0..self.ncols() {
            // Scatter: park each value of column `j` at its row index.
            for (irow, val) in self.data.column_entries(j) {
                workspace[irow] = val;
            }
            // Sort this column's row indices in place.
            let range = self.data.column_range(j);
            self.data.i[range.clone()].sort();
            // Gather: read the values back following the sorted index order.
            for (i, irow) in range.clone().zip(self.data.i[range].iter().cloned()) {
                self.data.vals[i] = workspace[irow].inlined_clone();
            }
        }
    }
    /// Merges runs of entries with identical row indices by summing their
    /// values, compacting `i`/`vals` and rewriting `p` in place. Duplicates
    /// must already be adjacent within each column (e.g. after `sort`).
    pub(crate) fn dedup(&mut self)
    where
        T: Zero + ClosedAdd,
    {
        // Write cursor into the compacted arrays; never exceeds the read
        // cursor `idx`, so in-place overwriting is safe.
        let mut curr_i = 0;
        for j in 0..self.ncols() {
            let range = self.data.column_range(j);
            // Column `j` of the compacted matrix starts at the write cursor.
            self.data.p[j] = curr_i;
            if range.start != range.end {
                let mut value = T::zero();
                let mut irow = self.data.i[range.start];
                for idx in range {
                    let curr_irow = self.data.i[idx];
                    if curr_irow == irow {
                        // Same row as the running entry: accumulate its value.
                        value += self.data.vals[idx].inlined_clone();
                    } else {
                        // New row: flush the accumulated entry, restart the
                        // accumulator on the current one.
                        self.data.i[curr_i] = irow;
                        self.data.vals[curr_i] = value;
                        value = self.data.vals[idx].inlined_clone();
                        irow = curr_irow;
                        curr_i += 1;
                    }
                }
                // Flush the last accumulated entry of this column.
                self.data.i[curr_i] = irow;
                self.data.vals[curr_i] = value;
                curr_i += 1;
            }
        }
        // Drop the now-unused tail left over from the merged duplicates.
        self.data.i.truncate(curr_i);
        self.data.i.shrink_to_fit();
        self.data.vals.truncate(curr_i);
        self.data.vals.shrink_to_fit();
    }
}