use scirs2_autograd as ag;
pub use ag::tensor_ops;
pub use ag::{Context, Float, Tensor};
pub mod batch {
    //! Batched linear-algebra operations — placeholder, no items yet.
}
pub mod factorizations {
    //! Matrix factorization routines — placeholder, no items yet.
}
pub mod matrix_calculus;
pub mod helpers {
use super::*;
pub fn trace_workaround<'g, F: ag::Float>(
matrix: &ag::Tensor<'g, F>,
n: usize,
ctx: &'g ag::Context<'g, F>,
) -> ag::Tensor<'g, F> {
let mut eye_data = vec![F::zero(); n * n];
for i in 0..n {
eye_data[i * n + i] = F::one();
}
let eye = ag::tensor_ops::convert_to_tensor(
ag::ndarray::Array2::from_shape_vec((n, n), eye_data).expect("Operation failed"),
ctx,
);
let diag_elements = matrix * eye;
ag::tensor_ops::sum_all(diag_elements)
}
/// Builds an `n x n` identity matrix as a graph tensor (no native `eye` op).
///
/// * `n` - side length of the identity matrix
/// * `ctx` - graph context the constant tensor is registered in
pub fn eye_workaround<'g, F: ag::Float>(
    n: usize,
    ctx: &'g ag::Context<'g, F>,
) -> ag::Tensor<'g, F> {
    // One on the main diagonal, zero everywhere else.
    let identity = ag::ndarray::Array2::from_shape_fn((n, n), |(row, col)| {
        if row == col {
            F::one()
        } else {
            F::zero()
        }
    });
    ag::tensor_ops::convert_to_tensor(identity, ctx)
}
/// Builds a dense diagonal matrix from a 1-D tensor of diagonal values.
///
/// The values are pulled back to the host via `to_ndarray`, so the result is
/// a constant in the graph: gradients do NOT flow back into `diagonal`.
pub fn diag_workaround<'g, F: ag::Float>(
    diagonal: &ag::Tensor<'g, F>,
    ctx: &'g ag::Context<'g, F>,
) -> ag::Tensor<'g, F> {
    let values =
        ag::integration::tensor_conversion::to_ndarray(diagonal).expect("Operation failed");
    let size = values.len();
    // size x size buffer, zero everywhere except the main diagonal; in the
    // flattened layout the diagonal slots are exactly `size + 1` apart.
    let mut cells = vec![F::zero(); size * size];
    for (row, cell) in cells.iter_mut().step_by(size + 1).take(size).enumerate() {
        *cell = values[row];
    }
    ag::tensor_ops::convert_to_tensor(
        ag::ndarray::Array2::from_shape_vec((size, size), cells).expect("Operation failed"),
        ctx,
    )
}
/// Frobenius norm of a tensor: `sqrt(sum_ij M_ij^2)`. Fully differentiable.
pub fn frobenius_norm<'g, F: ag::Float>(matrix: &ag::Tensor<'g, F>) -> ag::Tensor<'g, F> {
    // Square element-wise, reduce to a scalar, take the square root.
    ag::tensor_ops::sqrt(ag::tensor_ops::sum_all(matrix * matrix))
}
/// Determinant of an `n x n` matrix, computed on the host.
///
/// * `n == 1`: returns the matrix itself (gradient flows).
/// * `n >= 2`: values are pulled back via `to_ndarray`, so the result is a
///   graph constant — gradients do NOT flow into `matrix`.
/// * `n == 2`: closed form `a*d - b*c`.
/// * `n >= 3`: Gaussian elimination with partial pivoting. (Previously this
///   branch ignored the input and returned a hard-coded 1.0.)
pub fn det_approximation<'g, F: ag::Float>(
    matrix: &ag::Tensor<'g, F>,
    n: usize,
    ctx: &'g ag::Context<'g, F>,
) -> ag::Tensor<'g, F> {
    if n == 1 {
        return *matrix;
    }
    let matarray =
        ag::integration::tensor_conversion::to_ndarray(matrix).expect("Operation failed");
    if n == 2 {
        // 2x2 closed form, rebuilt as 1x1 tensors so the result lives in the graph.
        let a = ag::tensor_ops::convert_to_tensor(
            ag::ndarray::Array2::from_elem((1, 1), matarray[[0, 0]]),
            ctx,
        );
        let b = ag::tensor_ops::convert_to_tensor(
            ag::ndarray::Array2::from_elem((1, 1), matarray[[0, 1]]),
            ctx,
        );
        let c = ag::tensor_ops::convert_to_tensor(
            ag::ndarray::Array2::from_elem((1, 1), matarray[[1, 0]]),
            ctx,
        );
        let d = ag::tensor_ops::convert_to_tensor(
            ag::ndarray::Array2::from_elem((1, 1), matarray[[1, 1]]),
            ctx,
        );
        return a * d - b * c;
    }
    // General case: LU-style elimination on a host copy; det = (+/-) product of pivots.
    let mut work = vec![F::zero(); n * n];
    for i in 0..n {
        for j in 0..n {
            work[i * n + j] = matarray[[i, j]];
        }
    }
    let mut det = F::one();
    for col in 0..n {
        // Partial pivoting: pick the largest-magnitude entry in this column.
        let mut pivot = col;
        for row in (col + 1)..n {
            if work[row * n + col].abs() > work[pivot * n + col].abs() {
                pivot = row;
            }
        }
        if work[pivot * n + col] == F::zero() {
            // No usable pivot: matrix is singular, determinant is zero.
            det = F::zero();
            break;
        }
        if pivot != col {
            // A row swap flips the sign of the determinant.
            for j in 0..n {
                work.swap(col * n + j, pivot * n + j);
            }
            det = F::zero() - det;
        }
        det = det * work[col * n + col];
        // Eliminate entries below the pivot.
        for row in (col + 1)..n {
            let factor = work[row * n + col] / work[col * n + col];
            for j in col..n {
                work[row * n + j] = work[row * n + j] - factor * work[col * n + j];
            }
        }
    }
    ag::tensor_ops::convert_to_tensor(ag::ndarray::Array2::from_elem((1, 1), det), ctx)
}
/// Approximately solves `A x = b` by gradient descent on `||A x - b||^2`.
///
/// Assumes `a` is square (`n x n` with `n = b.len()` — TODO confirm callers)
/// and `b` is a column vector. Starts from `x = 0` and takes `iterations`
/// steps of size `learning_rate` along the negative gradient `A^T (A x - b)`.
pub fn solve_iterative<'g, F: ag::Float>(
    a: &ag::Tensor<'g, F>,
    b: &ag::Tensor<'g, F>,
    iterations: usize,
    learning_rate: F,
    ctx: &'g ag::Context<'g, F>,
) -> ag::Tensor<'g, F> {
    // The problem dimension comes from the right-hand side, pulled to the host once.
    let rhs_host = ag::integration::tensor_conversion::to_ndarray(b).expect("Operation failed");
    let dim = rhs_host.len();
    let mut solution =
        ag::tensor_ops::convert_to_tensor(ag::ndarray::Array2::zeros((dim, 1)), ctx);
    // Step size as a 1x1 tensor so it multiplies the gradient in-graph.
    let step = ag::tensor_ops::convert_to_tensor(
        ag::ndarray::Array2::from_elem((1, 1), learning_rate),
        ctx,
    );
    for _ in 0..iterations {
        // residual r = A x - b; gradient of 0.5 * ||r||^2 w.r.t. x is A^T r.
        let residual = ag::tensor_ops::matmul(a, solution) - b;
        let grad = ag::tensor_ops::matmul(ag::tensor_ops::transpose(a, &[1, 0]), residual);
        solution = solution - grad * step;
    }
    solution
}
/// Estimates the dominant eigenvalue of `matrix` by power iteration.
///
/// Iterates `v <- A v / ||A v||_F` from a fixed deterministic start vector,
/// then returns the Rayleigh quotient `(v^T A v) / (v^T v)` as a 1x1 tensor.
/// Assumes `matrix` is `n x n`; convergence requires a unique eigenvalue of
/// largest magnitude.
pub fn dominant_eigenvalue<'g, F: ag::Float>(
    matrix: &ag::Tensor<'g, F>,
    iterations: usize,
    n: usize,
    ctx: &'g ag::Context<'g, F>,
) -> ag::Tensor<'g, F> {
    // Deterministic, non-degenerate start vector [1.0, 0.1, 0.2, ...], built
    // directly. The old code filled the vec with ones, then redundantly
    // re-assigned v_data[0] = 1 — an unconditional index that panicked on n == 0.
    let tenth = F::from(0.1).expect("Operation failed");
    let v_data: Vec<F> = (0..n)
        .map(|i| {
            if i == 0 {
                F::one()
            } else {
                tenth * F::from(i as f64).expect("Operation failed")
            }
        })
        .collect();
    let mut v = ag::tensor_ops::convert_to_tensor(
        ag::ndarray::Array2::from_shape_vec((n, 1), v_data).expect("Operation failed"),
        ctx,
    );
    for _iter in 0..iterations {
        // Normalize every step so the iterate neither explodes nor vanishes.
        let av = ag::tensor_ops::matmul(matrix, v);
        let norm = frobenius_norm(&av);
        v = av / norm;
    }
    // Rayleigh quotient: (v^T A v) / (v^T v), a 1x1 tensor.
    let vt = ag::tensor_ops::transpose(v, &[1, 0]);
    let av = ag::tensor_ops::matmul(matrix, v);
    let numerator = ag::tensor_ops::matmul(vt, av);
    let denominator = ag::tensor_ops::matmul(vt, v);
    numerator / denominator
}
/// Very rough proxy for the condition number of `matrix`.
///
/// NOTE(review): this only returns the dominant-eigenvalue estimate
/// `lambda_max` from power iteration. A true 2-norm condition number would be
/// `sigma_max / sigma_min`, but no smallest-singular-value estimate is
/// available here — treat the result as a scale estimate, not `kappa(A)`.
pub fn condition_number_approx<'g, F: ag::Float>(
    matrix: &ag::Tensor<'g, F>,
    iterations: usize,
    n: usize,
    ctx: &'g ag::Context<'g, F>,
) -> ag::Tensor<'g, F> {
    // Return the estimate directly (was a needless let-and-return binding).
    dominant_eigenvalue(matrix, iterations, n, ctx)
}
/// Numerical rank of `matrix`: the number of pivots whose magnitude exceeds
/// `tolerance` during Gaussian elimination with partial pivoting.
///
/// Values are pulled back to the host via `to_ndarray`, so the result is a
/// graph constant (no gradient). Previously this was a stub that ignored both
/// arguments and always returned a hard-coded 3.0.
pub fn rank_approximation<'g, F: ag::Float>(
    matrix: &ag::Tensor<'g, F>,
    tolerance: F,
    ctx: &'g ag::Context<'g, F>,
) -> ag::Tensor<'g, F> {
    let host = ag::integration::tensor_conversion::to_ndarray(matrix).expect("Operation failed");
    let shape = host.shape();
    // assumes a 2-D (rows x cols) tensor — TODO confirm against callers
    let (rows, cols) = (shape[0], shape[1]);
    let mut work = vec![F::zero(); rows * cols];
    for i in 0..rows {
        for j in 0..cols {
            work[i * cols + j] = host[[i, j]];
        }
    }
    // Row-echelon reduction; each accepted pivot contributes one to the rank.
    let mut rank = 0usize;
    let mut pivot_row = 0usize;
    for col in 0..cols {
        if pivot_row >= rows {
            break;
        }
        // Largest-magnitude candidate pivot in this column (numerical stability).
        let mut best = pivot_row;
        for r in (pivot_row + 1)..rows {
            if work[r * cols + col].abs() > work[best * cols + col].abs() {
                best = r;
            }
        }
        if work[best * cols + col].abs() <= tolerance {
            // Column is numerically dependent on earlier ones: no pivot here.
            continue;
        }
        if best != pivot_row {
            for j in 0..cols {
                work.swap(pivot_row * cols + j, best * cols + j);
            }
        }
        // Eliminate entries below the pivot.
        for r in (pivot_row + 1)..rows {
            let factor = work[r * cols + col] / work[pivot_row * cols + col];
            for j in col..cols {
                work[r * cols + j] = work[r * cols + j] - factor * work[pivot_row * cols + j];
            }
        }
        pivot_row += 1;
        rank += 1;
    }
    ag::tensor_ops::convert_to_tensor(
        ag::ndarray::Array2::from_elem((1, 1), F::from(rank as f64).expect("Operation failed")),
        ctx,
    )
}
}