use super::dual::{Dual, DualVector};
use crate::common::IntegrateFloat;
use crate::error::{IntegrateError, IntegrateResult};
use scirs2_core::ndarray::{Array1, Array2, ArrayView1};
/// Forward-mode automatic differentiation engine built on scalar dual numbers.
///
/// Configured with the expected number of input variables; derivative routines
/// reject inputs of any other length.
pub struct ForwardAD<F: IntegrateFloat> {
    /// Number of independent variables every input vector must have.
    n_vars: usize,
    /// Numerical tolerance set by `with_tolerance`. NOTE(review): stored but
    /// not read by any method visible in this file — confirm intended use.
    tolerance: F,
}
impl<F: IntegrateFloat> ForwardAD<F> {
    /// Create an engine expecting `n_vars` independent variables, with a
    /// default tolerance of 1e-12.
    pub fn new(n_vars: usize) -> Self {
        ForwardAD {
            n_vars,
            tolerance: F::from(1e-12).expect("Failed to convert constant to float"),
        }
    }

    /// Builder-style setter for the stored tolerance.
    pub fn with_tolerance(mut self, tol: F) -> Self {
        self.tolerance = tol;
        self
    }

    /// Verify an input length against the configured variable count.
    fn check_dims(&self, len: usize) -> IntegrateResult<()> {
        if len != self.n_vars {
            return Err(IntegrateError::DimensionMismatch(format!(
                "Expected {} variables, got {}",
                self.n_vars, len
            )));
        }
        Ok(())
    }

    /// Build a dual-number copy of `x` with a unit derivative seed on
    /// variable `seed` and zero seeds everywhere else.
    fn seeded_input(x: ArrayView1<F>, seed: usize) -> Vec<Dual<F>> {
        x.iter()
            .enumerate()
            .map(|(k, &val)| {
                if k == seed {
                    Dual::variable(val)
                } else {
                    Dual::constant(val)
                }
            })
            .collect()
    }

    /// Gradient of a scalar function `f` at `x` via forward-mode AD.
    ///
    /// Evaluates `f` once per variable, seeding a unit dual perturbation on
    /// that variable and reading off the derivative part of the result.
    ///
    /// # Errors
    /// Returns `DimensionMismatch` when `x.len() != n_vars`.
    pub fn gradient<Func>(&self, f: Func, x: ArrayView1<F>) -> IntegrateResult<Array1<F>>
    where
        Func: Fn(&[Dual<F>]) -> Dual<F>,
    {
        self.check_dims(x.len())?;
        let mut gradient = Array1::zeros(self.n_vars);
        for i in 0..self.n_vars {
            gradient[i] = f(&Self::seeded_input(x, i)).derivative();
        }
        Ok(gradient)
    }

    /// Jacobian of a vector function `f` at `x` via forward-mode AD.
    ///
    /// Column `j` comes from one evaluation of `f` with a unit dual seed on
    /// variable `j`. The matrix is sized from the first seeded evaluation
    /// instead of an extra all-constant probe call, so `f` runs exactly
    /// `n_vars` times (one probe call happens only when `n_vars == 0`).
    /// `f` is assumed to return a fixed-length vector.
    ///
    /// # Errors
    /// Returns `DimensionMismatch` when `x.len() != n_vars`.
    pub fn jacobian<Func>(&self, f: Func, x: ArrayView1<F>) -> IntegrateResult<Array2<F>>
    where
        Func: Fn(&[Dual<F>]) -> Vec<Dual<F>>,
    {
        self.check_dims(x.len())?;
        let mut jacobian: Option<Array2<F>> = None;
        for j in 0..self.n_vars {
            let result = f(&Self::seeded_input(x, j));
            // Allocate on the first pass, once the output length is known.
            let jac = jacobian.get_or_insert_with(|| Array2::zeros((result.len(), self.n_vars)));
            for (i, res) in result.iter().enumerate() {
                jac[[i, j]] = res.derivative();
            }
        }
        match jacobian {
            Some(jac) => Ok(jac),
            // Degenerate zero-variable case: probe once to learn the output
            // length, matching the original's (m, 0) result shape.
            None => Ok(Array2::zeros((f(&[]).len(), 0))),
        }
    }

    /// Directional derivative ∇f(x)·d of scalar `f` at `x` along `direction`,
    /// computed with a single dual-number evaluation (each variable is seeded
    /// with the matching component of the direction).
    ///
    /// # Errors
    /// Returns `DimensionMismatch` when either input length differs from
    /// `n_vars`.
    pub fn directional_derivative<Func>(
        &self,
        f: Func,
        x: ArrayView1<F>,
        direction: ArrayView1<F>,
    ) -> IntegrateResult<F>
    where
        Func: Fn(&[Dual<F>]) -> Dual<F>,
    {
        if x.len() != self.n_vars || direction.len() != self.n_vars {
            return Err(IntegrateError::DimensionMismatch(format!(
                "Expected {} variables, got x: {}, direction: {}",
                self.n_vars,
                x.len(),
                direction.len()
            )));
        }
        let dual_x: Vec<_> = x
            .iter()
            .zip(direction.iter())
            .map(|(&val, &der)| Dual::new(val, der))
            .collect();
        Ok(f(&dual_x).derivative())
    }
}
/// Convenience wrapper: gradient of scalar `f` at `x` using a throwaway
/// [`ForwardAD`] engine sized to the length of `x`.
#[allow(dead_code)]
pub fn forward_gradient<F, Func>(f: Func, x: ArrayView1<F>) -> IntegrateResult<Array1<F>>
where
    F: IntegrateFloat,
    Func: Fn(&[Dual<F>]) -> Dual<F>,
{
    ForwardAD::new(x.len()).gradient(f, x)
}
/// Convenience wrapper: Jacobian of vector-valued `f` at `x` using a
/// throwaway [`ForwardAD`] engine sized to the length of `x`.
#[allow(dead_code)]
pub fn forward_jacobian<F, Func>(f: Func, x: ArrayView1<F>) -> IntegrateResult<Array2<F>>
where
    F: IntegrateFloat,
    Func: Fn(&[Dual<F>]) -> Vec<Dual<F>>,
{
    ForwardAD::new(x.len()).jacobian(f, x)
}
/// Worked example: gradient of the Rosenbrock function
/// f(x, y) = (1 - x)² + 100·(y - x²)², evaluated at the point (1, 2).
#[allow(dead_code)]
pub fn example_rosenbrock_gradient<F: IntegrateFloat>() -> IntegrateResult<()> {
    let rosenbrock = |x: &[Dual<F>]| {
        let one = Dual::constant(F::one());
        let hundred = Dual::constant(F::from(100.0).expect("Failed to convert constant to float"));
        // Name the two residuals instead of repeating the subexpressions.
        let a = one - x[0];
        let b = x[1] - x[0] * x[0];
        a * a + hundred * b * b
    };
    let point = Array1::from_vec(vec![
        F::from(1.0).expect("Failed to convert constant to float"),
        F::from(2.0).expect("Failed to convert constant to float"),
    ]);
    let grad = forward_gradient(rosenbrock, point.view())?;
    println!("Gradient at (1,2): {grad:?}");
    Ok(())
}
/// Forward-mode AD helper for computing ODE right-hand-side Jacobians df/dy.
pub struct ForwardODEJacobian<F: IntegrateFloat> {
    /// State dimension (kept for reference; the engine below is sized to it).
    _n_states: usize,
    /// Underlying forward-mode engine sized to the state dimension.
    ad_engine: ForwardAD<F>,
}
impl<F: IntegrateFloat> ForwardODEJacobian<F> {
    /// Build a Jacobian helper for an ODE with `_nstates` state variables.
    pub fn new(_nstates: usize) -> Self {
        let ad_engine = ForwardAD::new(_nstates);
        ForwardODEJacobian {
            _n_states: _nstates,
            ad_engine,
        }
    }

    /// Jacobian of the right-hand side `f(t, y)` with respect to the state
    /// `y`, evaluated at time `t` and state `y`.
    pub fn compute<Func>(&self, f: Func, t: F, y: ArrayView1<F>) -> IntegrateResult<Array2<F>>
    where
        Func: Fn(F, &[Dual<F>]) -> Vec<Dual<F>>,
    {
        // Freeze time so the engine differentiates with respect to y only.
        let rhs_at_t = |dual_y: &[Dual<F>]| f(t, dual_y);
        self.ad_engine.jacobian(rhs_at_t, y)
    }
}
/// Forward-mode AD engine that propagates several derivative directions per
/// evaluation using `DualVector` numbers.
pub struct VectorizedForwardAD<F: IntegrateFloat> {
    /// Number of independent variables every input vector must have.
    n_vars: usize,
    /// Number of derivative directions propagated simultaneously.
    n_directions: usize,
    // Stored but not read by any method here (hence the allow); set to 1e-12
    // by `new`.
    #[allow(dead_code)]
    tolerance: F,
}
impl<F: IntegrateFloat> VectorizedForwardAD<F> {
    /// Create an engine for `n_vars` variables propagating `n_directions`
    /// derivative directions at once, with a default tolerance of 1e-12.
    pub fn new(n_vars: usize, n_directions: usize) -> Self {
        VectorizedForwardAD {
            n_vars,
            n_directions,
            tolerance: F::from(1e-12).expect("Failed to convert constant to float"),
        }
    }

    /// Derivatives of scalar-valued `f` along all of `directions` at `x`,
    /// computed in a single dual-vector evaluation.
    ///
    /// # Errors
    /// Returns `DimensionMismatch` when `x.len()` or `directions.len()`
    /// differs from the configured counts.
    pub fn directional_derivatives<Func>(
        &self,
        f: Func,
        x: ArrayView1<F>,
        directions: &[ArrayView1<F>],
    ) -> IntegrateResult<Array1<F>>
    where
        Func: Fn(&[DualVector<F>]) -> DualVector<F>,
    {
        if x.len() != self.n_vars {
            return Err(IntegrateError::DimensionMismatch(format!(
                "Expected {} variables, got {}",
                self.n_vars,
                x.len()
            )));
        }
        if directions.len() != self.n_directions {
            return Err(IntegrateError::DimensionMismatch(format!(
                "Expected {} directions, got {}",
                self.n_directions,
                directions.len()
            )));
        }
        // Seed variable i with the i-th component of every direction.
        let dual_x: Vec<_> = (0..self.n_vars)
            .map(|i| {
                let mut seeds = Array1::zeros(self.n_directions);
                for (j, dir) in directions.iter().enumerate() {
                    seeds[j] = dir[i];
                }
                DualVector {
                    values: Array1::from_elem(1, x[i]),
                    jacobian: Array1::from_elem(1, seeds),
                }
            })
            .collect();
        Ok(f(&dual_x).jacobian[0].clone())
    }

    /// Jacobian of vector-valued `f` at `x`, processed `chunk_size` columns
    /// per evaluation of `f`.
    ///
    /// # Errors
    /// Returns `DimensionMismatch` when `x.len()` differs from the
    /// configured variable count.
    ///
    /// # Panics
    /// Panics if `chunk_size == 0` (from `Iterator::step_by`).
    pub fn jacobian_vectorized<Func>(
        &self,
        f: Func,
        x: ArrayView1<F>,
        chunk_size: usize,
    ) -> IntegrateResult<Array2<F>>
    where
        Func: Fn(&[DualVector<F>]) -> Vec<DualVector<F>> + Clone,
    {
        if x.len() != self.n_vars {
            return Err(IntegrateError::DimensionMismatch(format!(
                "Expected {} variables, got {}",
                self.n_vars,
                x.len()
            )));
        }
        // Probe with constant inputs to learn the output dimension.
        let probe: Vec<_> = (0..self.n_vars)
            .map(|i| DualVector::constant(Array1::from_elem(1, x[i])))
            .collect();
        let n_outputs = f(&probe).len();
        let mut jacobian = Array2::zeros((n_outputs, self.n_vars));
        for chunk_start in (0..self.n_vars).step_by(chunk_size) {
            let chunk_end = (chunk_start + chunk_size).min(self.n_vars);
            let width = chunk_end - chunk_start;
            // Identity seeds for the variables inside this chunk only.
            let dual_x: Vec<_> = (0..self.n_vars)
                .map(|i| {
                    let mut seeds = Array1::zeros(width);
                    if (chunk_start..chunk_end).contains(&i) {
                        seeds[i - chunk_start] = F::one();
                    }
                    DualVector {
                        values: Array1::from_elem(1, x[i]),
                        jacobian: Array1::from_elem(1, seeds),
                    }
                })
                .collect();
            for (row, out) in f(&dual_x).iter().enumerate() {
                for col in 0..width {
                    jacobian[[row, chunk_start + col]] = out.jacobian[0][col];
                }
            }
        }
        Ok(jacobian)
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_forward_gradient() {
        // f(x, y) = x² + y², so ∇f = (2x, 2y); at (3, 4) that is (6, 8).
        let square_sum = |v: &[Dual<f64>]| v[0] * v[0] + v[1] * v[1];
        let point = Array1::from_vec(vec![3.0, 4.0]);
        let grad = forward_gradient(square_sum, point.view()).expect("Operation failed");
        let expected = [6.0, 8.0];
        for (g, e) in grad.iter().zip(expected.iter()) {
            assert!((g - e).abs() < 1e-10);
        }
    }

    #[test]
    fn test_forward_jacobian() {
        // f(x, y) = (x², xy, y²); Jacobian at (2, 3) row by row.
        let f = |v: &[Dual<f64>]| vec![v[0] * v[0], v[0] * v[1], v[1] * v[1]];
        let point = Array1::from_vec(vec![2.0, 3.0]);
        let jac = forward_jacobian(f, point.view()).expect("Operation failed");
        let expected = [[4.0, 0.0], [3.0, 2.0], [0.0, 6.0]];
        for (i, row) in expected.iter().enumerate() {
            for (j, e) in row.iter().enumerate() {
                assert!((jac[[i, j]] - e).abs() < 1e-10);
            }
        }
    }
}