use crate::dae::types::DAEIndex;
use crate::dae::utils::{compute_constraint_jacobian, is_singular_matrix};
use crate::error::{IntegrateError, IntegrateResult};
use scirs2_core::ndarray::{Array1, Array2, ArrayView1, ArrayView2, ScalarOperand};
use scirs2_core::numeric::{Float, FromPrimitive};
use std::fmt::{Debug, Display, LowerExp};
/// Structural description of a differential-algebraic equation (DAE) system.
///
/// Tracks the split between differential and algebraic variables/equations,
/// the index classification, and cached structural data (Jacobians,
/// incidence matrix, dependency lists) used by the index-reduction
/// machinery in this module.
#[derive(Debug, Clone)]
pub struct DAEStructure<
F: Float
+ FromPrimitive
+ Debug
+ ScalarOperand
+ std::ops::AddAssign
+ std::ops::SubAssign
+ std::ops::MulAssign
+ std::ops::DivAssign
+ Display
+ LowerExp
+ std::iter::Sum
+ crate::common::IntegrateFloat,
> {
/// Number of differential variables (the `x` block).
pub n_differential: usize,
/// Number of algebraic variables (the `y` block).
pub n_algebraic: usize,
/// Number of differential equations.
pub n_diff_eqs: usize,
/// Number of algebraic (constraint) equations.
pub n_alg_eqs: usize,
/// Classified DAE index (Index1 / Index2 / Index3 / HigherIndex).
pub index: DAEIndex,
/// Differentiation index as a plain integer (1, 2, 3, ...).
pub diff_index: usize,
/// Cached Jacobian of the constraints w.r.t. the algebraic variables
/// (dg/dy), populated by `compute_index`.
pub constraint_jacobian: Option<Array2<F>>,
/// Jacobian w.r.t. derivatives; never populated in the code visible here,
/// only reset to `None` -- TODO confirm external users.
pub derivative_jacobian: Option<Array2<F>>,
/// Boolean equation-by-variable incidence matrix
/// (`true` = equation i structurally involves variable j).
pub incidence_matrix: Option<Array2<bool>>,
/// For each variable, the list of equation indices it appears in.
pub variable_dependencies: Option<Vec<Vec<usize>>>,
/// For each equation, the list of variable indices it involves.
pub equation_dependencies: Option<Vec<Vec<usize>>>,
}
impl<
    F: Float
        + FromPrimitive
        + Debug
        + ScalarOperand
        + std::ops::AddAssign
        + std::ops::SubAssign
        + std::ops::MulAssign
        + std::ops::DivAssign
        + Display
        + LowerExp
        + std::iter::Sum
        + crate::common::IntegrateFloat,
    > Default for DAEStructure<F>
{
    /// An empty DAE structure: no variables, no equations, provisionally
    /// classified as index-1, with no cached structural information.
    fn default() -> Self {
        Self {
            // Dimension counters all start at zero.
            n_differential: 0,
            n_algebraic: 0,
            n_diff_eqs: 0,
            n_alg_eqs: 0,
            // Provisional classification until `compute_index` runs.
            index: DAEIndex::Index1,
            diff_index: 1,
            // No cached structural data yet.
            constraint_jacobian: None,
            derivative_jacobian: None,
            incidence_matrix: None,
            variable_dependencies: None,
            equation_dependencies: None,
        }
    }
}
impl<
F: Float
+ FromPrimitive
+ Debug
+ ScalarOperand
+ std::ops::AddAssign
+ std::ops::SubAssign
+ std::ops::MulAssign
+ std::ops::DivAssign
+ Display
+ LowerExp
+ std::iter::Sum
+ crate::common::IntegrateFloat,
> DAEStructure<F>
{
/// Structure for a semi-explicit DAE x' = f(t, x, y), 0 = g(t, x, y):
/// one differential equation per differential variable and one algebraic
/// equation per algebraic variable. Index is provisionally set to 1.
pub fn new_semi_explicit(n_differential: usize, nalgebraic: usize) -> Self {
DAEStructure {
n_differential,
n_algebraic: nalgebraic,
n_diff_eqs: n_differential,
n_alg_eqs: nalgebraic,
index: DAEIndex::Index1, diff_index: 1, constraint_jacobian: None,
derivative_jacobian: None,
incidence_matrix: None,
variable_dependencies: None,
equation_dependencies: None,
}
}
/// Structure for a fully implicit DAE F(t, x, x') = 0: all variables are
/// recorded as differential and the algebraic counts stay at zero.
/// Note that the equation count may differ from the variable count.
pub fn new_fully_implicit(n_equations: usize, nvariables: usize) -> Self {
DAEStructure {
n_differential: nvariables, n_algebraic: 0,
n_diff_eqs: n_equations,
n_alg_eqs: 0,
index: DAEIndex::Index1, diff_index: 1, constraint_jacobian: None,
derivative_jacobian: None,
incidence_matrix: None,
variable_dependencies: None,
equation_dependencies: None,
}
}
/// Classifies the differentiation index at the point (t, x, y) by testing
/// regularity of finite-difference Jacobians:
/// * dg/dy nonsingular                    -> index 1
/// * dg/dy + (dg/dx)(df/dy) nonsingular   -> index 2
/// * otherwise                            -> index 3 (higher indices are
///   not distinguished here)
/// Side effects: caches dg/dy in `self.constraint_jacobian` and stores the
/// classification in `self.index` / `self.diff_index`.
pub fn compute_index<FFunc, GFunc>(
&mut self,
t: F,
x: ArrayView1<F>,
y: ArrayView1<F>,
f: &FFunc,
g: &GFunc,
) -> IntegrateResult<DAEIndex>
where
FFunc: Fn(F, ArrayView1<F>, ArrayView1<F>) -> Array1<F>,
GFunc: Fn(F, ArrayView1<F>, ArrayView1<F>) -> Array1<F>,
{
let x_slice: Vec<F> = x.to_vec();
let y_slice: Vec<F> = y.to_vec();
// dg/dy via the shared finite-difference utility (slice-based adapter).
let g_y = compute_constraint_jacobian(
&|t, x, y| g(t, ArrayView1::from(x), ArrayView1::from(y)).to_vec(),
t,
&x_slice,
&y_slice,
);
self.constraint_jacobian = Some(g_y.clone());
let singular = is_singular_matrix(g_y.view());
if !singular {
// Regular dg/dy: algebraic variables solvable directly => index 1.
self.index = DAEIndex::Index1;
self.diff_index = 1;
return Ok(DAEIndex::Index1);
}
// Index-2 test matrix: g_y + g_x * f_y.
// NOTE(review): f_x is computed but never used here.
let _f_x = compute_jacobian_for_variables(f, t, x, y, 0, self.n_differential)?;
let f_y =
compute_jacobian_for_variables(f, t, x, y, self.n_differential, self.n_algebraic)?;
let g_x = compute_jacobian_for_variables(g, t, x, y, 0, self.n_differential)?;
let mut index2_matrix = Array2::<F>::zeros((self.n_alg_eqs, self.n_algebraic));
let g_x_f_y = g_x.dot(&f_y);
for i in 0..self.n_alg_eqs {
for j in 0..self.n_algebraic {
// NOTE(review): assumes g_y is at least (n_alg_eqs x n_algebraic);
// only the g_x_f_y access is bounds-guarded -- TODO confirm.
index2_matrix[[i, j]] = g_y[[i, j]];
if j < g_x_f_y.dim().1 {
index2_matrix[[i, j]] += g_x_f_y[[i, j]];
}
}
}
let index2_singular = is_singular_matrix(index2_matrix.view());
if !index2_singular {
self.index = DAEIndex::Index2;
self.diff_index = 2;
return Ok(DAEIndex::Index2);
}
// Anything still singular is reported as index 3.
self.index = DAEIndex::Index3;
self.diff_index = 3;
Ok(DAEIndex::Index3)
}
}
/// Index reduction in the spirit of the Pantelides algorithm: structurally
/// singular equation subsets are located via the incidence matrix and
/// differentiated until the system reaches index 1.
pub struct PantelidesReducer<
F: Float
+ FromPrimitive
+ Debug
+ ScalarOperand
+ std::ops::AddAssign
+ std::ops::SubAssign
+ std::ops::MulAssign
+ std::ops::DivAssign
+ Display
+ LowerExp
+ std::iter::Sum
+ crate::common::IntegrateFloat,
> {
/// Structural description of the DAE being reduced.
pub structure: DAEStructure<F>,
/// Maximum number of differentiation steps.
/// NOTE(review): not read by `reduce_index`, which uses its own internal
/// cap of 10 iterations -- TODO confirm intent.
pub max_diff_steps: usize,
/// Threshold below which a Jacobian entry is treated as structurally zero
/// when building the incidence matrix.
pub tol: F,
}
impl<
F: Float
+ FromPrimitive
+ Debug
+ ScalarOperand
+ std::ops::AddAssign
+ std::ops::SubAssign
+ std::ops::MulAssign
+ std::ops::DivAssign
+ Display
+ LowerExp
+ std::iter::Sum
+ crate::common::IntegrateFloat,
> PantelidesReducer<F>
{
/// Creates a reducer with default settings: at most 5 differentiation
/// steps recorded and a structural-zero tolerance of 1e-10.
pub fn new(structure: DAEStructure<F>) -> Self {
PantelidesReducer {
structure,
max_diff_steps: 5, tol: F::from_f64(1e-10).expect("Operation failed"),
}
}
/// Builds the boolean incidence matrix (which equation structurally
/// involves which variable) by thresholding finite-difference Jacobians of
/// f and g at `self.tol`, then derives per-variable and per-equation
/// dependency lists. Row order is [differential eqs | algebraic eqs],
/// column order [differential vars | algebraic vars].
pub fn initialize_incidence_matrix<FFunc, GFunc>(
&mut self,
t: F,
x: ArrayView1<F>,
y: ArrayView1<F>,
f: &FFunc,
g: &GFunc,
) -> IntegrateResult<()>
where
FFunc: Fn(F, ArrayView1<F>, ArrayView1<F>) -> Array1<F>,
GFunc: Fn(F, ArrayView1<F>, ArrayView1<F>) -> Array1<F>,
{
let n_diff = self.structure.n_differential;
let n_alg = self.structure.n_algebraic;
let n_diff_eqs = self.structure.n_diff_eqs;
let n_alg_eqs = self.structure.n_alg_eqs;
let n_vars = n_diff + n_alg;
let n_eqs = n_diff_eqs + n_alg_eqs;
let mut incidence = Array2::<bool>::from_elem((n_eqs, n_vars), false);
// Finite-difference Jacobian blocks of f (rates) and g (constraints).
let f_x = compute_jacobian_for_variables(f, t, x, y, 0, n_diff)?;
let f_y = compute_jacobian_for_variables(f, t, x, y, n_diff, n_alg)?;
let g_x = compute_jacobian_for_variables(g, t, x, y, 0, n_diff)?;
let g_y = compute_jacobian_for_variables(g, t, x, y, n_diff, n_alg)?;
// Differential equations: rows 0..n_diff_eqs.
for i in 0..n_diff_eqs {
for j in 0..n_diff {
if f_x[[i, j]].abs() > self.tol {
incidence[[i, j]] = true;
}
}
for j in 0..n_alg {
if j < f_y.dim().1 && f_y[[i, j]].abs() > self.tol {
incidence[[i, n_diff + j]] = true;
}
}
}
// Algebraic (constraint) equations: rows n_diff_eqs..n_eqs.
for i in 0..n_alg_eqs {
for j in 0..n_diff {
if j < g_x.dim().1 && g_x[[i, j]].abs() > self.tol {
incidence[[n_diff_eqs + i, j]] = true;
}
}
for j in 0..n_alg {
if j < g_y.dim().1 && g_y[[i, j]].abs() > self.tol {
incidence[[n_diff_eqs + i, n_diff + j]] = true;
}
}
}
self.structure.incidence_matrix = Some(incidence);
// Derive dependency lists from the freshly stored incidence matrix.
let mut var_deps = Vec::with_capacity(n_vars);
let mut eq_deps = Vec::with_capacity(n_eqs);
for j in 0..n_vars {
let mut deps = Vec::new();
for i in 0..n_eqs {
if let Some(incidence) = &self.structure.incidence_matrix {
if incidence[[i, j]] {
deps.push(i);
}
}
}
var_deps.push(deps);
}
for i in 0..n_eqs {
let mut deps = Vec::new();
for j in 0..n_vars {
if let Some(incidence) = &self.structure.incidence_matrix {
if incidence[[i, j]] {
deps.push(j);
}
}
}
eq_deps.push(deps);
}
self.structure.variable_dependencies = Some(var_deps);
self.structure.equation_dependencies = Some(eq_deps);
Ok(())
}
/// Iteratively differentiates structurally singular equation subsets until
/// the system is classified index-1 or an internal cap of 10 iterations is
/// exhausted (error in that case). Leaves the updated classification in
/// `self.structure`.
pub fn reduce_index<FFunc, GFunc>(
&mut self,
t: F,
x: ArrayView1<F>,
y: ArrayView1<F>,
f: &FFunc,
g: &GFunc,
) -> IntegrateResult<()>
where
FFunc: Fn(F, ArrayView1<F>, ArrayView1<F>) -> Array1<F>,
GFunc: Fn(F, ArrayView1<F>, ArrayView1<F>) -> Array1<F>,
{
self.structure.compute_index(t, x, y, f, g)?;
if self.structure.index == DAEIndex::Index1 {
return Ok(());
}
// Lazily build the structural data on first use.
if self.structure.incidence_matrix.is_none() {
self.initialize_incidence_matrix(t, x, y, f, g)?;
}
let max_iterations = 10;
let mut current_iteration = 0;
while self.structure.index != DAEIndex::Index1 && current_iteration < max_iterations {
current_iteration += 1;
let singular_subsets = self.find_singular_subsets()?;
// No structurally singular subsets left: nothing more to differentiate.
if singular_subsets.is_empty() {
break;
}
let differentiated_equations =
self.differentiate_singular_equations(&singular_subsets)?;
self.update_structure_after_differentiation(&differentiated_equations)?;
self.structure.compute_index(t, x, y, f, g)?;
// NOTE(review): the decrement below overrides the classification just
// produced by compute_index, asserting that each pass lowers the index
// by one -- TODO confirm this is intended.
if self.structure.diff_index > 0 {
self.structure.diff_index -= 1;
}
self.structure.index = match self.structure.diff_index {
0 | 1 => DAEIndex::Index1,
2 => DAEIndex::Index2,
3 => DAEIndex::Index3,
_ => DAEIndex::HigherIndex,
};
}
if self.structure.index != DAEIndex::Index1 {
return Err(IntegrateError::ConvergenceError(format!(
"Failed to reduce DAE to index-1 after {max_iterations} iterations"
)));
}
Ok(())
}
/// Finds structurally singular equation subsets: groups of equations (size
/// 2 up to min(n_eqs, 6), to bound the combinatorial search) that together
/// involve fewer variables than there are equations in the group.
fn find_singular_subsets(&self) -> IntegrateResult<Vec<Vec<usize>>> {
let mut singular_subsets = Vec::new();
if let Some(ref incidence) = self.structure.incidence_matrix {
let n_eqs = incidence.nrows();
let n_vars = incidence.ncols();
for subset_size in 2..=std::cmp::min(n_eqs, 6) {
let equation_combinations = generate_combinations(n_eqs, subset_size);
for eq_subset in equation_combinations {
// Union of variables appearing in any equation of the subset.
let mut involved_vars = std::collections::HashSet::new();
for &eq_idx in &eq_subset {
for var_idx in 0..n_vars {
if incidence[[eq_idx, var_idx]] {
involved_vars.insert(var_idx);
}
}
}
// More equations than variables => structurally singular.
if eq_subset.len() > involved_vars.len() {
singular_subsets.push(eq_subset);
}
}
}
}
Ok(singular_subsets)
}
/// Selects which equations to differentiate: the first equation of each
/// singular subset (a simple heuristic; duplicates are not removed).
fn differentiate_singular_equations(
&self,
singular_subsets: &[Vec<usize>],
) -> IntegrateResult<Vec<usize>> {
let mut equations_to_differentiate = Vec::new();
for subset in singular_subsets {
if !subset.is_empty() {
equations_to_differentiate.push(subset[0]);
}
}
Ok(equations_to_differentiate)
}
/// Grows the structure after differentiation: each differentiated equation
/// contributes one new differential variable and one new equation, and the
/// incidence matrix is extended accordingly. Cached Jacobians are
/// invalidated and the recorded differentiation index is decremented.
fn update_structure_after_differentiation(
&mut self,
differentiated_equations: &[usize],
) -> IntegrateResult<()> {
if differentiated_equations.is_empty() {
return Ok(());
}
let num_new_variables = differentiated_equations.len();
let old_n_diff = self.structure.n_differential;
let old_n_alg = self.structure.n_algebraic;
let old_n_vars = old_n_diff + old_n_alg;
let old_n_eqs = self.structure.n_diff_eqs + self.structure.n_alg_eqs;
self.structure.n_differential += num_new_variables;
self.structure.n_diff_eqs += num_new_variables;
let new_n_vars = self.structure.n_differential + self.structure.n_algebraic;
let new_n_eqs = self.structure.n_diff_eqs + self.structure.n_alg_eqs;
if let Some(ref old_incidence) = self.structure.incidence_matrix.clone() {
let mut new_incidence = Array2::<bool>::from_elem((new_n_eqs, new_n_vars), false);
// Copy the old incidence pattern into the top-left corner.
for i in 0..old_n_eqs {
for j in 0..old_n_vars {
new_incidence[[i, j]] = old_incidence[[i, j]];
}
}
for (new_eq_idx, &orig_eq_idx) in differentiated_equations.iter().enumerate() {
let new_eq_row = old_n_eqs + new_eq_idx;
let new_var_col = old_n_vars + new_eq_idx;
// The new (differentiated) equation involves its new variable...
new_incidence[[new_eq_row, new_var_col]] = true;
// ...and inherits the incidence pattern of the original equation.
for j in 0..old_n_vars {
if orig_eq_idx < old_n_eqs && old_incidence[[orig_eq_idx, j]] {
new_incidence[[new_eq_row, j]] = true;
}
}
// NOTE(review): this loop over 0..old_n_diff repeats a subrange of
// the loop above with the same body -- it is redundant as written.
for j in 0..old_n_diff {
if orig_eq_idx < old_n_eqs && old_incidence[[orig_eq_idx, j]] {
new_incidence[[new_eq_row, j]] = true;
}
}
}
self.structure.incidence_matrix = Some(new_incidence);
}
self.update_dependencies_after_structure_change()?;
if self.structure.diff_index > 1 {
self.structure.diff_index -= 1;
}
// Cached Jacobians no longer match the enlarged system.
self.structure.constraint_jacobian = None;
self.structure.derivative_jacobian = None;
Ok(())
}
/// Rebuilds the per-variable and per-equation dependency lists from the
/// current incidence matrix (no-op if no incidence matrix is stored).
fn update_dependencies_after_structure_change(&mut self) -> IntegrateResult<()> {
if let Some(ref incidence) = self.structure.incidence_matrix {
let n_eqs = incidence.nrows();
let n_vars = incidence.ncols();
let mut var_deps = Vec::new();
for j in 0..n_vars {
let mut deps = Vec::new();
for i in 0..n_eqs {
if incidence[[i, j]] {
deps.push(i);
}
}
var_deps.push(deps);
}
let mut eq_deps = Vec::new();
for i in 0..n_eqs {
let mut deps = Vec::new();
for j in 0..n_vars {
if incidence[[i, j]] {
deps.push(j);
}
}
eq_deps.push(deps);
}
self.structure.variable_dependencies = Some(var_deps);
self.structure.equation_dependencies = Some(eq_deps);
}
Ok(())
}
}
/// Result of the dummy-derivative pivot selection: which variables become
/// "dummy" variables, which equations pair with them, and the factors of the
/// pivoted decomposition that produced the choice.
#[derive(Debug, Clone)]
struct DummySelection<
F: Float
+ FromPrimitive
+ Debug
+ ScalarOperand
+ std::ops::AddAssign
+ std::ops::SubAssign
+ std::ops::MulAssign
+ std::ops::DivAssign
+ Display
+ LowerExp
+ std::iter::Sum
+ crate::common::IntegrateFloat,
> {
/// Column indices chosen as dummy variables.
dummy_vars: Vec<usize>,
/// Equation indices paired with the dummy variables.
dummy_eqs: Vec<usize>,
/// Orthogonal factor. NOTE(review): `qr_decomposition_with_pivoting`
/// returns this as the identity (never accumulated).
q: Array2<F>,
/// Upper factor; its diagonal is used for rank estimation.
r: Array2<F>,
}
/// Index reduction via the dummy-derivative method: selected variables are
/// re-labelled as "dummy" variables with paired equations so that the
/// reduced system can be treated as index-1.
pub struct DummyDerivativeReducer<
F: Float
+ FromPrimitive
+ Debug
+ ScalarOperand
+ std::ops::AddAssign
+ std::ops::SubAssign
+ std::ops::MulAssign
+ std::ops::DivAssign
+ Display
+ LowerExp
+ std::iter::Sum
+ crate::common::IntegrateFloat,
> {
/// Structural description of the DAE being reduced.
pub structure: DAEStructure<F>,
/// Indices of the variables designated as dummy derivatives
/// (filled by `reduce_index`).
pub dummy_variables: Vec<usize>,
/// Indices of the equations paired with the dummy variables.
pub dummy_equations: Vec<usize>,
}
impl<
F: Float
+ FromPrimitive
+ Debug
+ ScalarOperand
+ std::ops::AddAssign
+ std::ops::SubAssign
+ std::ops::MulAssign
+ std::ops::DivAssign
+ Display
+ LowerExp
+ std::iter::Sum
+ crate::common::IntegrateFloat,
> DummyDerivativeReducer<F>
{
/// Creates a reducer for `structure` with no dummy variables selected yet.
pub fn new(structure: DAEStructure<F>) -> Self {
DummyDerivativeReducer {
structure,
dummy_variables: Vec::new(),
dummy_equations: Vec::new(),
}
}
/// Runs the dummy-derivative reduction at the point (t, x, y):
/// classify the index, differentiate the constraints, build the extended
/// Jacobian, select dummy variables by rank-revealing pivoting, update the
/// structure, and verify the result is index-1 (error otherwise).
pub fn reduce_index<FFunc, GFunc>(
&mut self,
t: F,
x: ArrayView1<F>,
y: ArrayView1<F>,
f: &FFunc,
g: &GFunc,
) -> IntegrateResult<()>
where
FFunc: Fn(F, ArrayView1<F>, ArrayView1<F>) -> Array1<F>,
GFunc: Fn(F, ArrayView1<F>, ArrayView1<F>) -> Array1<F>,
{
self.structure.compute_index(t, x, y, f, g)?;
// Already index-1: nothing to do.
if self.structure.index == DAEIndex::Index1 {
return Ok(());
}
let constraint_derivative = self.differentiate_constraints(t, x, y, g)?;
let extended_jacobian =
self.build_extended_jacobian(t, x, y, f, g, &constraint_derivative)?;
let dummy_selection = self.select_dummy_variables(&extended_jacobian)?;
self.dummy_variables = dummy_selection.dummy_vars;
self.dummy_equations = dummy_selection.dummy_eqs;
self.update_structure_with_dummies()?;
let final_index = self.verify_reduced_index(t, x, y, f, g)?;
if final_index != DAEIndex::Index1 {
return Err(IntegrateError::ComputationError(format!(
"Dummy derivative method failed to reduce to index-1. Final index: {final_index:?}"
)));
}
self.structure.index = DAEIndex::Index1;
self.structure.diff_index = 1;
Ok(())
}
/// Forward finite difference of the constraints in t only (step 1e-8).
/// NOTE(review): this is the partial derivative dg/dt; the chain-rule
/// terms through x(t) and y(t) are not included -- TODO confirm intent.
fn differentiate_constraints<GFunc>(
&self,
t: F,
x: ArrayView1<F>,
y: ArrayView1<F>,
g: &GFunc,
) -> IntegrateResult<Array1<F>>
where
GFunc: Fn(F, ArrayView1<F>, ArrayView1<F>) -> Array1<F>,
{
let h = F::from_f64(1e-8).expect("Operation failed");
let g0 = g(t, x, y);
let g_plus_t = g(t + h, x, y);
let dg_dt = (&g_plus_t - &g0) / h;
Ok(dg_dt)
}
/// Assembles the square extended Jacobian of size
/// (n_diff + n_alg + n_constraints). Layout by block row:
/// [f_x f_y 0; g_x g_y 0; g_x g_y I] where the identity block marks the
/// differentiated-constraint rows. Out-of-range Jacobian entries are left
/// at zero (all block copies are bounds-guarded).
fn build_extended_jacobian<FFunc, GFunc>(
&self,
t: F,
x: ArrayView1<F>,
y: ArrayView1<F>,
f: &FFunc,
g: &GFunc,
constraint_derivative: &Array1<F>,
) -> IntegrateResult<Array2<F>>
where
FFunc: Fn(F, ArrayView1<F>, ArrayView1<F>) -> Array1<F>,
GFunc: Fn(F, ArrayView1<F>, ArrayView1<F>) -> Array1<F>,
{
let n_diff = self.structure.n_differential;
let n_alg = self.structure.n_algebraic;
let n_constraints = constraint_derivative.len();
let extended_size = n_diff + n_alg + n_constraints;
let mut extended_jacobian = Array2::<F>::zeros((extended_size, extended_size));
let f_x = compute_jacobian_for_variables(f, t, x, y, 0, n_diff)?;
let f_y = compute_jacobian_for_variables(f, t, x, y, n_diff, n_alg)?;
let g_x = compute_jacobian_for_variables(g, t, x, y, 0, n_diff)?;
let g_y = compute_jacobian_for_variables(g, t, x, y, n_diff, n_alg)?;
// Block row 1: differential equations [f_x f_y].
for i in 0..n_diff {
for j in 0..n_diff {
if j < f_x.dim().1 {
extended_jacobian[[i, j]] = f_x[[i, j]];
}
}
for j in 0..n_alg {
if j < f_y.dim().1 {
extended_jacobian[[i, n_diff + j]] = f_y[[i, j]];
}
}
}
// Block row 2: constraint equations [g_x g_y].
for i in 0..n_alg {
for j in 0..n_diff {
if j < g_x.dim().1 && i < g_x.dim().0 {
extended_jacobian[[n_diff + i, j]] = g_x[[i, j]];
}
}
for j in 0..n_alg {
if j < g_y.dim().1 && i < g_y.dim().0 {
extended_jacobian[[n_diff + i, n_diff + j]] = g_y[[i, j]];
}
}
}
// Block row 3: differentiated constraints, same [g_x g_y] pattern plus
// an identity coupling to the extra unknowns.
for i in 0..n_constraints {
for j in 0..n_diff {
if j < g_x.dim().1 && i < g_x.dim().0 {
extended_jacobian[[n_diff + n_alg + i, j]] = g_x[[i, j]];
}
}
for j in 0..n_alg {
if j < g_y.dim().1 && i < g_y.dim().0 {
extended_jacobian[[n_diff + n_alg + i, n_diff + j]] = g_y[[i, j]];
}
}
if i < extended_size - n_diff - n_alg {
extended_jacobian[[n_diff + n_alg + i, n_diff + n_alg + i]] = F::one();
}
}
Ok(extended_jacobian)
}
/// Picks dummy variables from the rank deficiency of the extended
/// Jacobian: the pivot columns beyond the numerical rank become dummies;
/// a fallback sweep tops the selection up to `total_vars - rank` entries.
/// Each dummy variable is paired with a fresh equation index appended
/// after the existing rows.
fn select_dummy_variables(
&self,
extended_jacobian: &Array2<F>,
) -> IntegrateResult<DummySelection<F>> {
let n_diff = self.structure.n_differential;
let n_alg = self.structure.n_algebraic;
let total_vars = n_diff + n_alg;
let (q, r, pivots) = self.qr_decomposition_with_pivoting(extended_jacobian)?;
let rank = Self::compute_matrix_rank(&r)?;
let n_dummy = total_vars.saturating_sub(rank);
let mut dummy_vars = Vec::new();
let mut dummy_eqs = Vec::new();
// Columns pivoted beyond the rank are the prime dummy candidates.
for i in rank..total_vars.min(pivots.len()) {
if pivots[i] < total_vars {
dummy_vars.push(pivots[i]);
dummy_eqs.push(n_diff + n_alg + dummy_eqs.len());
}
}
// Fallback: fill up with the lowest-numbered unused variables.
while dummy_vars.len() < n_dummy && dummy_vars.len() < total_vars {
for i in 0..total_vars {
if !dummy_vars.contains(&i) && dummy_vars.len() < n_dummy {
dummy_vars.push(i);
dummy_eqs.push(n_diff + n_alg + dummy_eqs.len());
}
}
}
Ok(DummySelection {
dummy_vars,
dummy_eqs,
q,
r,
})
}
/// Column-pivoted Gram-Schmidt-style sweep used only for pivot ordering
/// and rank estimation.
/// NOTE(review): `q` is returned as the identity (never accumulated), and
/// columns are normalized by their trailing-part norm, so (q, r) is not a
/// true QR factorization; only `pivots` and the magnitude pattern of `r`'s
/// diagonal are meaningful downstream -- TODO confirm.
fn qr_decomposition_with_pivoting(
&self,
matrix: &Array2<F>,
) -> IntegrateResult<(Array2<F>, Array2<F>, Vec<usize>)> {
let (m, n) = matrix.dim();
let mut a = matrix.clone();
let q = Array2::<F>::eye(m);
let mut pivots: Vec<usize> = (0..n).collect();
for k in 0..std::cmp::min(m, n) {
// Find the remaining column with the largest trailing norm.
let mut max_norm = F::zero();
let mut max_col = k;
for j in k..n {
let col_norm = (k..m)
.map(|i| a[[i, j]].powi(2))
.fold(F::zero(), |acc, x| acc + x)
.sqrt();
if col_norm > max_norm {
max_norm = col_norm;
max_col = j;
}
}
// Swap that column into position k and record the pivot.
if max_col != k {
for i in 0..m {
let temp = a[[i, k]];
a[[i, k]] = a[[i, max_col]];
a[[i, max_col]] = temp;
}
pivots.swap(k, max_col);
}
// Normalize column k and orthogonalize the trailing columns
// against it (skipped for numerically zero columns).
if max_norm > F::from_f64(1e-12).expect("Operation failed") {
for i in k..m {
a[[i, k]] /= max_norm;
}
for j in (k + 1)..n {
let dot_product = (k..m)
.map(|i| a[[i, k]] * a[[i, j]])
.fold(F::zero(), |acc, x| acc + x);
for i in k..m {
a[[i, j]] = a[[i, j]] - dot_product * a[[i, k]];
}
}
}
}
let r = a.clone();
Ok((q, r, pivots))
}
/// Numerical rank estimate: counts diagonal entries of `r` with magnitude
/// above 1e-10.
fn compute_matrix_rank(r: &Array2<F>) -> IntegrateResult<usize> {
let tolerance = F::from_f64(1e-10).expect("Operation failed");
let min_dim = std::cmp::min(r.nrows(), r.ncols());
let mut rank = 0;
for i in 0..min_dim {
if r[[i, i]].abs() > tolerance {
rank += 1;
}
}
Ok(rank)
}
/// Grows the structure by one differential variable and one differential
/// equation per selected dummy, and invalidates all cached structural
/// data.
fn update_structure_with_dummies(&mut self) -> IntegrateResult<()> {
let n_dummy = self.dummy_variables.len();
self.structure.n_differential += n_dummy;
self.structure.n_diff_eqs += n_dummy;
self.structure.constraint_jacobian = None;
self.structure.derivative_jacobian = None;
self.structure.incidence_matrix = None;
Ok(())
}
/// Cheap post-check: re-evaluates dg/dy at (t, x, y) and reports Index1 if
/// it is nonsingular. Any remaining singularity is reported as Index2
/// (higher indices are not distinguished here).
fn verify_reduced_index<FFunc, GFunc>(
&self,
t: F,
x: ArrayView1<F>,
y: ArrayView1<F>,
_f: &FFunc,
g: &GFunc,
) -> IntegrateResult<DAEIndex>
where
FFunc: Fn(F, ArrayView1<F>, ArrayView1<F>) -> Array1<F>,
GFunc: Fn(F, ArrayView1<F>, ArrayView1<F>) -> Array1<F>,
{
let x_slice: Vec<F> = x.to_vec();
let y_slice: Vec<F> = y.to_vec();
let g_y = compute_constraint_jacobian(
&|t, x, y| g(t, ArrayView1::from(x), ArrayView1::from(y)).to_vec(),
t,
&x_slice,
&y_slice,
);
let singular = is_singular_matrix(g_y.view());
if !singular {
Ok(DAEIndex::Index1)
} else {
Ok(DAEIndex::Index2) }
}
}
/// Constraint-projection helper: pulls the algebraic variables back onto the
/// constraint manifold g(t, x, y) = 0 after integration steps and/or at
/// initialization.
pub struct ProjectionMethod<
F: Float
+ FromPrimitive
+ Debug
+ ScalarOperand
+ std::ops::AddAssign
+ std::ops::SubAssign
+ std::ops::MulAssign
+ std::ops::DivAssign
+ Display
+ LowerExp
+ std::iter::Sum
+ crate::common::IntegrateFloat,
> {
/// Structural description of the DAE.
pub structure: DAEStructure<F>,
/// Whether to project after every step (flag presumably read by the
/// integrator driver; not consulted in this module -- TODO confirm).
pub project_after_step: bool,
/// Whether to enforce consistent initial conditions (same caveat).
pub consistent_initialization: bool,
/// Tolerance on the constraint 2-norm below which the constraints are
/// considered satisfied.
pub constraint_tol: F,
}
impl<
F: Float
+ FromPrimitive
+ Debug
+ ScalarOperand
+ std::ops::AddAssign
+ std::ops::SubAssign
+ std::ops::MulAssign
+ std::ops::DivAssign
+ Display
+ LowerExp
+ std::iter::Sum
+ crate::common::IntegrateFloat,
> ProjectionMethod<F>
{
/// Creates a projection helper with projection and consistent
/// initialization enabled and a constraint tolerance of 1e-8.
pub fn new(structure: DAEStructure<F>) -> Self {
ProjectionMethod {
structure,
project_after_step: true,
consistent_initialization: true,
constraint_tol: F::from_f64(1e-8).expect("Operation failed"),
}
}
/// One least-squares projection step onto g = 0: solves
/// (dg/dy)(dg/dy)^T lambda = -g and applies delta_y = (dg/dy)^T lambda.
/// Only `y` is corrected; `x` is taken &mut but never written here.
/// NOTE(review): `y` is updated *before* the residual check, so on the
/// error path `y` has already been modified -- TODO confirm intent.
pub fn project_solution<GFunc>(
&self,
t: F,
x: &mut Array1<F>,
y: &mut Array1<F>,
g: &GFunc,
) -> IntegrateResult<()>
where
GFunc: Fn(F, ArrayView1<F>, ArrayView1<F>) -> Array1<F>,
{
let g_val = g(t, x.view(), y.view());
let g_norm = g_val
.iter()
.fold(F::zero(), |acc, &val| acc + val.powi(2))
.sqrt();
// Already on the manifold: nothing to do.
if g_norm <= self.constraint_tol {
return Ok(());
}
let x_slice: Vec<F> = x.to_vec();
let y_slice: Vec<F> = y.to_vec();
let g_y = compute_constraint_jacobian(
&|t, x, y| g(t, ArrayView1::from(x), ArrayView1::from(y)).to_vec(),
t,
&x_slice,
&y_slice,
);
// Right-hand side: drive the constraint residual to zero.
let neg_g = g_val.mapv(|val| -val);
let (delta_y, residual) = solve_constrained_least_squares(g_y.view(), neg_g.view())?;
*y = y.clone() + delta_y;
if residual > self.constraint_tol {
return Err(IntegrateError::ComputationError(format!(
"Failed to project solution onto constraint manifold. Residual: {residual}"
)));
}
Ok(())
}
/// Repeats `project_solution` (up to 10 times) until the constraint norm
/// drops below `constraint_tol`; errors if it still has not converged.
/// Like `project_solution`, this only ever adjusts `y`, not `x`.
pub fn make_consistent<GFunc>(
&self,
t: F,
x: &mut Array1<F>,
y: &mut Array1<F>,
g: &GFunc,
) -> IntegrateResult<()>
where
GFunc: Fn(F, ArrayView1<F>, ArrayView1<F>) -> Array1<F>,
{
let max_iter = 10;
let mut iter = 0;
while iter < max_iter {
let g_val = g(t, x.view(), y.view());
let g_norm = g_val
.iter()
.fold(F::zero(), |acc, &val| acc + val.powi(2))
.sqrt();
if g_norm <= self.constraint_tol {
return Ok(());
}
self.project_solution(t, x, y, g)?;
iter += 1;
}
// Final convergence check after exhausting the iteration budget.
let g_val = g(t, x.view(), y.view());
let g_norm = g_val
.iter()
.fold(F::zero(), |acc, &val| acc + val.powi(2))
.sqrt();
if g_norm > self.constraint_tol {
return Err(IntegrateError::ComputationError(format!(
"Failed to find consistent initial conditions after {max_iter} iterations. Residual: {g_norm}"
)));
}
Ok(())
}
}
/// Forward-difference Jacobian of `f` with respect to a contiguous window of
/// `n_vars` variables starting at `start_idx` in the concatenated ordering
/// [x | y]. Columns that fall inside `x` perturb `x`; columns past `x.len()`
/// spill over into `y`. Per-variable step: max(1e-8, |v| * 1e-8).
#[allow(dead_code)]
fn compute_jacobian_for_variables<F, Func>(
    f: &Func,
    t: F,
    x: ArrayView1<F>,
    y: ArrayView1<F>,
    start_idx: usize,
    n_vars: usize,
) -> IntegrateResult<Array2<F>>
where
    F: Float
        + FromPrimitive
        + Debug
        + ScalarOperand
        + std::ops::AddAssign
        + std::ops::SubAssign
        + std::ops::MulAssign
        + std::ops::DivAssign
        + Display
        + LowerExp
        + std::iter::Sum
        + crate::common::IntegrateFloat,
    Func: Fn(F, ArrayView1<F>, ArrayView1<F>) -> Array1<F>,
{
    // Baseline evaluation; every column is a forward difference against it.
    let f_base = f(t, x, y);
    let n_eqs = f_base.len();
    let mut jac = Array2::<F>::zeros((n_eqs, n_vars));
    let eps = F::from_f64(1e-8).expect("Operation failed");

    // Columns whose variables live inside `x`.
    if start_idx < x.len() {
        let end = (start_idx + n_vars).min(x.len());
        let mut x_pert = x.to_owned();
        for (col, var) in (start_idx..end).enumerate() {
            let h = eps.max(x[var].abs() * eps);
            x_pert[var] = x[var] + h;
            let f_plus = f(t, x_pert.view(), y);
            // Restore the perturbed entry before the next column.
            x_pert[var] = x[var];
            let df = (f_plus - &f_base) / h;
            for row in 0..n_eqs {
                jac[[row, col]] = df[row];
            }
        }
    }

    // Columns that spill past `x` into the algebraic variables `y`.
    if start_idx + n_vars > x.len() {
        let n_x_cols = x.len().saturating_sub(start_idx);
        let n_y_cols = n_vars - n_x_cols;
        let y_begin = start_idx.saturating_sub(x.len());
        let y_stop = (y_begin + n_y_cols).min(y.len());
        let mut y_pert = y.to_owned();
        for (col, var) in (y_begin..y_stop).enumerate() {
            let h = eps.max(y[var].abs() * eps);
            y_pert[var] = y[var] + h;
            let f_plus = f(t, x, y_pert.view());
            y_pert[var] = y[var];
            let df = (f_plus - &f_base) / h;
            for row in 0..n_eqs {
                jac[[row, n_x_cols + col]] = df[row];
            }
        }
    }
    Ok(jac)
}
/// Minimum-norm correction satisfying J * delta = b, via the dual normal
/// equations: solve (J J^T) lambda = b, then delta = J^T lambda. Returns the
/// correction together with the achieved residual ||J * delta - b||_2.
#[allow(dead_code)]
fn solve_constrained_least_squares<F>(
    j: ArrayView2<F>,
    b: ArrayView1<F>,
) -> IntegrateResult<(Array1<F>, F)>
where
    F: Float
        + FromPrimitive
        + Debug
        + ScalarOperand
        + std::ops::AddAssign
        + std::ops::SubAssign
        + std::ops::MulAssign
        + std::ops::DivAssign
        + Display
        + LowerExp
        + std::iter::Sum
        + crate::common::IntegrateFloat,
{
    use crate::dae::utils::linear_solvers::solve_linear_system;
    // Gram matrix of the rows of J.
    let gram = j.dot(&j.t());
    let lambda = solve_linear_system(&gram.view(), &b.view()).map_err(|e| {
        IntegrateError::ComputationError(format!(
            "Failed to solve least squares system: {e}"
        ))
    })?;
    let delta_y = j.t().dot(&lambda);
    // Report how well the correction satisfies J * delta_y = b.
    let misfit = &j.dot(&delta_y) - &b;
    let residual = misfit
        .iter()
        .fold(F::zero(), |acc, &v| acc + v.powi(2))
        .sqrt();
    Ok((delta_y, residual))
}
/// All k-element combinations of {0, ..., n-1}, in lexicographic order.
/// Returns an empty list when k == 0 or k > n (by convention: no
/// combinations, rather than the single empty combination for k == 0).
#[allow(dead_code)]
fn generate_combinations(n: usize, k: usize) -> Vec<Vec<usize>> {
    if k == 0 || k > n {
        return vec![];
    }
    // Depth-first enumeration, with the recursion kept local to this
    // function so the block is self-contained.
    fn descend(
        start: usize,
        n: usize,
        k: usize,
        prefix: &mut Vec<usize>,
        out: &mut Vec<Vec<usize>>,
    ) {
        if prefix.len() == k {
            out.push(prefix.clone());
            return;
        }
        for i in start..n {
            // Prune branches that cannot reach k elements with what remains.
            if prefix.len() + (n - i) >= k {
                prefix.push(i);
                descend(i + 1, n, k, prefix, out);
                prefix.pop();
            }
        }
    }
    let mut out = Vec::new();
    let mut prefix = Vec::new();
    descend(0, n, k, &mut prefix, &mut out);
    out
}
/// Recursive helper: extends `current` with ascending indices drawn from
/// `start..n` until it holds `k` elements, recording every completed
/// combination in `combinations`. `current` is restored to its original
/// contents on return (push/pop backtracking).
#[allow(dead_code)]
fn generate_combinations_recursive(
    start: usize,
    n: usize,
    k: usize,
    current: &mut Vec<usize>,
    combinations: &mut Vec<Vec<usize>>,
) {
    if current.len() == k {
        combinations.push(current.clone());
        return;
    }
    // Elements still required to complete the combination (saturating so a
    // caller-supplied over-long `current` behaves like the unpruned search).
    let needed = k.saturating_sub(current.len());
    for i in start..n {
        // Skip once too few candidates remain to finish.
        if n - i >= needed {
            current.push(i);
            generate_combinations_recursive(i + 1, n, k, current, combinations);
            current.pop();
        }
    }
}