use crate::common::IntegrateFloat;
use crate::dae::index_reduction::{DAEStructure, PantelidesReducer};
use crate::dae::methods::bdf_dae::bdf_semi_explicit_dae;
use crate::dae::types::{DAEIndex, DAEOptions, DAEResult, DAEType};
use crate::error::{IntegrateError, IntegrateResult};
use crate::ode::utils::jacobian::JacobianStrategy;
use crate::ode::{solve_ivp, ODEOptions};
use scirs2_core::ndarray::{Array1, Array2, ArrayView1, ArrayView2};
/// Solve a semi-explicit index-1 DAE system
///
///     x' = f(t, x, y)
///     0  = g(t, x, y)
///
/// by differentiating the constraint to obtain an explicit ODE for the
/// algebraic variables and handing the combined system to `solve_ivp`.
///
/// # Arguments
/// * `f` - right-hand side of the differential equations
/// * `g` - algebraic constraint function (must be zero along the solution)
/// * `t_span` - integration interval `[t0, tf]`
/// * `x0` - initial differential variables
/// * `y0` - initial algebraic variables
/// * `options` - solver options; defaults are used when `None`
///
/// # Errors
/// Returns `IntegrateError::ValueError` when the initial point violates the
/// constraint `g(t0, x0, y0) = 0` beyond a fixed tolerance of 1e-8.
#[allow(dead_code)]
pub fn solve_semi_explicit_dae<F, FFunc, GFunc>(
    f: FFunc,
    g: GFunc,
    t_span: [F; 2],
    x0: Array1<F>,
    y0: Array1<F>,
    options: Option<DAEOptions<F>>,
) -> IntegrateResult<DAEResult<F>>
where
    F: IntegrateFloat + std::default::Default,
    FFunc: Fn(F, ArrayView1<F>, ArrayView1<F>) -> Array1<F> + Clone,
    GFunc: Fn(F, ArrayView1<F>, ArrayView1<F>) -> Array1<F> + Clone,
{
    let opts = options.unwrap_or_default();
    // Consistency check: ||g(t0, x0, y0)|| must be numerically zero for an
    // index-1 semi-explicit DAE.
    let g_init = g(t_span[0], x0.view(), y0.view());
    let constraint_error = g_init
        .iter()
        .fold(F::zero(), |acc, &x| acc + x.powi(2))
        .sqrt();
    let tol = F::from_f64(1e-8).expect("Operation failed");
    if constraint_error > tol {
        return Err(IntegrateError::ValueError(format!(
            "Initial condition does not satisfy constraints. Error: {constraint_error}"
        )));
    }
    // Pack the differential and algebraic variables into one state z = [x, y].
    let n_x = x0.len();
    let n_y = y0.len();
    let n_total = n_x + n_y;
    let mut z0 = Array1::zeros(n_total);
    for i in 0..n_x {
        z0[i] = x0[i];
    }
    for i in 0..n_y {
        z0[n_x + i] = y0[i];
    }
    let f_closure = f.clone();
    let g_closure = g.clone();
    // Combined ODE right-hand side. The algebraic variables are advanced via
    // the hidden constraint obtained by differentiating g along the solution:
    //     0 = dg/dt = g_t + g_x x' + g_y y'  =>  y' = -g_y^{-1} (g_t + g_x x')
    let combined_system = move |t: F, z: ArrayView1<F>| -> Array1<F> {
        let x = z.slice(scirs2_core::ndarray::s![0..n_x]);
        let y = z.slice(scirs2_core::ndarray::s![n_x..]);
        let x_dot = f_closure(t, x, y);
        let g_val = g_closure(t, x, y);
        let x_slice: Vec<F> = x.to_vec();
        let y_slice: Vec<F> = y.to_vec();
        // dg/dy via the shared finite-difference helper.
        let g_y = crate::dae::utils::compute_constraint_jacobian(
            &|t, x, y| g_closure(t, ArrayView1::from(x), ArrayView1::from(y)).to_vec(),
            t,
            &x_slice,
            &y_slice,
        );
        // dg/dx via local forward differences.
        let g_x = compute_jacobian_x(
            &g_closure,
            t,
            x,
            y,
            F::from_f64(1e-8).expect("Operation failed"),
        );
        // dg/dt via a one-sided difference with a fixed step.
        let dt = F::from_f64(1e-8).expect("Operation failed");
        let g_t_plus = g_closure(t + dt, x, y);
        let g_t = (g_t_plus - g_val.clone()) / dt;
        let g_x_dot = g_x.dot(&x_dot);
        let rhs = &g_t + &g_x_dot;
        // NOTE(review): when g_y is singular the linear solve fails and y' is
        // silently set to zero, which hides index problems — consider
        // propagating the error instead.
        let y_dot = match solve_matrix_system(g_y.view(), rhs.view()) {
            Ok(solution) => -solution,
            Err(_) => Array1::zeros(n_y),
        };
        let mut z_dot = Array1::zeros(n_total);
        for i in 0..n_x {
            z_dot[i] = x_dot[i];
        }
        for i in 0..n_y {
            z_dot[n_x + i] = y_dot[i];
        }
        z_dot
    };
    let ode_options = ODEOptions {
        method: opts.method,
        rtol: opts.rtol,
        atol: opts.atol,
        h0: opts.h0,
        max_steps: opts.max_steps,
        max_step: opts.max_step,
        min_step: opts.min_step,
        dense_output: true,
        max_order: Some(5),
        jac: None,
        jacobian_strategy: Some(JacobianStrategy::Adaptive),
        ..Default::default()
    };
    let ode_result = solve_ivp(combined_system, t_span, z0, Some(ode_options))?;
    // Split the combined trajectory back into x and y components.
    let mut x_results: Vec<Array1<F>> = Vec::with_capacity(ode_result.t.len());
    let mut y_results: Vec<Array1<F>> = Vec::with_capacity(ode_result.t.len());
    for z in &ode_result.y {
        let x_part = z.slice(scirs2_core::ndarray::s![0..n_x]).to_owned();
        let y_part = z.slice(scirs2_core::ndarray::s![n_x..]).to_owned();
        x_results.push(x_part);
        y_results.push(y_part);
    }
    let dae_result = DAEResult {
        t: ode_result.t,
        x: x_results,
        y: y_results,
        success: ode_result.success,
        message: ode_result.message,
        n_eval: ode_result.n_eval,
        // Each RHS evaluation also evaluates the constraints, so the ODE
        // evaluation count doubles as the constraint-evaluation count.
        n_constraint_eval: ode_result.n_eval,
        n_steps: ode_result.n_steps,
        n_accepted: ode_result.n_accepted,
        n_rejected: ode_result.n_rejected,
        n_lu: ode_result.n_lu,
        n_jac: ode_result.n_jac,
        method: ode_result.method,
        dae_type: DAEType::SemiExplicit,
        index: DAEIndex::Index1,
    };
    Ok(dae_result)
}
/// Compute the Jacobian dg/dx of the constraint function by forward
/// differences, perturbing one component of `x` at a time.
///
/// # Arguments
/// * `g` - constraint function `g(t, x, y)`
/// * `t` - current time
/// * `x` - differential variables (perturbed)
/// * `y` - algebraic variables (held fixed)
/// * `epsilon` - base perturbation size; the actual step scales with `|x[i]|`
///   but never drops below `epsilon`
///
/// Returns an `n_g x n_x` matrix, where `n_g = g(t, x, y).len()`.
#[allow(dead_code)]
fn compute_jacobian_x<F, GFunc>(
    g: &GFunc,
    t: F,
    x: ArrayView1<F>,
    y: ArrayView1<F>,
    epsilon: F,
) -> Array2<F>
where
    F: IntegrateFloat + std::default::Default,
    GFunc: Fn(F, ArrayView1<F>, ArrayView1<F>) -> Array1<F>,
{
    let n_x = x.len();
    // Evaluate the unperturbed constraints once and reuse it: the previous
    // version called `g` twice at the base point (once only to read the
    // output length, once for the base value).
    let g_base = g(t, x, y);
    let n_g = g_base.len();
    let mut jac = Array2::zeros((n_g, n_x));
    let mut x_plus = x.to_owned();
    for i in 0..n_x {
        // Relative step scaled by |x[i]|, floored at epsilon for tiny values.
        let h = epsilon.max(x[i].abs() * epsilon);
        x_plus[i] = x[i] + h;
        let g_plus = g(t, x_plus.view(), y);
        // Restore the perturbed component before moving to the next column.
        x_plus[i] = x[i];
        let column = (g_plus - &g_base) / h;
        for j in 0..n_g {
            jac[[j, i]] = column[j];
        }
    }
    jac
}
/// Solve the dense linear system `matrix * x = b`, wrapping any solver
/// failure into an `IntegrateError::ComputationError`.
#[allow(dead_code)]
fn solve_matrix_system<F>(matrix: ArrayView2<F>, b: ArrayView1<F>) -> IntegrateResult<Array1<F>>
where
    F: IntegrateFloat + std::default::Default,
{
    use crate::dae::utils::linear_solvers::solve_linear_system;
    // Delegate to the shared linear solver and translate its error type.
    match solve_linear_system(&matrix.view(), &b.view()) {
        Ok(solution) => Ok(solution),
        Err(err) => Err(IntegrateError::ComputationError(format!(
            "Failed to solve linear system: {err}"
        ))),
    }
}
/// Solve a fully implicit DAE `F(t, y, y') = 0` using variable-order (1-5)
/// BDF formulas with a damped Newton iteration at every step.
///
/// # Arguments
/// * `f` - residual function `F(t, y, y')`
/// * `t_span` - integration interval `[t0, tf]`
/// * `y0` - initial state
/// * `y_prime0` - initial derivative, consistent with `F(t0, y0, y_prime0) = 0`
/// * `options` - solver options; defaults are used when `None`
///
/// # Errors
/// * `ValueError` if the initial conditions do not satisfy the DAE.
/// * `ComputationError` if a Newton linear system fails or the adaptive step
///   size underflows `min_step` without converging.
#[allow(dead_code)]
pub fn solve_implicit_dae<F, FFunc>(
    f: FFunc,
    t_span: [F; 2],
    y0: Array1<F>,
    y_prime0: Array1<F>,
    options: Option<DAEOptions<F>>,
) -> IntegrateResult<DAEResult<F>>
where
    F: IntegrateFloat + std::default::Default,
    FFunc: Fn(F, ArrayView1<F>, ArrayView1<F>) -> Array1<F> + Clone,
{
    let opts = options.unwrap_or_default();
    // Consistency check: the residual at (t0, y0, y'0) must be numerically zero.
    let residual = f(t_span[0], y0.view(), y_prime0.view());
    let residual_norm = residual
        .iter()
        .fold(F::zero(), |acc, &x| acc + x.powi(2))
        .sqrt();
    let tol = F::from_f64(1e-8).expect("Operation failed");
    if residual_norm > tol {
        return Err(IntegrateError::ValueError(format!(
            "Initial conditions are not consistent with the DAE. Residual norm: {residual_norm}"
        )));
    }
    let n = y0.len();
    // Accepted solution history (time points, states, derivatives).
    let mut t_values = vec![t_span[0]];
    let mut y_values = vec![y0.clone()];
    let mut y_prime_values = vec![y_prime0.clone()];
    let mut step = 0;
    let mut t_current = t_span[0];
    let mut y_current = y0.clone();
    let mut y_prime_current = y_prime0.clone();
    // Initial/min/max step sizes default to fixed fractions of the interval.
    let mut h = opts.h0.unwrap_or_else(|| {
        let _span = t_span[1] - t_span[0];
        _span * F::from_f64(0.01).expect("Operation failed")
    });
    let min_step = opts.min_step.unwrap_or_else(|| {
        let _span = t_span[1] - t_span[0];
        _span * F::from_f64(1e-6).expect("Operation failed")
    });
    let max_step = opts.max_step.unwrap_or_else(|| {
        let _span = t_span[1] - t_span[0];
        _span * F::from_f64(0.1).expect("Operation failed")
    });
    // Residual of the k-step BDF discretization: y' is replaced by a weighted
    // combination of y_new and the most recent history states, divided by h.
    // NOTE(review): these weights match the BDF history-extrapolation (alpha)
    // form rather than the standard BDF derivative weights (BDF2 would be
    // [3/2, -2, 1/2]); verify against a BDF reference for orders > 1.
    let eval_bdf_formula = |y_new: ArrayView1<F>,
                            y_history: &[Array1<F>],
                            _y_prime_new: ArrayView1<F>,
                            t_new: F,
                            k: usize,
                            step_size: F|
     -> Array1<F> {
        let bdf_coeffs = match k {
            1 => vec![F::one(), -F::one()],
            2 => vec![
                F::from_f64(4.0 / 3.0).expect("Operation failed"),
                -F::from_f64(1.0 / 3.0).expect("Operation failed"),
                -F::one(),
            ],
            3 => vec![
                F::from_f64(18.0 / 11.0).expect("Operation failed"),
                -F::from_f64(9.0 / 11.0).expect("Operation failed"),
                F::from_f64(2.0 / 11.0).expect("Operation failed"),
                -F::one(),
            ],
            4 => vec![
                F::from_f64(48.0 / 25.0).expect("Operation failed"),
                -F::from_f64(36.0 / 25.0).expect("Operation failed"),
                F::from_f64(16.0 / 25.0).expect("Operation failed"),
                -F::from_f64(3.0 / 25.0).expect("Operation failed"),
                -F::one(),
            ],
            5 => vec![
                F::from_f64(300.0 / 137.0).expect("Operation failed"),
                -F::from_f64(300.0 / 137.0).expect("Operation failed"),
                F::from_f64(200.0 / 137.0).expect("Operation failed"),
                -F::from_f64(75.0 / 137.0).expect("Operation failed"),
                F::from_f64(12.0 / 137.0).expect("Operation failed"),
                -F::one(),
            ],
            // Unsupported order: return a zero residual.
            _ => return Array1::zeros(n),
        };
        // Coefficient 0 multiplies y_new; history is stored oldest-first, so
        // the most recent accepted states are indexed from the back.
        let mut bdf_derivative: Array1<F> = Array1::zeros(n);
        for i in 0..=k {
            if i == 0 {
                bdf_derivative += &(&y_new * bdf_coeffs[i]);
            } else if i <= y_history.len() {
                bdf_derivative += &(&y_history[y_history.len() - i] * bdf_coeffs[i]);
            }
        }
        bdf_derivative /= step_size;
        f(t_new, y_new, bdf_derivative.view())
    };
    // Statistics counters reported in the result.
    let mut n_steps = 0;
    let mut n_accepted = 0;
    let mut n_rejected = 0;
    let mut n_jac = 0;
    let mut n_lu = 0;
    let mut n_eval = 0;
    while t_current < t_span[1] && step < opts.max_steps {
        // Clip the step so the final step lands exactly on t_span[1].
        if t_current + h > t_span[1] {
            h = t_span[1] - t_current;
        }
        let t_new = t_current + h;
        // BDF order grows with the number of accepted steps, capped at 5 and
        // at the user-requested maximum order.
        let order = (step + 1).min(5).min(opts.max_order.unwrap_or(5));
        let history_start = if step >= order { step - order + 1 } else { 0 };
        let y_history = y_values[history_start..=step].to_vec();
        // Predictor: linear extrapolation from the last two accepted states.
        let mut y_pred = match step {
            0 => y_current.clone(),
            _ => {
                let mut pred = y_current.clone();
                if step > 0 {
                    let step_ratio = h / (t_current - t_values[step - 1]);
                    pred = pred + &(&y_current - &y_values[step - 1]) * step_ratio;
                }
                pred
            }
        };
        // Same extrapolation for the derivative predictor.
        let mut y_prime_pred = if step == 0 {
            y_prime_current.clone()
        } else {
            let mut pred = y_prime_current.clone();
            if step > 0 {
                let step_ratio = h / (t_current - t_values[step - 1]);
                pred = pred + &(&y_prime_current - &y_prime_values[step - 1]) * step_ratio;
            }
            pred
        };
        let max_iter = opts.max_newton_iterations;
        let newton_tol = opts.newton_tol;
        let mut converged = false;
        // Damped Newton iteration on the BDF residual.
        for _iter in 0..max_iter {
            let residual = if step == 0 || order == 1 {
                // First step / order 1: backward-Euler derivative.
                let y_prime_bdf = (&y_pred - &y_current) / h;
                f(t_new, y_pred.view(), y_prime_bdf.view())
            } else {
                eval_bdf_formula(
                    y_pred.view(),
                    &y_history,
                    y_prime_pred.view(),
                    t_new,
                    order,
                    h,
                )
            };
            n_eval += 1;
            let residual_norm = residual
                .iter()
                .fold(F::zero(), |acc, &x| acc + x.powi(2))
                .sqrt();
            if residual_norm < newton_tol {
                converged = true;
                break;
            }
            // Finite-difference Jacobians of the residual w.r.t. t, y and y'.
            let jacobian_t = compute_time_jacobian(&f, t_new, &y_pred, &y_prime_pred)?;
            let f_current = f(t_new, y_pred.view(), y_prime_pred.view());
            let jacobian_y = crate::ode::utils::common::finite_difference_jacobian(
                &|t, y| f(t, y, y_prime_pred.view()),
                t_new,
                &y_pred,
                &f_current,
                F::from_f64(1e-8).expect("Operation failed"),
            );
            let jacobian_y_prime = crate::ode::utils::common::finite_difference_jacobian(
                &|t, y_prime| f(t, y_pred.view(), y_prime),
                t_new,
                &y_prime_pred,
                &f_current,
                F::from_f64(1e-8).expect("Operation failed"),
            );
            // Three Jacobian-like evaluations per Newton iteration.
            n_jac += 3;
            // Effective Newton matrix: dF/dy plus scaled contributions below.
            let mut jacobian_effective = jacobian_y.clone();
            let use_time_jacobian = true;
            if use_time_jacobian {
                // Small heuristic diagonal contribution from the time Jacobian.
                let time_contribution_factor = F::from_f64(0.01).expect("Operation failed");
                for i in 0..n {
                    jacobian_effective[[i, i]] += jacobian_t[i] * time_contribution_factor;
                }
            }
            // Add the (d y'/d y) * dF/dy' term of the chain rule.
            // NOTE(review): only the diagonal of dF/dy' is used; off-diagonal
            // entries are dropped — confirm this is intentional.
            if step == 0 || order == 1 {
                let scale = F::one() / h;
                for i in 0..n {
                    for j in 0..n {
                        if i == j {
                            jacobian_effective[[i, j]] += jacobian_y_prime[[i, j]] * scale;
                        }
                    }
                }
            } else {
                let bdf_coeff = match order {
                    2 => F::from_f64(4.0 / 3.0).expect("Operation failed"),
                    3 => F::from_f64(18.0 / 11.0).expect("Operation failed"),
                    4 => F::from_f64(48.0 / 25.0).expect("Operation failed"),
                    5 => F::from_f64(300.0 / 137.0).expect("Operation failed"),
                    _ => F::one(),
                };
                let scale = bdf_coeff / h;
                for i in 0..n {
                    for j in 0..n {
                        if i == j {
                            jacobian_effective[[i, j]] += jacobian_y_prime[[i, j]] * scale;
                        }
                    }
                }
            }
            // Newton direction: solve J * dy = -F.
            let neg_residual = residual.mapv(|x| -x);
            let dy = match solve_matrix_system(jacobian_effective.view(), neg_residual.view()) {
                Ok(sol) => sol,
                Err(e) => {
                    return Err(IntegrateError::ComputationError(format!(
                        "Failed to solve Newton system at t = {t_new}: {e}"
                    )));
                }
            };
            n_lu += 1;
            // Backtracking line search: halve alpha until the residual drops.
            let mut alpha = F::one();
            let min_alpha = F::from_f64(0.1).expect("Operation failed");
            while alpha >= min_alpha {
                let y_new = &y_pred + &(&dy * alpha);
                let y_prime_new = if step == 0 || order == 1 {
                    (&y_new - &y_current) / h
                } else {
                    // Recompute the BDF derivative for the trial state (same
                    // coefficient table as in eval_bdf_formula above).
                    let _y_prime: Array1<F> = Array1::zeros(n);
                    let bdf_coeffs = match order {
                        2 => vec![
                            F::from_f64(4.0 / 3.0).expect("Operation failed"),
                            -F::from_f64(1.0 / 3.0).expect("Operation failed"),
                            -F::one(),
                        ],
                        3 => vec![
                            F::from_f64(18.0 / 11.0).expect("Operation failed"),
                            -F::from_f64(9.0 / 11.0).expect("Operation failed"),
                            F::from_f64(2.0 / 11.0).expect("Operation failed"),
                            -F::one(),
                        ],
                        4 => vec![
                            F::from_f64(48.0 / 25.0).expect("Operation failed"),
                            -F::from_f64(36.0 / 25.0).expect("Operation failed"),
                            F::from_f64(16.0 / 25.0).expect("Operation failed"),
                            -F::from_f64(3.0 / 25.0).expect("Operation failed"),
                            -F::one(),
                        ],
                        5 => vec![
                            F::from_f64(300.0 / 137.0).expect("Operation failed"),
                            -F::from_f64(300.0 / 137.0).expect("Operation failed"),
                            F::from_f64(200.0 / 137.0).expect("Operation failed"),
                            -F::from_f64(75.0 / 137.0).expect("Operation failed"),
                            F::from_f64(12.0 / 137.0).expect("Operation failed"),
                            -F::one(),
                        ],
                        _ => vec![F::one(), -F::one()],
                    };
                    let mut bdf_derivative = Array1::zeros(n);
                    for i in 0..=order {
                        if i == 0 {
                            bdf_derivative += &(&y_new * bdf_coeffs[i]);
                        } else if i <= y_history.len() {
                            bdf_derivative += &(&y_history[y_history.len() - i] * bdf_coeffs[i]);
                        }
                    }
                    bdf_derivative / h
                };
                let new_residual = f(t_new, y_new.view(), y_prime_new.view());
                n_eval += 1;
                let new_residual_norm = new_residual
                    .iter()
                    .fold(F::zero(), |acc, &x| acc + x.powi(2))
                    .sqrt();
                if new_residual_norm < residual_norm {
                    // Accept the damped update.
                    y_pred = y_new;
                    y_prime_pred = y_prime_new;
                    break;
                }
                alpha *= F::from_f64(0.5).expect("Operation failed");
            }
            if alpha < min_alpha {
                // Line search failed to reduce the residual; abandon Newton
                // for this step (the step will be rejected below).
                break;
            }
        }
        n_steps += 1;
        if converged {
            // Accept the step, record it, and grow the step size slightly.
            n_accepted += 1;
            t_values.push(t_new);
            y_values.push(y_pred.clone());
            y_prime_values.push(y_prime_pred.clone());
            t_current = t_new;
            y_current = y_pred.clone();
            y_prime_current = y_prime_pred.clone();
            step += 1;
            h = (h * F::from_f64(1.1).expect("Operation failed")).min(max_step);
        } else {
            // Reject and retry with a halved step size, bailing out once the
            // step size floor is reached.
            n_rejected += 1;
            h = (h * F::from_f64(0.5).expect("Operation failed")).max(min_step);
            if h <= min_step {
                return Err(IntegrateError::ComputationError(
                    format!("Integration failed at t = {t_current}. Step size got too small. The DAE might be too stiff or have index greater than 1.")
                ));
            }
        }
    }
    let success = t_current >= t_span[1];
    let message = if success {
        Some(format!(
            "Integration successful. {n_steps} steps taken, {n_accepted} accepted, {n_rejected} rejected."
        ))
    } else {
        Some(format!("Integration did not reach the end of the interval. {n_steps} steps taken, {n_accepted} accepted, {n_rejected} rejected."))
    };
    // Fully implicit systems report the state in `x`; the algebraic slot `y`
    // is filled with empty arrays of matching length.
    let mut y_result = Vec::with_capacity(t_values.len());
    let empty_array = Array1::<F>::zeros(0);
    let y_empty = vec![empty_array; t_values.len()];
    for y in y_values {
        y_result.push(y);
    }
    let dae_result = DAEResult {
        t: t_values,
        x: y_result,
        y: y_empty,
        success,
        message,
        n_eval,
        // Residual evaluations double as constraint evaluations here.
        n_constraint_eval: n_eval,
        n_steps,
        n_accepted,
        n_rejected,
        n_lu,
        n_jac,
        method: opts.method,
        dae_type: DAEType::FullyImplicit,
        index: DAEIndex::Index1,
    };
    Ok(dae_result)
}
/// Solve a DAE system whose differentiation index may exceed 1.
///
/// The structural index is computed first; genuine index-1 problems are
/// forwarded straight to `solve_semi_explicit_dae`. Otherwise the solver
/// dispatches on the *user-declared* index in `options.index`:
/// Pantelides-style index reduction for `Index1`/`HigherIndex`, and
/// specialized paths for `Index2`/`Index3`.
///
/// # Errors
/// Propagates failures from index computation, index reduction, consistent
/// initialization, and the underlying integrators.
#[allow(dead_code)]
pub fn solve_higher_index_dae<F, FFunc, GFunc>(
    f: FFunc,
    g: GFunc,
    t_span: [F; 2],
    x0: Array1<F>,
    y0: Array1<F>,
    options: Option<DAEOptions<F>>,
) -> IntegrateResult<DAEResult<F>>
where
    F: IntegrateFloat + std::default::Default,
    FFunc: Fn(F, ArrayView1<F>, ArrayView1<F>) -> Array1<F> + Clone,
    GFunc: Fn(F, ArrayView1<F>, ArrayView1<F>) -> Array1<F> + Clone,
{
    let opts = options.unwrap_or_default();
    // Thin wrappers over clones of f and g so the originals can still be
    // moved into the early-return call below.
    let f_orig = f.clone();
    let g_orig = g.clone();
    let f_wrapped =
        move |t: F, x: ArrayView1<F>, y: ArrayView1<F>| -> Array1<F> { f_orig(t, x, y) };
    let g_wrapped =
        move |t: F, x: ArrayView1<F>, y: ArrayView1<F>| -> Array1<F> { g_orig(t, x, y) };
    // Determine the structural index at the initial point.
    let mut structure =
        crate::dae::index_reduction::DAEStructure::new_semi_explicit(x0.len(), y0.len());
    let index = structure.compute_index(t_span[0], x0.view(), y0.view(), &f, &g)?;
    if index == DAEIndex::Index1 {
        // Already index-1: no reduction needed.
        return solve_semi_explicit_dae(f, g, t_span, x0, y0, Some(opts));
    }
    match opts.index {
        DAEIndex::Index1 => {
            // Try Pantelides reduction first; on failure, fall back to
            // projecting the initial conditions onto the constraint manifold
            // and solving with projection-stabilized integration.
            let mut reducer = crate::dae::index_reduction::PantelidesReducer::new(structure);
            if let Err(_e) = reducer.reduce_index(t_span[0], x0.view(), y0.view(), &f, &g) {
                let projection = crate::dae::index_reduction::ProjectionMethod::new(
                    crate::dae::index_reduction::DAEStructure::new_semi_explicit(
                        x0.len(),
                        y0.len(),
                    ),
                );
                let mut x0_copy = x0.clone();
                let mut y0_copy = y0.clone();
                if let Err(e) =
                    projection.make_consistent(t_span[0], &mut x0_copy, &mut y0_copy, &g)
                {
                    return Err(IntegrateError::ComputationError(format!(
                        "Failed to find consistent initial conditions for higher-index DAE: {e}"
                    )));
                }
                return solve_semi_explicit_dae_with_projection(
                    f_wrapped,
                    g_wrapped,
                    t_span,
                    x0_copy,
                    y0_copy,
                    Some(opts),
                );
            }
            // NOTE(review): this local `use` duplicates the import at the top
            // of the file.
            use crate::dae::index_reduction::{DAEStructure, PantelidesReducer};
            // Rebuild a fresh structure for a second reduction pass.
            let mut structure = DAEStructure::default();
            structure.n_differential = x0.len();
            structure.n_algebraic = y0.len();
            structure.n_diff_eqs = x0.len();
            structure.n_alg_eqs = y0.len();
            let mut reducer = PantelidesReducer::new(structure);
            // NOTE(review): the F -> f64 -> F round trip below is a no-op when
            // both conversions succeed; `t_span[0]` could be used directly.
            reducer.reduce_index(
                F::from_f64(t_span[0].to_f64().unwrap_or(0.0)).unwrap_or(F::zero()),
                x0.view(),
                y0.view(),
                &f_wrapped,
                &g_wrapped,
            )?;
            solve_index1_dae_with_reduced_structure(
                f_wrapped,
                g_wrapped,
                t_span,
                x0,
                y0,
                opts,
                reducer.structure,
            )
        }
        DAEIndex::Index2 => {
            solve_index2_dae_specialized(f_wrapped, g_wrapped, t_span, x0, y0, opts)
        }
        DAEIndex::Index3 => {
            solve_index3_dae_specialized(f_wrapped, g_wrapped, t_span, x0, y0, opts)
        }
        DAEIndex::HigherIndex => {
            // Aggressive Pantelides reduction with a raised differentiation cap.
            let mut structure = DAEStructure::default();
            structure.n_differential = x0.len();
            structure.n_algebraic = y0.len();
            structure.n_diff_eqs = x0.len();
            structure.n_alg_eqs = y0.len();
            structure.index = DAEIndex::HigherIndex;
            let mut reducer = PantelidesReducer::new(structure);
            reducer.max_diff_steps = 10;
            reducer.reduce_index(
                F::from_f64(t_span[0].to_f64().unwrap_or(0.0)).unwrap_or(F::zero()),
                x0.view(),
                y0.view(),
                &f_wrapped,
                &g_wrapped,
            )?;
            solve_index1_dae_with_reduced_structure(
                f_wrapped,
                g_wrapped,
                t_span,
                x0,
                y0,
                opts,
                reducer.structure,
            )
        }
    }
}
/// Solve a semi-explicit DAE with tightened tolerances and (optionally)
/// constraint projection applied to every output point.
///
/// The integration is delegated to `solve_semi_explicit_dae` with
/// `rtol`/`atol` scaled by 0.1 and `max_step` halved; afterwards each stored
/// state is projected back onto the constraint manifold when the projection
/// method has `project_after_step` enabled.
#[allow(dead_code)]
fn solve_semi_explicit_dae_with_projection<F, FFunc, GFunc>(
    f: FFunc,
    g: GFunc,
    t_span: [F; 2],
    x0: Array1<F>,
    y0: Array1<F>,
    options: Option<DAEOptions<F>>,
) -> IntegrateResult<DAEResult<F>>
where
    F: IntegrateFloat + std::default::Default,
    FFunc: Fn(F, ArrayView1<F>, ArrayView1<F>) -> Array1<F> + Clone,
    GFunc: Fn(F, ArrayView1<F>, ArrayView1<F>) -> Array1<F> + Clone,
{
    let projection = crate::dae::index_reduction::ProjectionMethod::new(
        crate::dae::index_reduction::DAEStructure::new_semi_explicit(x0.len(), y0.len()),
    );
    // Tighten the solver settings: this path is used for problems that tend
    // to drift off the constraint manifold, so integrate more conservatively.
    let mut opts = options.unwrap_or_default();
    if let Some(max_step) = opts.max_step {
        opts.max_step = Some(max_step * F::from_f64(0.5).expect("Operation failed"));
    }
    opts.rtol *= F::from_f64(0.1).expect("Operation failed");
    opts.atol *= F::from_f64(0.1).expect("Operation failed");
    let f_clone = f.clone();
    let g_clone = g.clone();
    let original_result =
        solve_semi_explicit_dae(f_clone, g_clone, t_span, x0, y0, Some(opts.clone()))?;
    if !projection.project_after_step {
        return Ok(original_result);
    }
    // Project every stored state back onto g = 0; projection failures are
    // deliberately ignored and leave the point unchanged.
    let mut x_projected = Vec::with_capacity(original_result.t.len());
    let mut y_projected = Vec::with_capacity(original_result.t.len());
    for i in 0..original_result.t.len() {
        let t = original_result.t[i];
        let mut x = original_result.x[i].clone();
        let mut y = original_result.y[i].clone();
        let _ = projection.project_solution(t, &mut x, &mut y, &g);
        x_projected.push(x);
        y_projected.push(y);
    }
    let t_len = original_result.t.len();
    let projected_result = DAEResult {
        t: original_result.t,
        x: x_projected,
        y: y_projected,
        success: original_result.success,
        message: original_result.message,
        n_eval: original_result.n_eval,
        // One extra constraint evaluation per projected output point.
        n_constraint_eval: original_result.n_constraint_eval + t_len,
        n_steps: original_result.n_steps,
        n_accepted: original_result.n_accepted,
        n_rejected: original_result.n_rejected,
        n_lu: original_result.n_lu,
        n_jac: original_result.n_jac,
        method: original_result.method,
        dae_type: DAEType::SemiExplicit,
        // NOTE(review): the reported index is hard-coded to Index2,
        // presumably because this path serves reduced higher-index problems —
        // confirm against the callers.
        index: DAEIndex::Index2,
    };
    Ok(projected_result)
}
/// Entry point for implicit DAE initial value problems.
///
/// Dispatches on `options.dae_type`: fully implicit systems are forwarded to
/// `solve_implicit_dae` (which requires consistent initial derivatives in
/// `y_prime0`), while semi-explicit systems are rejected with a hint to call
/// `solve_semi_explicit_dae` directly.
///
/// # Errors
/// `ValueError` for semi-explicit systems or when `y_prime0` is missing for a
/// fully implicit system; otherwise whatever `solve_implicit_dae` returns.
#[allow(dead_code)]
pub fn solve_ivp_dae<F, FFunc>(
    f: FFunc,
    t_span: [F; 2],
    y0: Array1<F>,
    y_prime0: Option<Array1<F>>,
    options: Option<DAEOptions<F>>,
) -> IntegrateResult<DAEResult<F>>
where
    F: IntegrateFloat + std::default::Default,
    FFunc: Fn(F, ArrayView1<F>, ArrayView1<F>) -> Array1<F> + Clone,
{
    let opts = options.unwrap_or_default();
    // Guard clause: semi-explicit systems have a dedicated entry point.
    if matches!(opts.dae_type, DAEType::SemiExplicit) {
        return Err(IntegrateError::ValueError(
            "For semi-explicit DAEs, please use solve_semi_explicit_dae directly.".to_string(),
        ));
    }
    // Fully implicit: the initial derivative is mandatory.
    match y_prime0 {
        Some(yprime0) => solve_implicit_dae(f, t_span, y0, yprime0, Some(opts)),
        None => Err(IntegrateError::ValueError(
            "Initial derivatives are required for fully implicit DAEs.".to_string(),
        )),
    }
}
#[allow(dead_code)]
fn solve_index1_dae_with_reduced_structure<F, FFunc, GFunc>(
f: FFunc,
g: GFunc,
t_span: [F; 2],
x0: Array1<F>,
y0: Array1<F>,
options: DAEOptions<F>,
_structure: crate::dae::index_reduction::DAEStructure<F>,
) -> IntegrateResult<DAEResult<F>>
where
F: IntegrateFloat + std::default::Default,
FFunc: Fn(F, ArrayView1<F>, ArrayView1<F>) -> Array1<F>,
GFunc: Fn(F, ArrayView1<F>, ArrayView1<F>) -> Array1<F>,
{
use crate::dae::methods::bdf_dae::bdf_semi_explicit_dae;
bdf_semi_explicit_dae(f, g, t_span, x0, y0, options)
}
/// Placeholder solver for index-2 DAE systems.
///
/// Index-2 problems are not supported yet; every call returns a
/// `NotImplementedError` directing the caller to index reduction.
#[allow(dead_code)]
fn solve_index2_dae_specialized<F, FFunc, GFunc>(
    _f: FFunc,
    _g: GFunc,
    _t_span: [F; 2],
    _x0: Array1<F>,
    _y0: Array1<F>,
    _options: DAEOptions<F>,
) -> IntegrateResult<DAEResult<F>>
where
    F: IntegrateFloat + std::default::Default,
    FFunc: Fn(F, ArrayView1<F>, ArrayView1<F>) -> Array1<F>,
    GFunc: Fn(F, ArrayView1<F>, ArrayView1<F>) -> Array1<F>,
{
    let message =
        "Index2 systems are not yet implemented. Please use index reduction or reformulate as index-1 DAE.";
    Err(IntegrateError::NotImplementedError(message.to_string()))
}
/// Solve an index-3 DAE by running the semi-explicit BDF solver with a very
/// conservative configuration: a tiny initial step (unless the caller set
/// one), tolerances tightened by a factor of 100, and at least 50 Newton
/// iterations per step.
///
/// # Errors
/// Propagates whatever `bdf_semi_explicit_dae` returns.
#[allow(dead_code)]
fn solve_index3_dae_specialized<F, FFunc, GFunc>(
    f: FFunc,
    g: GFunc,
    t_span: [F; 2],
    x0: Array1<F>,
    y0: Array1<F>,
    options: DAEOptions<F>,
) -> IntegrateResult<DAEResult<F>>
where
    F: IntegrateFloat + std::default::Default,
    FFunc: Fn(F, ArrayView1<F>, ArrayView1<F>) -> Array1<F>,
    GFunc: Fn(F, ArrayView1<F>, ArrayView1<F>) -> Array1<F>,
{
    let mut modified_options = options;
    if modified_options.h0.is_none() {
        // Prefer 1e-8, falling back to 1e-6 only when 1e-8 is not
        // representable in F. The previous
        // `unwrap_or(F::from_f64(1e-6).expect(..))` evaluated the fallback
        // eagerly, panicking whenever 1e-6 failed to convert even though the
        // 1e-8 conversion had already succeeded.
        modified_options.h0 = Some(
            F::from_f64(1e-8)
                .or_else(|| F::from_f64(1e-6))
                .expect("Operation failed"),
        );
    }
    modified_options.rtol *= F::from_f64(0.01).expect("Operation failed");
    modified_options.atol *= F::from_f64(0.01).expect("Operation failed");
    modified_options.max_newton_iterations = modified_options.max_newton_iterations.max(50);
    bdf_semi_explicit_dae(f, g, t_span, x0, y0, modified_options)
}
/// Build a (drift-stabilized `f`, unchanged `g`) pair for dummy-derivative
/// style stabilization.
///
/// The returned `f` subtracts `penalty * g(t, x, y)` from the first
/// `min(len(f), len(g))` components of the right-hand side, pulling
/// trajectories back toward the constraint manifold; `g` is passed through
/// unchanged.
///
/// NOTE(review): the fixed penalty of 1e3 is a heuristic; too large a value
/// stiffens the system — confirm against the target problem class.
#[allow(dead_code)]
fn apply_dummy_derivative_method<F, FFunc, GFunc>(
    f: FFunc,
    g: GFunc,
    _x0: &Array1<F>,
    _y0: &Array1<F>,
) -> IntegrateResult<(
    impl Fn(F, ArrayView1<F>, ArrayView1<F>) -> Array1<F>,
    impl Fn(F, ArrayView1<F>, ArrayView1<F>) -> Array1<F>,
)>
where
    F: IntegrateFloat + std::default::Default,
    FFunc: Fn(F, ArrayView1<F>, ArrayView1<F>) -> Array1<F> + Clone,
    GFunc: Fn(F, ArrayView1<F>, ArrayView1<F>) -> Array1<F> + Clone,
{
    let f_clone = f.clone();
    let g_clone = g.clone();
    // Resolve the penalty once, outside the closure (the original recomputed
    // it on every call), and evaluate the 100.0 fallback lazily: the previous
    // `unwrap_or(F::from_f64(100.0).expect(..))` panicked eagerly whenever
    // 100.0 was not representable, even though 1e3 had already converted.
    let penalty_factor = F::from_f64(1e3)
        .or_else(|| F::from_f64(100.0))
        .expect("Operation failed");
    let stabilized_f = move |t: F, x: ArrayView1<F>, y: ArrayView1<F>| -> Array1<F> {
        let mut result = f_clone(t, x, y);
        let constraint_violation = g_clone(t, x, y);
        // Penalize the constraint violation componentwise, truncating to the
        // shorter of the two vectors.
        for i in 0..result.len().min(constraint_violation.len()) {
            result[i] -= penalty_factor * constraint_violation[i];
        }
        result
    };
    let stabilized_g = move |t: F, x: ArrayView1<F>, y: ArrayView1<F>| -> Array1<F> { g(t, x, y) };
    Ok((stabilized_f, stabilized_g))
}
#[allow(dead_code)]
fn compute_time_jacobian<F, FFunc>(
f: &FFunc,
t: F,
y: &Array1<F>,
y_prime: &Array1<F>,
) -> IntegrateResult<Array1<F>>
where
F: IntegrateFloat + std::default::Default,
FFunc: Fn(F, ArrayView1<F>, ArrayView1<F>) -> Array1<F>,
{
let h = F::from_f64(1e-8)
.expect("Operation failed")
.max(F::epsilon().sqrt() * t.abs());
let h = h.max(F::from_f64(1e-12).expect("Operation failed"));
let f_base = f(t, y.view(), y_prime.view());
let t_plus_h = t + h;
let f_perturbed = f(t_plus_h, y.view(), y_prime.view());
let time_jacobian = f_perturbed
.iter()
.zip(f_base.iter())
.map(|(&f_plus, &f_base)| (f_plus - f_base) / h)
.collect::<Array1<F>>();
Ok(time_jacobian)
}
/// Adaptive central-difference approximation of dF/dt at fixed `y` and `y'`.
///
/// Starting from `max(1e-6, sqrt(eps) * |t|)`, the step is halved until the
/// discrepancy between the forward and central estimates drops below
/// `tolerance`, or the step reaches the 1e-12 floor. The central-difference
/// estimate is returned in either case, so the loop always terminates.
#[allow(dead_code)]
fn compute_adaptive_time_jacobian<F, FFunc>(
    f: &FFunc,
    t: F,
    y: &Array1<F>,
    y_prime: &Array1<F>,
    tolerance: F,
) -> IntegrateResult<Array1<F>>
where
    F: IntegrateFloat + std::default::Default,
    FFunc: Fn(F, ArrayView1<F>, ArrayView1<F>) -> Array1<F>,
{
    let floor = F::from_f64(1e-12).expect("Operation failed");
    let half = F::from_f64(0.5).expect("Operation failed");
    let mut step = F::from_f64(1e-6)
        .expect("Operation failed")
        .max(F::epsilon().sqrt() * t.abs());
    let f_at_t = f(t, y.view(), y_prime.view());
    loop {
        // First-order (forward) estimate.
        let f_plus = f(t + step, y.view(), y_prime.view());
        let forward = f_plus
            .iter()
            .zip(f_at_t.iter())
            .map(|(&fp, &fb)| (fp - fb) / step)
            .collect::<Array1<F>>();
        // Second-order (central) estimate.
        let f_minus = f(t - step, y.view(), y_prime.view());
        let central = f_plus
            .iter()
            .zip(f_minus.iter())
            .map(|(&fp, &fm)| (fp - fm) / (step + step))
            .collect::<Array1<F>>();
        // Max-norm discrepancy between the two estimates.
        let mut worst = F::zero();
        for (fw, ce) in forward.iter().zip(central.iter()) {
            worst = worst.max((*fw - *ce).abs());
        }
        if worst <= tolerance || step <= floor {
            return Ok(central);
        }
        step *= half;
    }
}