use super::curved_elements::gauss_legendre_1d;
use crate::error::{IntegrateError, IntegrateResult};
/// Compute the `n` Legendre-Gauss-Lobatto (LGL) quadrature nodes and weights
/// on the reference interval [-1, 1].
///
/// The interior nodes are the roots of P'_{n-1} (derivative of the Legendre
/// polynomial of degree n-1), refined by Newton iteration from
/// Chebyshev-extrema starting guesses; the endpoints ±1 are always included.
/// Weights use the closed form w_i = 2 / (n (n-1) P_{n-1}(x_i)^2).
///
/// # Errors
/// Returns `IntegrateError::ValueError` when `n < 2` (LGL needs both endpoints).
pub fn legendre_gauss_lobatto(n: usize) -> IntegrateResult<(Vec<f64>, Vec<f64>)> {
if n < 2 {
return Err(IntegrateError::ValueError(
"LGL requires at least 2 nodes".into(),
));
}
// Smallest rule: the two endpoints, each with weight 1 (exact for linears).
if n == 2 {
return Ok((vec![-1.0, 1.0], vec![1.0, 1.0]));
}
let degree = n as f64 - 1.0;
let mut nodes = vec![-1.0];
// Interior nodes: roots of P'_{n-1}, one Newton solve per node.
for i in 1..n - 1 {
// Chebyshev-extrema initial guess, already close to the LGL node.
let mut x = -(std::f64::consts::PI * i as f64 / degree).cos();
for _ in 0..50 {
let p = legendre_poly(n - 1, x);
let dp = legendre_deriv_poly(n - 1, x);
// Second derivative from the Legendre ODE (degree n-1):
// (1 - x^2) P'' = 2 x P' - (n-1) n P.
let one_minus_x2 = 1.0 - x * x;
let ddp = if one_minus_x2.abs() > 1e-12 {
(2.0 * x * dp - degree * n as f64 * p) / one_minus_x2
} else {
// Near ±1 the ODE form is singular; use a huge slope so the
// Newton step collapses to essentially zero.
1e10 * dp
};
if ddp.abs() < 1e-300 {
break; // cannot form a Newton step
}
let step = -dp / ddp;
x += step;
if step.abs() < 1e-15 {
break; // converged to machine precision
}
}
nodes.push(x);
}
nodes.push(1.0);
// Closed-form LGL weights: w_i = 2 / (n (n-1) P_{n-1}(x_i)^2).
let nn = n as f64;
let weights: Vec<f64> = nodes
.iter()
.map(|&x| {
let p = legendre_poly(n - 1, x);
2.0 / (nn * (nn - 1.0) * p * p)
})
.collect();
Ok((nodes, weights))
}
/// Evaluate the Legendre polynomial P_n at `x` via the Bonnet three-term
/// recurrence: k P_k = (2k - 1) x P_{k-1} - (k - 1) P_{k-2}.
pub fn legendre_poly(n: usize, x: f64) -> f64 {
    match n {
        0 => 1.0, // P_0 ≡ 1
        1 => x,   // P_1 = x
        _ => {
            let (mut older, mut newer) = (1.0, x);
            for k in 2..=n {
                let kf = k as f64;
                let next = ((2.0 * kf - 1.0) * x * newer - (kf - 1.0) * older) / kf;
                older = newer;
                newer = next;
            }
            newer
        }
    }
}
/// Evaluate the derivative P'_n at `x`.
///
/// Away from the endpoints this uses the identity
/// (x^2 - 1) P'_n = n (x P_n - P_{n-1}), which is singular at x = ±1;
/// there the exact limit P'_n(±1) = (±1)^{n+1} n (n+1) / 2 is returned.
pub fn legendre_deriv_poly(n: usize, x: f64) -> f64 {
    if n == 0 {
        // P_0 is constant, derivative vanishes everywhere.
        return 0.0;
    }
    let nf = n as f64;
    let denom = x * x - 1.0;
    if denom.abs() < 1e-12 {
        // Endpoint limit: +1 at x = +1, (-1)^{n+1} at x = -1.
        let sign = if x > 0.0 {
            1.0
        } else {
            (-1_f64).powi(n as i32 + 1)
        };
        sign * nf * (nf + 1.0) * 0.5
    } else {
        nf * (x * legendre_poly(n, x) - legendre_poly(n - 1, x)) / denom
    }
}
/// Build the dense spectral differentiation matrix on the given LGL nodes.
///
/// Off-diagonal entries use the standard collocation formula
/// D_ij = [P_N(x_i) / P_N(x_j)] / (x_i - x_j) with N = n - 1; each diagonal
/// entry is the negated row sum, so constants differentiate to exactly zero.
pub fn differentiation_matrix_lgl(nodes: &[f64]) -> Vec<Vec<f64>> {
    let n = nodes.len();
    let mut d = vec![vec![0.0_f64; n]; n];
    for (i, row) in d.iter_mut().enumerate() {
        // P_N(x_i) is constant along the row; compute it once.
        let pn_i = legendre_poly(n - 1, nodes[i]);
        for (j, entry) in row.iter_mut().enumerate() {
            if j == i {
                continue; // diagonal filled in below
            }
            let pn_j = legendre_poly(n - 1, nodes[j]);
            // Guard a vanishing P_N(x_j); at true LGL nodes this never
            // triggers (|P_N| stays bounded away from zero there).
            *entry = if pn_j.abs() < 1e-14 {
                0.0
            } else {
                (pn_i / pn_j) / (nodes[i] - nodes[j])
            };
        }
        // Consistency condition D * 1 = 0: diagonal is minus the row sum
        // (row[i] is still 0.0 here, so summing the whole row is safe).
        let off_diag_sum: f64 = row.iter().sum();
        row[i] = -off_diag_sum;
    }
    d
}
/// Summation-by-parts (SBP) operator assembled on LGL nodes.
pub struct SbpOperator {
// Differentiation matrix D (n x n).
pub d: Vec<Vec<f64>>,
// Q = H * D with H = diag(mass); satisfies Q + Q^T = B (see
// `entropy_identity_error`).
pub q: Vec<Vec<f64>>,
// Boundary operator stored as a diagonal: b[0] = -1, b[n-1] = +1, else 0.
pub b: Vec<f64>,
// Diagonal mass-matrix entries (the LGL quadrature weights).
pub mass: Vec<f64>,
// LGL nodes on [-1, 1].
pub nodes: Vec<f64>,
// Number of nodes per element.
pub n: usize,
}
impl SbpOperator {
/// Construct an SBP operator with `n` LGL nodes: D from the nodes,
/// Q = H D (H the diagonal mass matrix of quadrature weights), and the
/// boundary diagonal B = diag(-1, 0, …, 0, +1).
///
/// # Errors
/// Propagates `IntegrateError::ValueError` from the LGL computation
/// when `n < 2`.
pub fn new(n: usize) -> IntegrateResult<Self> {
let (nodes, weights) = legendre_gauss_lobatto(n)?;
let d = differentiation_matrix_lgl(&nodes);
// Q = H * D scales row i of D by quadrature weight w_i.
let q: Vec<Vec<f64>> = d
.iter()
.zip(weights.iter())
.map(|(row, &w)| row.iter().map(|&dij| w * dij).collect())
.collect();
// Boundary operator: -1 at the left endpoint, +1 at the right.
let mut b = vec![0.0; n];
b[0] = -1.0;
b[n - 1] = 1.0;
Ok(Self {
d,
q,
b,
mass: weights,
nodes,
n,
})
}
/// Maximum elementwise violation of the SBP identity Q + Q^T = B.
/// Should be zero up to rounding for a genuine SBP operator.
pub fn entropy_identity_error(&self) -> f64 {
let mut max_err = 0.0_f64;
for i in 0..self.n {
for j in 0..self.n {
// B is diagonal, so the target off the diagonal is 0.
let target = if i == j { self.b[i] } else { 0.0 };
let err = (self.q[i][j] + self.q[j][i] - target).abs();
max_err = max_err.max(err);
}
}
max_err
}
}
/// Rusanov (local Lax-Friedrichs) numerical flux for a scalar conservation law:
/// central average of the physical fluxes plus dissipation scaled by the
/// local wave-speed estimate max(|u_l|, |u_r|).
///
/// NOTE(review): the wave-speed estimate |u| is exact for Burgers' equation
/// (f = u^2/2); for an arbitrary `f_fn` it is only a heuristic — confirm
/// callers pass Burgers-like fluxes.
pub fn rusanov_flux(u_l: f64, u_r: f64, f_fn: impl Fn(f64) -> f64) -> f64 {
    let lambda = u_l.abs().max(u_r.abs());
    let central = 0.5 * (f_fn(u_l) + f_fn(u_r));
    central - 0.5 * lambda * (u_r - u_l)
}
/// Entropy-stable numerical flux for Burgers' equation: the
/// entropy-conservative two-point flux (u_l^2 + u_l u_r + u_r^2) / 6 plus
/// Rusanov-type dissipation with wave speed max(|u_l|, |u_r|), which makes
/// the flux entropy-dissipative across discontinuities.
pub fn entropy_stable_flux_burgers(u_l: f64, u_r: f64) -> f64 {
    let entropy_conservative = (u_l * u_l + u_l * u_r + u_r * u_r) / 6.0;
    let wave_speed = u_l.abs().max(u_r.abs());
    entropy_conservative - 0.5 * wave_speed * (u_r - u_l)
}
/// Flux-differencing volume term rhs_i = 2 * Σ_j D_ij f*(u_i, u_j) for a
/// symmetric two-point flux `flux_fn`; the factor 2 is the standard
/// split-form scaling for entropy-conservative volume integrals.
pub fn flux_differencing_volume(
    u_elem: &[f64],
    d_mat: &[Vec<f64>],
    flux_fn: impl Fn(f64, f64) -> f64,
) -> Vec<f64> {
    u_elem
        .iter()
        .zip(d_mat.iter())
        .map(|(&ui, d_row)| {
            let acc: f64 = d_row
                .iter()
                .zip(u_elem.iter())
                .map(|(&dij, &uj)| dij * flux_fn(ui, uj))
                .sum();
            2.0 * acc
        })
        .collect()
}
/// One-dimensional entropy-stable discontinuous-Galerkin solver state for
/// Burgers' equation on a uniform mesh.
pub struct EntropyStableDg1D {
// SBP operator shared by every element (LGL nodes on [-1, 1]).
pub sbp: SbpOperator,
// Element edge coordinates, length `n_elements + 1`.
pub x_edges: Vec<f64>,
// Number of mesh elements.
pub n_elements: usize,
// Penalty parameter; stored by the constructor but not referenced in the
// visible methods — TODO confirm intended use.
pub tau: f64,
}
impl EntropyStableDg1D {
/// Build a solver on `[a, b]` with `n_elements` uniform elements,
/// `n_nodes` LGL nodes per element, and penalty parameter `tau`.
///
/// # Errors
/// Propagates the LGL node-computation error when `n_nodes < 2`.
pub fn new(
a: f64,
b: f64,
n_elements: usize,
n_nodes: usize,
tau: f64,
) -> IntegrateResult<Self> {
let sbp = SbpOperator::new(n_nodes)?;
// Uniform mesh edges a = x_0 < x_1 < … < x_{n_elements} = b.
let mut x_edges = Vec::with_capacity(n_elements + 1);
for i in 0..=n_elements {
x_edges.push(a + (b - a) * i as f64 / n_elements as f64);
}
Ok(Self {
sbp,
x_edges,
n_elements,
tau,
})
}
/// Width of element `e`.
fn h_e(&self, e: usize) -> f64 {
self.x_edges[e + 1] - self.x_edges[e]
}
/// Affine map from the reference coordinate `x_ref` in [-1, 1] to the
/// physical coordinate inside element `e`.
fn map_to_physical(&self, e: usize, x_ref: f64) -> f64 {
let a = self.x_edges[e];
let b = self.x_edges[e + 1];
0.5 * (a + b) + 0.5 * (b - a) * x_ref
}
/// Pointwise (collocation) projection of `f` onto the DG grid:
/// `u[e][j] = f(x_{e,j})` for every element/node pair.
pub fn project_initial_condition(&self, f: impl Fn(f64) -> f64) -> Vec<Vec<f64>> {
let n = self.sbp.n;
let mut u = vec![vec![0.0_f64; n]; self.n_elements];
for e in 0..self.n_elements {
for (j, &xi) in self.sbp.nodes.iter().enumerate() {
u[e][j] = f(self.map_to_physical(e, xi));
}
}
u
}
/// Semidiscrete right-hand side du/dt for Burgers' equation:
/// entropy-conservative flux-differencing volume terms plus
/// simultaneous-approximation-term (SAT) interface corrections built from
/// the entropy-stable surface flux.
pub fn compute_rhs(&self, u: &[Vec<f64>]) -> Vec<Vec<f64>> {
let n = self.sbp.n;
let n_elem = self.n_elements;
let mut rhs = vec![vec![0.0_f64; n]; n_elem];
for e in 0..n_elem {
let h = self.h_e(e);
// Jacobian of the affine reference-to-physical map: dx/dξ = h/2.
let jac = h * 0.5;
// Volume contribution: rhs -= (1/J) * 2 Σ_j D_ij f*(u_i, u_j).
let vol = flux_differencing_volume(&u[e], &self.sbp.d, entropy_stable_flux_burgers);
for j in 0..n {
rhs[e][j] -= vol[j] / jac;
}
// Left interface trace: neighbor's right endpoint, or this element's
// own value at the domain boundary (the SAT then vanishes, i.e. an
// outflow-like boundary treatment).
let u_left_minus = if e > 0 { u[e - 1][n - 1] } else { u[e][0] }; let u_left_plus = u[e][0];
let f_left = entropy_stable_flux_burgers(u_left_minus, u_left_plus);
// Right interface, mirror of the left one. Both adjoining elements
// evaluate the flux with the same argument order, so the scheme
// stays conservative across interfaces.
let u_right_minus = u[e][n - 1];
let u_right_plus = if e < n_elem - 1 {
u[e + 1][0]
} else {
u[e][n - 1]
}; let f_right = entropy_stable_flux_burgers(u_right_minus, u_right_plus);
// SAT penalties: replace the boundary physical flux by the numerical
// interface flux, scaled by the inverse boundary mass entry and the
// Jacobian. Signs: + at the left face, - at the right face.
rhs[e][0] += (f_left - burgers_flux(u[e][0])) / (self.sbp.mass[0] * jac);
rhs[e][n - 1] -= (f_right - burgers_flux(u[e][n - 1])) / (self.sbp.mass[n - 1] * jac);
}
rhs
}
/// Advance the solution one explicit (forward-Euler) step of size `dt`.
pub fn step(&self, u: &[Vec<f64>], dt: f64) -> Vec<Vec<f64>> {
let rhs = self.compute_rhs(u);
let n = self.sbp.n;
let mut u_new = vec![vec![0.0_f64; n]; self.n_elements];
for e in 0..self.n_elements {
for j in 0..n {
u_new[e][j] = u[e][j] + dt * rhs[e][j];
}
}
u_new
}
/// Discrete entropy production rate Σ_e Σ_j H_j J u rhs for the quadratic
/// entropy S = u²/2; should be ≤ 0 (up to rounding) for an entropy-stable
/// scheme.
pub fn entropy_rate_check(&self, u: &[Vec<f64>]) -> f64 {
let rhs = self.compute_rhs(u);
let n = self.sbp.n;
let mut rate = 0.0;
for e in 0..self.n_elements {
let h = self.h_e(e);
let jac = h * 0.5;
for j in 0..n {
rate += self.sbp.mass[j] * jac * u[e][j] * rhs[e][j];
}
}
rate
}
}
/// Physical flux of Burgers' equation: f(u) = u² / 2.
#[inline]
pub fn burgers_flux(u: f64) -> f64 {
    0.5 * (u * u)
}
#[cfg(test)]
mod tests {
// NOTE: the previous `use super::super::curved_elements::gauss_legendre_1d;`
// was removed — nothing in this module referenced it, so it only produced
// an unused-import warning.
use super::*;
// Node/weight vectors must both have exactly `n` entries.
#[test]
fn test_lgl_nodes_count() {
for n in 2..=7 {
let (nodes, weights) = legendre_gauss_lobatto(n).unwrap();
assert_eq!(nodes.len(), n, "LGL({n}) should return {n} nodes");
assert_eq!(weights.len(), n);
}
}
// LGL rules always include both interval endpoints.
#[test]
fn test_lgl_endpoints() {
for n in 2..=6 {
let (nodes, _) = legendre_gauss_lobatto(n).unwrap();
assert!(
(nodes[0] + 1.0).abs() < 1e-12,
"LGL first node should be -1"
);
assert!(
(nodes[n - 1] - 1.0).abs() < 1e-12,
"LGL last node should be +1"
);
}
}
// The defining SBP property Q + Q^T = B must hold to rounding.
#[test]
fn test_sbp_entropy_identity() {
for n in 2..=5 {
let sbp = SbpOperator::new(n).unwrap();
let err = sbp.entropy_identity_error();
assert!(
err < 1e-10,
"SBP identity Q + Q^T = B violated for n={n}: error = {err}"
);
}
}
// A consistent numerical flux reduces to the physical flux when u_l = u_r.
#[test]
fn test_rusanov_flux_consistency() {
let us = [-2.0, -0.5, 0.0, 0.5, 1.0, 3.0];
for u in us {
let f_star = rusanov_flux(u, u, burgers_flux);
let f_exact = burgers_flux(u);
assert!(
(f_star - f_exact).abs() < 1e-12,
"Consistency failed: f*({u},{u}) = {f_star}, expected {f_exact}"
);
}
}
// Entropy stability: f*(u_l,u_r)(u_r - u_l) ≤ ψ(u_r) - ψ(u_l) with the
// entropy potential ψ(u) = u³/3 for Burgers.
#[test]
fn test_entropy_stable_flux_dissipative() {
let u_l = 2.0;
let u_r = 0.5;
let f_star = entropy_stable_flux_burgers(u_l, u_r);
let q_r = u_r.powi(3) / 3.0;
let q_l = u_l.powi(3) / 3.0;
let lhs = f_star * (u_r - u_l);
let rhs = q_r - q_l;
assert!(
lhs <= rhs + 1e-12,
"Entropy-stable flux should satisfy lhs ≤ rhs: lhs={lhs}, rhs={rhs}"
);
}
// Smoke test: volume terms must at least produce finite values.
#[test]
fn test_flux_differencing_skew_symmetry() {
let n = 4;
let sbp = SbpOperator::new(n).unwrap();
let u = vec![1.0, 0.5, 0.3, 0.8];
let rhs = flux_differencing_volume(&u, &sbp.d, entropy_stable_flux_burgers);
let dot: f64 = (0..n).map(|i| sbp.mass[i] * u[i] * rhs[i]).sum();
assert!(
dot.is_finite(),
"Flux differencing should give finite result"
);
}
// D must differentiate the linear function u(x) = x exactly (D x = 1).
#[test]
fn test_differentiation_matrix_exact() {
let n = 4;
let sbp = SbpOperator::new(n).unwrap();
let nodes = &sbp.nodes;
let u: Vec<f64> = nodes.to_vec();
let mut dp = vec![0.0_f64; n];
for i in 0..n {
for j in 0..n {
dp[i] += sbp.d[i][j] * u[j];
}
}
for i in 0..n {
assert!(
(dp[i] - 1.0).abs() < 1e-10,
"D*x should give 1.0 at node {i}: got {}",
dp[i]
);
}
}
// The discrete entropy rate must be non-positive, even across a shock-like
// initial condition.
#[test]
fn test_entropy_rate_negative() {
let solver = EntropyStableDg1D::new(0.0, 1.0, 4, 4, 1.0).unwrap();
let u = solver.project_initial_condition(|x| if x < 0.5 { 1.0 } else { -0.5 });
let rate = solver.entropy_rate_check(&u);
assert!(
rate <= 1e-10,
"Entropy rate should be ≤ 0 for entropy-stable scheme, got {rate}"
);
}
// Default configuration sanity check against the shared types module.
#[test]
fn test_entropy_stable_config_default() {
use super::super::types::{EntropyStableConfig, FluxType};
let cfg = EntropyStableConfig::default();
assert_eq!(cfg.flux, FluxType::EntropyStable);
assert!(cfg.tau > 0.0);
}
}