use super::*;
use crate::Df64;
use dashu_base::Approximation;
use dashu_float::{Context, DBig, round::mode::HalfAway};
use std::str::FromStr;
// Decimal digits of working precision for the arbitrary-precision (DBig)
// reference evaluations.
const DBIG_DIGITS: usize = 100;
// Maximum allowed absolute error when the kernel is evaluated in plain f64.
const TOLERANCE_F64: f64 = 1e-15;
// Maximum allowed absolute error when the kernel is evaluated in
// double-double (Df64) arithmetic.
const TOLERANCE_DF64: f64 = 1e-30;
/// Reference implementation of a kernel in arbitrary-precision (`DBig`)
/// arithmetic, used as ground truth by the precision tests below.
trait KernelDbigCompute {
    /// Evaluate the kernel at `(x, y)` for the given `lambda`, using the
    /// precision configured in `ctx`.
    fn compute_dbig(lambda: DBig, x: DBig, y: DBig, ctx: &Context<HalfAway>) -> DBig;
}
/// Convert an `f64` into a `DBig` with the requested precision by
/// round-tripping through a 30-digit scientific-notation string.
fn f64_to_dbig(val: f64, precision: usize) -> DBig {
    let rendered = format!("{:.30e}", val);
    let parsed = DBig::from_str(&rendered).unwrap();
    parsed.with_precision(precision).unwrap()
}
/// Unwrap the `f64` payload of a dashu `Approximation`, discarding any
/// rounding information.
fn extract_f64(approx: Approximation<f64, dashu_float::round::Rounding>) -> f64 {
    // Both variants carry the value; the rounding direction is irrelevant here.
    match approx {
        Approximation::Exact(val) | Approximation::Inexact(val, _) => val,
    }
}
impl KernelDbigCompute for LogisticKernel {
    /// High-precision reference: K(x, y) = exp(-Λ·y·(x+1)/2) / (1 + exp(-Λ·y)).
    fn compute_dbig(lambda: DBig, x: DBig, y: DBig, ctx: &Context<HalfAway>) -> DBig {
        let one = f64_to_dbig(1.0, ctx.precision());
        let two = f64_to_dbig(2.0, ctx.precision());
        // Argument of the numerator exponential: -Λ y (x + 1) / 2.
        let exponent = -(lambda.clone() * y.clone() * (x + one.clone())) / two;
        let denom = one + (-(lambda * y)).exp();
        exponent.exp() / denom
    }
}
impl KernelDbigCompute for RegularizedBoseKernel {
    /// High-precision reference: K(x, y) = y·exp(-Λ·y·(x+1)/2) / (1 - exp(-Λ·y)),
    /// with the y → 0 limit replaced by the leading series terms 1/Λ - x·y/2.
    fn compute_dbig(lambda: DBig, x: DBig, y: DBig, ctx: &Context<HalfAway>) -> DBig {
        let one = f64_to_dbig(1.0, ctx.precision());
        let half = f64_to_dbig(0.5, ctx.precision());
        // Near y = 0 the direct formula is 0/0; use the series expansion instead.
        let y_f64 = extract_f64(y.to_f64());
        if y_f64.abs() < 1e-100 {
            let leading = one.clone() / lambda.clone();
            let linear = half * x.clone() * y.clone();
            return leading - linear;
        }
        let exponent = -lambda.clone() * y.clone() * (x + one.clone()) * half;
        let num = y.clone() * exponent.exp();
        let denom = one - (-lambda * y).exp();
        num / denom
    }
}
/// Compare `kernel.compute` evaluated in numeric type `T` against a
/// 100-digit `DBig` reference evaluation of the same kernel.
///
/// Panics when the absolute error between the two results exceeds
/// `tolerance`; the relative error is reported for diagnostics only.
fn test_kernel_compute_precision_generic<K, T>(
    kernel: &K,
    lambda: f64,
    x_dd: Df64,
    y_dd: Df64,
    tolerance: f64,
    kernel_name: &str,
) where
    K: CentrosymmKernel + KernelDbigCompute,
    T: CustomNumeric,
{
    // Evaluate in the numeric type under test.
    let x_t: T = T::convert_from(x_dd);
    let y_t: T = T::convert_from(y_dd);
    let result_t = kernel.compute(x_t, y_t);
    // Arbitrary-precision reference. The Df64 inputs are collapsed to f64
    // (hi + lo); f64_to_dbig performs exactly the same "{:.30e}" string
    // round-trip the previous inline conversion blocks did.
    let ctx = Context::<HalfAway>::new(DBIG_DIGITS);
    let lambda_dbig = f64_to_dbig(lambda, ctx.precision());
    let x_dbig = f64_to_dbig(x_dd.hi() + x_dd.lo(), ctx.precision());
    let y_dbig = f64_to_dbig(y_dd.hi() + y_dd.lo(), ctx.precision());
    let result_dbig = K::compute_dbig(lambda_dbig, x_dbig, y_dbig, &ctx);
    let result_dbig_f64 = extract_f64(result_dbig.to_f64());
    let result_t_f64 = result_t.to_f64();
    let diff = (result_t_f64 - result_dbig_f64).abs();
    // Guard against a (near-)zero reference value when forming the
    // diagnostic relative error.
    let rel_error = if result_dbig_f64.abs() > 1e-300 {
        diff / result_dbig_f64.abs()
    } else {
        diff
    };
    let x_val = x_dd.hi() + x_dd.lo();
    let y_val = y_dd.hi() + y_dd.lo();
    assert!(
        diff < tolerance,
        "{}: precision test failed for lambda={}, x={}, y={}\n Expected: {}\n Got: {}\n Absolute error: {} (tolerance: {})\n Relative error: {}",
        kernel_name,
        lambda,
        x_val,
        y_val,
        result_dbig_f64,
        result_t_f64,
        diff,
        tolerance,
        rel_error
    );
}
/// Compare `kernel.compute` (via `AbstractKernel`, i.e. without assuming
/// centrosymmetry) evaluated in numeric type `T` against a 100-digit
/// `DBig` reference evaluation.
///
/// Panics when the absolute error exceeds `tolerance`.
fn test_kernel_compute_precision_generic_noncentrosymm<K, T>(
    kernel: &K,
    x_dd: Df64,
    y_dd: Df64,
    tolerance: f64,
    kernel_name: &str,
) where
    K: AbstractKernel + KernelDbigCompute,
    T: CustomNumeric,
{
    // Evaluate in the numeric type under test.
    let x_t: T = T::convert_from(x_dd);
    let y_t: T = T::convert_from(y_dd);
    let result_t = kernel.compute(x_t, y_t);
    let ctx = Context::<HalfAway>::new(DBIG_DIGITS);
    // NOTE(review): the reference lambda is hard-coded to 10.0 here even
    // though the kernel under test may carry a different lambda — confirm
    // this matches every caller's constructor.
    let lambda_dbig = f64_to_dbig(10.0, ctx.precision());
    // Df64 inputs are collapsed to f64 (hi + lo); f64_to_dbig performs the
    // same "{:.30e}" string round-trip the previous inline blocks did.
    let x_dbig = f64_to_dbig(x_dd.hi() + x_dd.lo(), ctx.precision());
    let y_dbig = f64_to_dbig(y_dd.hi() + y_dd.lo(), ctx.precision());
    let result_dbig = K::compute_dbig(lambda_dbig, x_dbig, y_dbig, &ctx);
    let result_dbig_f64 = extract_f64(result_dbig.to_f64());
    let result_t_f64 = result_t.to_f64();
    let diff = (result_t_f64 - result_dbig_f64).abs();
    // Relative error is diagnostic only; the assertion is on absolute error.
    let rel_error = if result_dbig_f64.abs() > 1e-300 {
        diff / result_dbig_f64.abs()
    } else {
        diff
    };
    let x_val = x_dd.hi() + x_dd.lo();
    let y_val = y_dd.hi() + y_dd.lo();
    assert!(
        diff < tolerance,
        "{}: precision test failed for x={}, y={}\n Expected: {}\n Got: {}\n Absolute error: {} (tolerance: {})\n Relative error: {}",
        kernel_name,
        x_val,
        y_val,
        result_dbig_f64,
        result_t_f64,
        diff,
        tolerance,
        rel_error
    );
}
/// Combine arbitrary-precision kernel evaluations at (x, y) and (x, -y)
/// into the even (sum) or odd (difference) reduced kernel.
fn compute_reduced_dbig<K: KernelDbigCompute>(
    lambda: DBig,
    x: DBig,
    y: DBig,
    symmetry: SymmetryType,
    ctx: &Context<HalfAway>,
) -> DBig {
    // Both symmetries need the same pair of evaluations; only the
    // combination differs.
    let k_plus = K::compute_dbig(lambda.clone(), x.clone(), y.clone(), ctx);
    let k_minus = K::compute_dbig(lambda, x, -y, ctx);
    match symmetry {
        SymmetryType::Even => k_plus + k_minus,
        SymmetryType::Odd => k_plus - k_minus,
    }
}
/// Compare `kernel.compute_reduced` evaluated in numeric type `T` against
/// a 100-digit `DBig` reference built from K(x, y) ± K(x, -y).
///
/// Panics when the absolute error exceeds `tolerance`.
fn test_kernel_compute_reduced_precision_generic<K, T>(
    kernel: &K,
    lambda: f64,
    x_dd: Df64,
    y_dd: Df64,
    symmetry: SymmetryType,
    tolerance: f64,
    kernel_name: &str,
) where
    K: CentrosymmKernel + KernelDbigCompute,
    T: CustomNumeric,
{
    // Evaluate the reduced kernel in the numeric type under test.
    let x_t: T = T::convert_from(x_dd);
    let y_t: T = T::convert_from(y_dd);
    let result_t = kernel.compute_reduced(x_t, y_t, symmetry);
    // Arbitrary-precision reference. Df64 inputs are collapsed to f64
    // (hi + lo); f64_to_dbig performs the same "{:.30e}" string round-trip
    // the previous inline conversion blocks did.
    let ctx = Context::<HalfAway>::new(DBIG_DIGITS);
    let lambda_dbig = f64_to_dbig(lambda, ctx.precision());
    let x_dbig = f64_to_dbig(x_dd.hi() + x_dd.lo(), ctx.precision());
    let y_dbig = f64_to_dbig(y_dd.hi() + y_dd.lo(), ctx.precision());
    let result_dbig = compute_reduced_dbig::<K>(lambda_dbig, x_dbig, y_dbig, symmetry, &ctx);
    let result_dbig_f64 = extract_f64(result_dbig.to_f64());
    let result_t_f64 = result_t.to_f64();
    let diff = (result_t_f64 - result_dbig_f64).abs();
    // Relative error is diagnostic only; the assertion is on absolute error.
    let rel_error = if result_dbig_f64.abs() > 1e-300 {
        diff / result_dbig_f64.abs()
    } else {
        diff
    };
    let x_val = x_dd.hi() + x_dd.lo();
    let y_val = y_dd.hi() + y_dd.lo();
    assert!(
        diff < tolerance,
        "{}: compute_reduced precision test failed for lambda={}, x={}, y={}, symmetry={:?}\n Expected: {}\n Got: {}\n Absolute error: {} (tolerance: {})\n Relative error: {}",
        kernel_name,
        lambda,
        x_val,
        y_val,
        symmetry,
        result_dbig_f64,
        result_t_f64,
        diff,
        tolerance,
        rel_error
    );
}
/// Drive the per-point precision checks — plain `compute` plus both reduced
/// symmetries, each in both f64 and Df64 — over a grid of lambdas and
/// test points.
fn test_kernel_precision_different_lambdas<K, F, T>(
    kernel_constructor: F,
    kernel_name: &str,
    test_points: &[(T, T)],
    lambdas: &[f64],
) where
    K: CentrosymmKernel + KernelDbigCompute + Clone,
    F: Fn(f64) -> K,
    T: CustomNumeric + Copy,
{
    for lambda in lambdas.iter().copied() {
        let kernel = kernel_constructor(lambda);
        for (x, y) in test_points.iter().copied() {
            let x_dd = Df64::convert_from(x);
            let y_dd = Df64::convert_from(y);
            // Plain compute: f64 first, then Df64.
            test_kernel_compute_precision_generic::<K, f64>(
                &kernel,
                lambda,
                x_dd,
                y_dd,
                TOLERANCE_F64,
                kernel_name,
            );
            test_kernel_compute_precision_generic::<K, Df64>(
                &kernel,
                lambda,
                x_dd,
                y_dd,
                TOLERANCE_DF64,
                kernel_name,
            );
            // Reduced kernels, preserving the original Even-then-Odd order,
            // again f64 before Df64 for each symmetry.
            for symmetry in [SymmetryType::Even, SymmetryType::Odd] {
                test_kernel_compute_reduced_precision_generic::<K, f64>(
                    &kernel,
                    lambda,
                    x_dd,
                    y_dd,
                    symmetry,
                    TOLERANCE_F64,
                    kernel_name,
                );
                test_kernel_compute_reduced_precision_generic::<K, Df64>(
                    &kernel,
                    lambda,
                    x_dd,
                    y_dd,
                    symmetry,
                    TOLERANCE_DF64,
                    kernel_name,
                );
            }
        }
    }
}
#[test]
fn test_logistic_kernel_precision_critical_points() {
    // Corner, edge, and interior points of the unit square, including
    // asymmetric (x, y) pairs.
    let test_points_dd: [(Df64, Df64); 10] = [
        (Df64::from(0.0), Df64::from(0.0)),
        (Df64::from(0.01), Df64::from(0.01)),
        (Df64::from(0.1), Df64::from(0.1)),
        (Df64::from(0.5), Df64::from(0.5)),
        (Df64::from(0.9), Df64::from(0.9)),
        (Df64::from(0.01), Df64::from(0.1)),
        (Df64::from(0.1), Df64::from(0.01)),
        (Df64::from(0.99), Df64::from(0.99)),
        (Df64::from(0.99), Df64::from(0.01)),
        (Df64::from(0.01), Df64::from(0.99)),
    ];
    let lambdas = [10.0, 1e2, 1e3, 1e4];
    // Double-double input points first...
    test_kernel_precision_different_lambdas::<LogisticKernel, _, Df64>(
        LogisticKernel::new,
        "LogisticKernel",
        &test_points_dd,
        &lambdas,
    );
    // ...then the same points collapsed to plain f64.
    let test_points_f64: [(f64, f64); 10] =
        test_points_dd.map(|(x, y)| (x.hi() + x.lo(), y.hi() + y.lo()));
    test_kernel_precision_different_lambdas::<LogisticKernel, _, f64>(
        LogisticKernel::new,
        "LogisticKernel",
        &test_points_f64,
        &lambdas,
    );
}
#[test]
fn test_regularized_bose_kernel_discretized_matrix_y0() {
    use crate::kernel::SymmetryType;
    use crate::kernelmatrix::matrix_from_gauss_with_segments;
    // Large lambda stresses the y ≈ 0 region, where the regularized Bose
    // kernel's direct formula degenerates to 0/0.
    let lambda = 1e5;
    let epsilon = 1e-10;
    let kernel = RegularizedBoseKernel::new(lambda);
    // Discretize on the piecewise Gauss-Legendre grid suggested by the
    // kernel's own SVE hints.
    let hints = kernel.sve_hints::<f64>(epsilon);
    let segments_x = hints.segments_x();
    let segments_y = hints.segments_y();
    let n_gauss = hints.ngauss();
    let rule = crate::gauss::legendre_generic::<f64>(n_gauss);
    let gauss_x = rule.piecewise(&segments_x);
    let gauss_y = rule.piecewise(&segments_y);
    let discretized =
        matrix_from_gauss_with_segments(&kernel, &gauss_x, &gauss_y, SymmetryType::Even, &hints);
    // Locate the Gauss node(s) in y with the smallest |y| — these probe the
    // y → 0 limit. Ties within 1e-15 are all collected.
    let mut y0_indices = Vec::new();
    let mut min_abs_y = f64::INFINITY;
    for (j, &y_gauss) in gauss_y.x.iter().enumerate() {
        let abs_y = y_gauss.abs();
        if abs_y < min_abs_y {
            min_abs_y = abs_y;
            y0_indices.clear();
            y0_indices.push(j);
        } else if (abs_y - min_abs_y).abs() < 1e-15 {
            y0_indices.push(j);
        }
    }
    // At y = 0 each term of the even-reduced kernel K(x, y) + K(x, -y)
    // tends to 1/lambda (see the series branch in compute_dbig), so the
    // sum tends to 2/lambda, independent of x.
    let reduced_expected = 2.0 / lambda;
    let tolerance = 1e-3;
    for &j in &y0_indices {
        let y_gauss = gauss_y.x[j];
        // Spot-check the first, middle, and last x nodes.
        for i in [0, gauss_x.x.len() / 2, gauss_x.x.len() - 1] {
            let x_gauss = gauss_x.x[i];
            let matrix_value = discretized.matrix[[i, j]];
            let direct_reduced =
                kernel.compute(x_gauss, y_gauss) + kernel.compute(x_gauss, -y_gauss);
            let error_matrix_vs_direct_reduced = (matrix_value - direct_reduced).abs();
            let error_direct_reduced_vs_expected = (direct_reduced - reduced_expected).abs();
            // The discretized matrix entry must agree with a direct reduced
            // evaluation at the same node...
            assert!(
                error_matrix_vs_direct_reduced < tolerance,
                "Matrix value at (i={}, j={}) does not match direct reduced computation: matrix={:.15e}, direct_reduced={:.15e}, error={:.15e}",
                i,
                j,
                matrix_value,
                direct_reduced,
                error_matrix_vs_direct_reduced
            );
            // ...and that direct evaluation must match the analytic limit.
            assert!(
                error_direct_reduced_vs_expected < tolerance,
                "Direct reduced value at (x={:.15e}, y={:.15e}) incorrect: direct_reduced={:.15e}, expected={:.15e}, error={:.15e}",
                x_gauss,
                y_gauss,
                direct_reduced,
                reduced_expected,
                error_direct_reduced_vs_expected
            );
        }
    }
}
#[test]
#[ignore]
fn test_regularized_bose_kernel_precision_critical_points() {
    // Critical points across the unit square, including asymmetric pairs.
    let test_points_dd: [(Df64, Df64); 10] = [
        (Df64::from(0.0), Df64::from(0.0)),
        (Df64::from(0.01), Df64::from(0.01)),
        (Df64::from(0.1), Df64::from(0.1)),
        (Df64::from(0.5), Df64::from(0.5)),
        (Df64::from(0.9), Df64::from(0.9)),
        (Df64::from(0.01), Df64::from(0.1)),
        (Df64::from(0.1), Df64::from(0.01)),
        (Df64::from(0.99), Df64::from(0.99)),
        (Df64::from(0.99), Df64::from(0.01)),
        (Df64::from(0.01), Df64::from(0.99)),
    ];
    let lambdas = [10.0, 1e2, 1e3];
    // Double-double input points first...
    test_kernel_precision_different_lambdas::<RegularizedBoseKernel, _, Df64>(
        RegularizedBoseKernel::new,
        "RegularizedBoseKernel",
        &test_points_dd,
        &lambdas,
    );
    // ...then the same points collapsed to plain f64.
    let test_points_f64: [(f64, f64); 10] =
        test_points_dd.map(|(x, y)| (x.hi() + x.lo(), y.hi() + y.lo()));
    test_kernel_precision_different_lambdas::<RegularizedBoseKernel, _, f64>(
        RegularizedBoseKernel::new,
        "RegularizedBoseKernel",
        &test_points_f64,
        &lambdas,
    );
}
/// Minimal test kernel that is deliberately NOT centrosymmetric.
#[derive(Debug, Clone, Copy)]
struct SimpleNonCentrosymmKernel {
    // Stored for interface parity with the real kernels; not read by compute.
    lambda: f64,
}
impl SimpleNonCentrosymmKernel {
    /// Build a kernel carrying the given lambda.
    fn new(lambda: f64) -> Self {
        SimpleNonCentrosymmKernel { lambda }
    }
}
impl AbstractKernel for SimpleNonCentrosymmKernel {
    /// K(x, y) = x + y, which breaks K(x, y) == K(-x, -y) in general.
    fn compute<T: CustomNumeric + Copy + Debug>(&self, x: T, y: T) -> T {
        x + y
    }
    /// Honestly reports the lack of centrosymmetry.
    fn is_centrosymmetric(&self) -> bool {
        false
    }
}
#[test]
fn test_noncentrosymm_kernel_is_centrosymmetric() {
    // The deliberately asymmetric kernel must report non-centrosymmetry.
    let kernel = SimpleNonCentrosymmKernel::new(10.0);
    let centrosymmetric = kernel.is_centrosymmetric();
    assert!(!centrosymmetric);
}
#[test]
fn test_noncentrosymm_kernel_compute() {
    let kernel = SimpleNonCentrosymmKernel::new(10.0);
    // K(x, y) = x + y at a few sample points.
    let at = |x: f64, y: f64| -> f64 { kernel.compute(x, y) };
    assert!((at(0.5, 0.5) - 1.0).abs() < 1e-10);
    assert!((at(-0.5, 0.5) - 0.0).abs() < 1e-10);
    // Centrosymmetry would require K(x, y) == K(-x, -y); verify it fails.
    let asymmetry = (at(0.5, 0.5) - at(-0.5, -0.5)).abs();
    assert!(asymmetry > 1e-10);
}