use amari_core::Multivector;
use amari_dual::{functions::softmax, DualNumber};
use amari_fusion::TropicalDualClifford;
use amari_tropical::{viterbi::TropicalViterbi, TropicalNumber};
use approx::assert_relative_eq;
#[test]
fn test_softmax_consistency() {
    // Compare the dual-number softmax against a tropical-algebra reconstruction:
    // tropical "addition" (max) supplies the stabilizing shift, after which the
    // ordinary exp/normalize steps must reproduce the standard softmax values.
    let raw = [1.0, 2.0, 3.0, 0.5, 1.5];
    let as_duals: Vec<DualNumber<f64>> = raw.iter().map(|&v| DualNumber::variable(v)).collect();
    let reference = softmax(&as_duals);

    let as_tropical: Vec<TropicalNumber<f64>> =
        raw.iter().map(|&v| TropicalNumber::new(v)).collect();
    // The tropical sum of the inputs is their maximum — the usual softmax shift.
    let peak = as_tropical.iter().fold(
        TropicalNumber::neg_infinity(),
        |acc: TropicalNumber<f64>, t| acc.tropical_add(t),
    );
    let shifted_exps: Vec<f64> = as_tropical
        .iter()
        .map(|t| (t.value() - peak.value()).exp())
        .collect();
    let total: f64 = shifted_exps.iter().sum();
    // Normalize on the fly while comparing against the dual-number result.
    for (dual_out, &e) in reference.iter().zip(shifted_exps.iter()) {
        assert_relative_eq!(dual_out.real, e / total, epsilon = 1e-10);
    }
}
#[test]
fn test_gradient_consistency() {
    // The analytic softmax-cross-entropy gradient (p - t) must agree with a
    // central finite-difference estimate of the reference loss.
    let xs = vec![0.5, 1.0, 1.5, 0.8];
    let ys = vec![1.0, 0.0, 0.0, 0.0];
    let duals: Vec<DualNumber<f64>> = xs.iter().map(|&v| DualNumber::variable(v)).collect();
    let probs = softmax(&duals);
    let analytic: Vec<f64> = probs
        .iter()
        .zip(ys.iter())
        .map(|(p, &t)| p.real - t)
        .collect();
    // Central difference: (L(x + h) - L(x - h)) / 2h per coordinate.
    let h = 1e-8;
    let numeric: Vec<f64> = (0..xs.len())
        .map(|i| {
            let mut hi = xs.clone();
            let mut lo = xs.clone();
            hi[i] += h;
            lo[i] -= h;
            (manual_cross_entropy(&hi, &ys) - manual_cross_entropy(&lo, &ys)) / (2.0 * h)
        })
        .collect();
    for (a, n) in analytic.iter().zip(numeric.iter()) {
        assert_relative_eq!(*a, *n, epsilon = 1e-5);
    }
}
/// Reference cross-entropy loss between `softmax(inputs)` and a target
/// distribution, using the standard max-shift trick for numerical stability.
/// Terms with a zero target (or an underflowed probability) are skipped so
/// `ln(0)` can never be evaluated. Returns 0.0 for empty input.
fn manual_cross_entropy(inputs: &[f64], targets: &[f64]) -> f64 {
    let shift = inputs.iter().copied().fold(f64::NEG_INFINITY, f64::max);
    let exps: Vec<f64> = inputs.iter().map(|&v| (v - shift).exp()).collect();
    let total: f64 = exps.iter().sum();
    exps.iter().zip(targets.iter()).fold(0.0, |acc, (&e, &t)| {
        let p = e / total;
        if p > 0.0 && t > 0.0 {
            acc - t * p.ln()
        } else {
            acc
        }
    })
}
#[test]
fn test_clifford_consistency() {
    // Hand-checked geometric products in Cl(3,0,0). Per the e1*e2 check below,
    // coefficient index 3 holds the bivector e1∧e2; its -1 square accounts for
    // the -0.2*0.3 term in the expected scalar part.
    let a =
        Multivector::<3, 0, 0>::from_coefficients(vec![1.0, 0.5, 0.3, 0.2, 0.0, 0.0, 0.0, 0.0]);
    let b =
        Multivector::<3, 0, 0>::from_coefficients(vec![0.8, 0.6, 0.4, 0.3, 0.0, 0.0, 0.0, 0.0]);
    // Scalar part: 1*0.8 + 0.5*0.6 + 0.3*0.4 - 0.2*0.3 = 1.16.
    let product = a.geometric_product(&b);
    assert_relative_eq!(product.get(0), 1.16, epsilon = 1e-10);
    // Build a basis element with a single unit coefficient.
    let basis = |i: usize| {
        let mut c = vec![0.0; 8];
        c[i] = 1.0;
        Multivector::<3, 0, 0>::from_coefficients(c)
    };
    let e1 = basis(1);
    let e2 = basis(2);
    // Basis-vector identities: e1*e1 = 1 and e1*e2 = e12 (pure bivector).
    assert_relative_eq!(e1.geometric_product(&e1).get(0), 1.0, epsilon = 1e-10);
    let e1_e2 = e1.geometric_product(&e2);
    assert_relative_eq!(e1_e2.get(0), 0.0, epsilon = 1e-10);
    assert_relative_eq!(e1_e2.get(3), 1.0, epsilon = 1e-10);
}
#[test]
fn test_tdc_self_consistency() {
    // Transforming by an all-zero-logit object should keep the result close to
    // the original (loose bound — the transform is not claimed to be an exact
    // identity here, only to stay within a sane distance).
    let tdc =
        TropicalDualClifford::<f64, 8>::from_logits(&[1.5, 2.0, 0.8, 1.2, 0.5, 1.8, 0.3, 0.9]);
    let neutral = TropicalDualClifford::from_logits(&[0.0; 8]);
    let moved = tdc.transform(&neutral);
    assert!(tdc.distance(&moved) < 10.0);
}
#[test]
fn test_tropical_dp_consistency() {
    // The tropical Viterbi decoder must agree with the textbook max-sum
    // reference implementation on a small hand-built HMM.
    let n_states = 5;
    let n_obs = 3;
    // Log-score 0 for staying in a state, -1 for switching.
    let trans: Vec<Vec<f64>> = (0..n_states)
        .map(|i| {
            (0..n_states)
                .map(|j| if i == j { 0.0 } else { -1.0 })
                .collect()
        })
        .collect();
    // Distinct, monotonically increasing emission scores per (state, obs) pair.
    let emit: Vec<Vec<f64>> = (0..n_states)
        .map(|s| {
            (0..n_obs)
                .map(|o| (s * n_obs + o) as f64 * 0.1)
                .collect()
        })
        .collect();
    let obs = vec![0, 1, 2];
    let decoder = TropicalViterbi::new(trans.clone(), emit.clone());
    let (tropical_path, _score) = decoder.decode(&obs);
    let reference_path = standard_viterbi(&trans, &emit, &obs);
    assert_eq!(tropical_path.len(), reference_path.len());
    assert_eq!(tropical_path, reference_path);
}
/// Textbook max-sum Viterbi decoder, used as an independent reference for the
/// tropical-algebra decoder.
///
/// `transitions[i][j]` is the log-score of moving from state `i` to `j`;
/// `emissions[s][o]` is the log-score of state `s` emitting observation `o`.
/// Returns the highest-scoring state sequence, or an empty vector when there
/// are no observations or no states.
fn standard_viterbi(
    transitions: &[Vec<f64>],
    emissions: &[Vec<f64>],
    observations: &[usize],
) -> Vec<usize> {
    let n_states = transitions.len();
    let steps = observations.len();
    if steps == 0 || n_states == 0 {
        return Vec::new();
    }
    // scores[t][s]: best log-score of any path ending in state s at time t.
    // backptr[t][s]: predecessor state on that best path.
    let mut scores = vec![vec![f64::NEG_INFINITY; n_states]; steps];
    let mut backptr = vec![vec![0usize; n_states]; steps];
    // Initialization: emission score only. NOTE(review): an out-of-range first
    // observation leaves the cell at -inf, while later steps fall back to an
    // emission score of 0.0 — asymmetric, but only reachable with malformed
    // input; preserved as-is.
    for (s, em) in emissions.iter().enumerate().take(n_states) {
        if let Some(&score) = em.get(observations[0]) {
            scores[0][s] = score;
        }
    }
    // Recursion: maximize over predecessors. Strict '>' keeps the first
    // maximizer on ties, matching the original tie-breaking.
    for t in 1..steps {
        for curr in 0..n_states {
            let emit = emissions
                .get(curr)
                .and_then(|em| em.get(observations[t]))
                .copied()
                .unwrap_or(0.0);
            for prev in 0..n_states {
                let candidate = scores[t - 1][prev] + transitions[prev][curr] + emit;
                if candidate > scores[t][curr] {
                    scores[t][curr] = candidate;
                    backptr[t][curr] = prev;
                }
            }
        }
    }
    // Termination: pick the best final state (state 0 when all are -inf).
    let last = steps - 1;
    let mut best = 0;
    for s in 1..n_states {
        if scores[last][s] > scores[last][best] {
            best = s;
        }
    }
    // Backtrack through the predecessor table.
    let mut path = vec![0usize; steps];
    path[last] = best;
    for t in (1..steps).rev() {
        path[t - 1] = backptr[t][path[t]];
    }
    path
}
#[test]
fn test_numerical_stability() {
    // Extreme logits must not leak NaN/inf into the evaluation score, and an
    // object's distance to itself must be numerically zero.
    let harsh = vec![100.0, -100.0, 50.0, -50.0];
    let tdc = TropicalDualClifford::<f64, 4>::from_logits(&harsh);
    let eval = tdc.evaluate(&tdc);
    assert!(eval.combined_score.is_finite());
    assert!(!eval.combined_score.is_nan());
    let self_dist = tdc.distance(&tdc);
    assert!(self_dist < 1e-10);
}
#[test]
fn test_interpolation_consistency() {
    // Interpolation endpoints (t = 0 and t = 1) must reproduce the operands;
    // the midpoint should sit roughly equidistant from both.
    let a = TropicalDualClifford::<f64, 4>::from_logits(&[1.0, 0.0, 0.0, 0.0]);
    let b = TropicalDualClifford::<f64, 4>::from_logits(&[0.0, 1.0, 0.0, 0.0]);
    let at_start = a.interpolate(&b, 0.0);
    let at_end = a.interpolate(&b, 1.0);
    let dist_0 = a.distance(&at_start);
    let dist_1 = b.distance(&at_end);
    println!("Debug - dist_0 (should be ~0): {}", dist_0);
    println!("Debug - dist_1 (should be ~0): {}", dist_1);
    assert!(dist_0 < 1e-10);
    assert!(dist_1 < 1e-10);
    let midpoint = a.interpolate(&b, 0.5);
    // Loose symmetry bound: the midpoint need not be exactly equidistant.
    assert!((a.distance(&midpoint) - b.distance(&midpoint)).abs() < 1.0);
}
// Placeholder for a future cross-representation conversion check; the body is
// empty, and #[ignore] keeps it out of normal test runs until implemented.
#[test]
#[ignore]
fn test_conversion_consistency() {
}
#[test]
fn test_stress_integration() {
    // Chain ten transforms and verify the distance to the starting object
    // never degenerates to NaN or infinity.
    let big = 100;
    let base_logits: Vec<f64> = (0..big).map(|i| (i as f64) * 0.01 - 0.5).collect();
    let origin = TropicalDualClifford::<f64, 8>::from_logits(&base_logits[..8]);
    let mut state = origin.clone();
    for step in 0..10 {
        let tweak: Vec<f64> = (0..8).map(|k| (step + k) as f64 * 0.01).collect();
        let modifier = TropicalDualClifford::<f64, 8>::from_logits(&tweak);
        state = state.transform(&modifier);
        let drift = state.distance(&origin);
        assert!(drift.is_finite());
        assert!(!drift.is_nan());
    }
}
#[test]
fn test_floating_point_consistency() {
    // Construction must succeed for both f32 and f64 scalar types; this is a
    // compile-and-run smoke test, so the results are intentionally unused.
    let doubles = vec![1.0f64, 2.0, 0.5, 1.5];
    let singles: Vec<f32> = doubles.iter().map(|&v| v as f32).collect();
    let _tdc_f64 = TropicalDualClifford::<f64, 4>::from_logits(&doubles);
    let _tdc_f32 = TropicalDualClifford::<f32, 4>::from_logits(&singles);
}