use super::*;
#[test]
fn m7_embedding_similarity_correlation() {
    // Near-parallel entity embeddings should score high under cosine
    // similarity, orthogonal ones near zero, and the configured embedding
    // dimension must be preserved by the space.
    let mut space = EmbeddingSpace::new(6, 8);
    space.add_relation("similar_to");
    space.set_entity(0, vec![1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]);
    space.set_entity(1, vec![0.9, 0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]);
    space.set_entity(2, vec![0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]);
    space.set_entity(3, vec![0.0, 0.9, 0.1, 0.0, 0.0, 0.0, 0.0, 0.0]);
    space.set_entity(4, vec![0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0]);
    space.set_entity(5, vec![0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0]);
    // Cosine similarity between two stored entities by id.
    let sim = |a, b| cosine_similarity(space.get_entity(a).unwrap(), space.get_entity(b).unwrap());
    let sim_01 = sim(0, 1);
    let sim_23 = sim(2, 3);
    let sim_45 = sim(4, 5);
    assert!(sim_01 > 0.9, "Related entities 0,1 should be similar: {sim_01}");
    assert!(sim_23 > 0.9, "Related entities 2,3 should be similar: {sim_23}");
    assert!(sim_45 < 0.1, "Unrelated entities 4,5 should be dissimilar: {sim_45}");
    assert_eq!(space.dim(), 8, "Embedding dimension preserved");
}
/// Cosine similarity of two vectors, returning 0.0 when either norm is
/// (near) zero to avoid dividing by ~0.
///
/// Note: the dot product is taken over the zipped overlap of `a` and `b`,
/// while each norm covers its full slice — same element pairing as before.
fn cosine_similarity(a: &[f64], b: &[f64]) -> f64 {
    let norm = |v: &[f64]| v.iter().fold(0.0_f64, |acc, x| acc + x * x).sqrt();
    let dot = a.iter().zip(b).fold(0.0_f64, |acc, (x, y)| acc + x * y);
    let (norm_a, norm_b) = (norm(a), norm(b));
    if norm_a < 1e-10 || norm_b < 1e-10 {
        return 0.0;
    }
    dot / (norm_a * norm_b)
}
#[test]
fn m8_negative_sampling_discrimination() {
    // Scores for true ("positive") and corrupted ("negative") triples must be
    // finite, and the margin-based contrastive loss built from them as well.
    let mut space = EmbeddingSpace::new(10, 8);
    space.add_relation("knows");
    let positive_triples = [(0, 1), (1, 2), (2, 3)];
    let negative_triples = [(0, 9), (5, 6), (7, 8)];
    let pos_scores: Vec<f64> = positive_triples
        .iter()
        .map(|&(s, o)| space.score(s, "knows", o))
        .collect();
    let neg_scores: Vec<f64> = negative_triples
        .iter()
        .map(|&(s, o)| space.score(s, "knows", o))
        .collect();
    // Hinge-style contrastive loss per pair: max(0, margin - pos + neg).
    let margin = 1.0;
    let contrastive_losses: Vec<f64> = pos_scores
        .iter()
        .zip(neg_scores.iter())
        .map(|(pos, neg)| (margin - pos + neg).max(0.0))
        .collect();
    assert_eq!(contrastive_losses.len(), 3);
    let avg_loss: f64 =
        contrastive_losses.iter().sum::<f64>() / contrastive_losses.len() as f64;
    assert!(avg_loss.is_finite(), "Contrastive loss should be finite");
    let pos_variance = variance(&pos_scores);
    let neg_variance = variance(&neg_scores);
    println!(
        "M8: Pos variance={:.4}, Neg variance={:.4}, Avg contrastive loss={:.4}",
        pos_variance, neg_variance, avg_loss
    );
    assert!(pos_scores.iter().all(|s| s.is_finite()));
    assert!(neg_scores.iter().all(|s| s.is_finite()));
}
/// Population variance (divides by n, not n-1); defined as 0.0 for an
/// empty slice so callers never divide by zero.
fn variance(values: &[f64]) -> f64 {
    let n = values.len();
    if n == 0 {
        return 0.0;
    }
    let mean = values.iter().sum::<f64>() / n as f64;
    values
        .iter()
        .map(|v| {
            let d = v - mean;
            d * d
        })
        .sum::<f64>()
        / n as f64
}
#[test]
fn m9_curriculum_learning_convergence() {
    // Curriculum of easy -> medium -> hard fact matrices: reconstruction
    // losses from continuous logical joins must stay finite in every phase.
    let easy_facts = vec![vec![vec![1.0, 0.0], vec![0.0, 1.0]]];
    let medium_facts = vec![
        vec![vec![0.0, 1.0], vec![0.0, 0.0]],
        vec![vec![0.0, 0.0], vec![0.0, 1.0]],
    ];
    let hard_facts = vec![vec![
        vec![0.0, 1.0, 0.0],
        vec![0.0, 0.0, 1.0],
        vec![0.0, 0.0, 0.0],
    ]];
    let mut phase_losses: Vec<(&str, f64)> = Vec::new();
    // Easy phase: self-join each fact against itself.
    for fact in &easy_facts {
        let joined = logical_join(fact, fact, LogicMode::Continuous);
        phase_losses.push(("easy", compute_reconstruction_loss(fact, &joined)));
    }
    // Medium phase: join each adjacent pair of facts.
    for pair in medium_facts.windows(2) {
        let joined = logical_join(&pair[0], &pair[1], LogicMode::Continuous);
        phase_losses.push(("medium", compute_reconstruction_loss(&pair[0], &joined)));
    }
    // Hard phase: self-join again, but on larger matrices.
    for fact in &hard_facts {
        let joined = logical_join(fact, fact, LogicMode::Continuous);
        phase_losses.push(("hard", compute_reconstruction_loss(fact, &joined)));
    }
    let total_loss: f64 = phase_losses.iter().map(|(_, loss)| *loss).sum();
    println!("M9 Curriculum phases: {:?}", phase_losses);
    assert!(total_loss.is_finite(), "Curriculum losses should be finite");
    let easy_loss: f64 = phase_losses
        .iter()
        .filter(|(phase, _)| *phase == "easy")
        .map(|(_, loss)| *loss)
        .sum();
    assert!(
        easy_loss.is_finite(),
        "Easy curriculum phase should converge"
    );
}
/// Mean squared error between two matrices, averaged over the zipped
/// overlap of rows and columns (extra rows/cells on either side are
/// ignored). Returns 0.0 when there is no overlap at all.
fn compute_reconstruction_loss(original: &[Vec<f64>], reconstructed: &[Vec<f64>]) -> f64 {
    let (total, count) = original.iter().zip(reconstructed).fold(
        (0.0_f64, 0_usize),
        |acc, (orig_row, recon_row)| {
            orig_row
                .iter()
                .zip(recon_row)
                .fold(acc, |(sum, n), (o, r)| (sum + (o - r) * (o - r), n + 1))
        },
    );
    if count == 0 {
        0.0
    } else {
        total / count as f64
    }
}
#[test]
fn m10_symbolic_constraints_llm_outputs() {
    // A boolean mask acts as a symbolic constraint on softmax outputs:
    // masked tokens must get ~0 probability, the remainder must renormalize
    // to 1, and the constraint must actually change the argmax here.
    let raw_logits = vec![vec![1.0, 2.0, 0.5, 3.0, 1.5, 0.0, 2.5, 1.0]];
    let constraint_mask = vec![vec![false, true, false, true, false, true, false, true]];
    let constrained_logits =
        apply_nonlinearity_with_mask(&raw_logits, Nonlinearity::Softmax, Some(&constraint_mask));
    // Every masked position must be driven to (near) zero probability.
    for idx in [1usize, 3, 5, 7] {
        assert!(
            constrained_logits[0][idx] < 1e-6,
            "Masked token {idx} should have ~0 prob"
        );
    }
    let unmasked_sum: f64 = [0usize, 2, 4, 6]
        .iter()
        .map(|&idx| constrained_logits[0][idx])
        .sum();
    assert!(
        (unmasked_sum - 1.0).abs() < 1e-5,
        "Unmasked probabilities should sum to 1: {unmasked_sum}"
    );
    // Index of the maximum entry in a row (ties/NaN treated as equal).
    let argmax = |row: &[f64]| -> usize {
        row.iter()
            .enumerate()
            .max_by(|a, b| a.1.partial_cmp(b.1).unwrap_or(std::cmp::Ordering::Equal))
            .map(|(idx, _)| idx)
            .unwrap_or(0)
    };
    let unconstrained = apply_nonlinearity(&raw_logits, Nonlinearity::Softmax);
    let unconstrained_argmax = argmax(&unconstrained[0]);
    let constrained_argmax = argmax(&constrained_logits[0]);
    assert_eq!(
        unconstrained_argmax, 3,
        "Unconstrained argmax should be token 3"
    );
    assert!(
        [0, 2, 4, 6].contains(&constrained_argmax),
        "Constrained argmax should be in valid set: got {constrained_argmax}"
    );
    println!(
        "M10: Unconstrained argmax={}, Constrained argmax={}",
        unconstrained_argmax, constrained_argmax
    );
    assert_ne!(
        unconstrained_argmax, constrained_argmax,
        "Symbolic constraint should change the output"
    );
}
/// Test-local shim that delegates to `ops::apply_nonlinearity_with_mask`,
/// letting the tests above call it without the `ops::` path prefix.
///
/// NOTE(review): `mask` entries set to `true` appear to mark positions to
/// suppress (the M10 test asserts they end up with ~0 probability) — confirm
/// against the `ops` implementation.
fn apply_nonlinearity_with_mask(
    scores: &[Vec<f64>],
    nonlinearity: Nonlinearity,
    mask: Option<&[Vec<bool>]>,
) -> Vec<Vec<f64>> {
    ops::apply_nonlinearity_with_mask(scores, nonlinearity, mask)
}
/// Short-named alias for `apply_nonlinearity_with_temperature`, exposed
/// `pub(super)` so sibling test modules under the parent can reuse it.
///
/// NOTE(review): presumably `temperature` scales the logits before the
/// nonlinearity is applied — verify in the wrapped function.
pub(super) fn apply_nonlinearity_with_temp(
    scores: &[Vec<f64>],
    nonlinearity: Nonlinearity,
    temperature: f64,
) -> Vec<Vec<f64>> {
    apply_nonlinearity_with_temperature(scores, nonlinearity, temperature)
}