use std::collections::HashMap;
use tensorlogic_compiler::{compile_to_einsum_with_config, CompilationConfig};
use tensorlogic_infer::DummyTensor;
use tensorlogic_ir::{EinsumGraph, EinsumNode, OpType, TLExpr, Term};
fn execute_graph_with_scalars(
graph: &EinsumGraph,
inputs: &HashMap<&str, f64>,
) -> Result<f64, String> {
let mut tensor_table: HashMap<usize, DummyTensor> = HashMap::new();
for (idx, name) in graph.tensors.iter().enumerate() {
let val = inputs.get(name.as_str()).copied().unwrap_or(1.0);
tensor_table.insert(
idx,
DummyTensor::with_data(name.clone(), vec![1], vec![val]),
);
}
for (node_idx, node) in graph.nodes.iter().enumerate() {
let result = execute_node(node, &tensor_table)
.map_err(|e| format!("Node {} error: {}", node_idx, e))?;
let out_slot = node
.outputs
.first()
.copied()
.ok_or_else(|| format!("Node {} has no outputs", node_idx))?;
tensor_table.insert(out_slot, result);
}
let out_idx = graph
.outputs
.first()
.copied()
.ok_or_else(|| "Graph has no outputs".to_string())?;
let out = tensor_table
.get(&out_idx)
.ok_or_else(|| format!("Output tensor index {} not found", out_idx))?;
out.data
.first()
.copied()
.ok_or_else(|| "Output tensor has no data".to_string())
}
/// Evaluates a single graph node against the current tensor table.
///
/// Tensors in this harness are scalar-shaped (len-1 data), so einsum
/// degenerates to a product of the inputs' first elements.
fn execute_node(
    node: &EinsumNode,
    table: &HashMap<usize, DummyTensor>,
) -> Result<DummyTensor, String> {
    match &node.op {
        OpType::Einsum { spec } => {
            let inputs: Vec<DummyTensor> = node
                .inputs
                .iter()
                .map(|&idx| {
                    table
                        .get(&idx)
                        .cloned()
                        .ok_or_else(|| format!("Einsum: tensor {} not found", idx))
                })
                .collect::<Result<_, _>>()?;
            let product: f64 = inputs
                .iter()
                .filter_map(|t| t.data.first().copied())
                .product();
            let out_name = format!("einsum({})", spec);
            Ok(DummyTensor::with_data(out_name, vec![1], vec![product]))
        }
        OpType::ElemUnary { op } => {
            // Checked access instead of `node.inputs[0]`, which would panic on
            // a malformed node; a malformed graph should surface as an Err.
            let &idx = node
                .inputs
                .first()
                .ok_or_else(|| "ElemUnary: node has no inputs".to_string())?;
            let input = table
                .get(&idx)
                .ok_or_else(|| format!("ElemUnary: tensor {} not found", idx))?;
            // Collecting into Result short-circuits on the first bad element;
            // the original's identity `.map_err(|e: String| e)` was redundant.
            let result_data: Vec<f64> = input
                .data
                .iter()
                .map(|&v| apply_unary_op(op, v))
                .collect::<Result<Vec<f64>, String>>()?;
            Ok(DummyTensor::with_data(
                format!("{}({})", op, input.name),
                input.shape.clone(),
                result_data,
            ))
        }
        OpType::ElemBinary { op } => {
            let (ia, ib) = match (node.inputs.first(), node.inputs.get(1)) {
                (Some(&x), Some(&y)) => (x, y),
                _ => return Err("ElemBinary: node needs two inputs".to_string()),
            };
            let a = table
                .get(&ia)
                .ok_or_else(|| format!("ElemBinary: tensor {} not found", ia))?;
            let b = table
                .get(&ib)
                .ok_or_else(|| format!("ElemBinary: tensor {} not found", ib))?;
            // `zip` would silently truncate on mismatched lengths; fail loudly.
            if a.data.len() != b.data.len() {
                return Err(format!(
                    "ElemBinary: length mismatch {} vs {}",
                    a.data.len(),
                    b.data.len()
                ));
            }
            let result_data: Vec<f64> = a
                .data
                .iter()
                .zip(b.data.iter())
                .map(|(&av, &bv)| apply_binary_op(op, av, bv))
                .collect::<Result<Vec<f64>, String>>()?;
            Ok(DummyTensor::with_data(
                format!("{}({},{})", op, a.name, b.name),
                a.shape.clone(),
                result_data,
            ))
        }
        OpType::Reduce { op, axes } => {
            let &idx = node
                .inputs
                .first()
                .ok_or_else(|| "Reduce: node has no inputs".to_string())?;
            let input = table
                .get(&idx)
                .ok_or_else(|| format!("Reduce: tensor {} not found", idx))?;
            // An empty axis list is an identity reduce; otherwise collapse the
            // whole tensor to one scalar.
            if axes.is_empty() {
                return Ok(input.clone());
            }
            let val = apply_reduce_op(op, &input.data);
            Ok(DummyTensor::with_data(
                format!("reduce_{}({})", op, input.name),
                vec![1],
                vec![val],
            ))
        }
    }
}
/// Scalar semantics for the unary element-wise ops the compiler may emit.
/// Op names are matched case-insensitively.
fn apply_unary_op(op: &str, v: f64) -> Result<f64, String> {
    let lowered = op.to_lowercase();
    let value = match lowered.as_str() {
        "relu" => v.max(0.0),
        "sigmoid" => 1.0 / (1.0 + (-v).exp()),
        "oneminus" | "one_minus" => 1.0 - v,
        other => return Err(format!("Unknown unary op: {}", other)),
    };
    Ok(value)
}
/// Scalar semantics for the binary element-wise ops; comparison ops encode
/// truth as 1.0/0.0. Op names are matched case-insensitively.
fn apply_binary_op(op: &str, a: f64, b: f64) -> Result<f64, String> {
    let truth = |cond: bool| if cond { 1.0 } else { 0.0 };
    let lowered = op.to_lowercase();
    let value = match lowered.as_str() {
        "add" => a + b,
        "subtract" | "sub" => a - b,
        "multiply" | "mul" => a * b,
        // Division by a (near-)zero divisor is defined as 0.0, not inf/NaN.
        "divide" | "div" => {
            if b.abs() < 1e-10 {
                0.0
            } else {
                a / b
            }
        }
        "min" => a.min(b),
        "max" => a.max(b),
        "eq" | "equal" => truth((a - b).abs() < 1e-10),
        "lt" | "less" => truth(a < b),
        "gt" | "greater" => truth(a > b),
        "lte" | "le" => truth(a <= b),
        "gte" | "ge" => truth(a >= b),
        "ormax" | "or_max" => a.max(b),
        "orprobsum" | "or_prob_sum" => a + b - a * b,
        "nand" => 1.0 - (a * b),
        "nor" => 1.0 - a.max(b),
        "xor" => (a - b).abs(),
        other => return Err(format!("Unknown binary op: {}", other)),
    };
    Ok(value)
}
/// Reduces a data vector to one scalar. Op names are case-insensitive.
///
/// `mean` on an empty slice returns 0.0 (the original divided by zero,
/// yielding NaN). Unknown ops deliberately fall back to summation.
fn apply_reduce_op(op: &str, data: &[f64]) -> f64 {
    match op.to_lowercase().as_str() {
        "sum" => data.iter().sum(),
        "max" => data.iter().cloned().fold(f64::NEG_INFINITY, f64::max),
        "min" => data.iter().cloned().fold(f64::INFINITY, f64::min),
        "mean" => {
            if data.is_empty() {
                0.0
            } else {
                data.iter().sum::<f64>() / data.len() as f64
            }
        }
        "product" | "prod" => data.iter().product(),
        // Fallback: treat unrecognized reduce ops as plain summation.
        _ => data.iter().sum(),
    }
}
/// Unary predicate atom `A(x)` shared by the test expressions below.
fn pred_a() -> TLExpr {
    TLExpr::pred("A", vec![Term::var("x")])
}
/// Unary predicate atom `B(x)` shared by the test expressions below.
fn pred_b() -> TLExpr {
    TLExpr::pred("B", vec![Term::var("x")])
}
/// Unary predicate atom `C(x)` shared by the test expressions below.
fn pred_c() -> TLExpr {
    TLExpr::pred("C", vec![Term::var("x")])
}
/// Compiles AND(A(x), B(x)) under `config` and evaluates it on scalars.
fn eval_and(a: f64, b: f64, config: &CompilationConfig) -> Result<f64, String> {
    let graph = compile_to_einsum_with_config(&TLExpr::and(pred_a(), pred_b()), config)
        .map_err(|e| format!("compile: {}", e))?;
    let name_a = find_tensor_name(&graph, "A");
    let name_b = find_tensor_name(&graph, "B");
    let inputs: HashMap<&str, f64> = [(name_a.as_str(), a), (name_b.as_str(), b)]
        .iter()
        .copied()
        .collect();
    execute_graph_with_scalars(&graph, &inputs)
}
/// Compiles OR(A(x), B(x)) under `config` and evaluates it on scalars.
fn eval_or(a: f64, b: f64, config: &CompilationConfig) -> Result<f64, String> {
    let graph = compile_to_einsum_with_config(&TLExpr::or(pred_a(), pred_b()), config)
        .map_err(|e| format!("compile: {}", e))?;
    let name_a = find_tensor_name(&graph, "A");
    let name_b = find_tensor_name(&graph, "B");
    let inputs: HashMap<&str, f64> = [(name_a.as_str(), a), (name_b.as_str(), b)]
        .iter()
        .copied()
        .collect();
    execute_graph_with_scalars(&graph, &inputs)
}
/// Compiles NOT(A(x)) under `config` and evaluates it on a scalar.
fn eval_not(a: f64, config: &CompilationConfig) -> Result<f64, String> {
    let graph = compile_to_einsum_with_config(&TLExpr::negate(pred_a()), config)
        .map_err(|e| format!("compile: {}", e))?;
    let name_a = find_tensor_name(&graph, "A");
    let inputs: HashMap<&str, f64> = std::iter::once((name_a.as_str(), a)).collect();
    execute_graph_with_scalars(&graph, &inputs)
}
/// Compiles NOT(NOT(A(x))) under `config` and evaluates it on a scalar.
fn eval_not_not(a: f64, config: &CompilationConfig) -> Result<f64, String> {
    let graph =
        compile_to_einsum_with_config(&TLExpr::negate(TLExpr::negate(pred_a())), config)
            .map_err(|e| format!("compile: {}", e))?;
    let name_a = find_tensor_name(&graph, "A");
    let inputs: HashMap<&str, f64> = std::iter::once((name_a.as_str(), a)).collect();
    execute_graph_with_scalars(&graph, &inputs)
}
/// Compiles IMPLY(A(x), B(x)) under `config` and evaluates it on scalars.
fn eval_imply(a: f64, b: f64, config: &CompilationConfig) -> Result<f64, String> {
    let graph = compile_to_einsum_with_config(&TLExpr::imply(pred_a(), pred_b()), config)
        .map_err(|e| format!("compile: {}", e))?;
    let name_a = find_tensor_name(&graph, "A");
    let name_b = find_tensor_name(&graph, "B");
    let inputs: HashMap<&str, f64> = [(name_a.as_str(), a), (name_b.as_str(), b)]
        .iter()
        .copied()
        .collect();
    execute_graph_with_scalars(&graph, &inputs)
}
/// Compiles NOT(AND(A(x), B(x))) under `config` and evaluates it on scalars.
fn eval_not_and(a: f64, b: f64, config: &CompilationConfig) -> Result<f64, String> {
    let graph =
        compile_to_einsum_with_config(&TLExpr::negate(TLExpr::and(pred_a(), pred_b())), config)
            .map_err(|e| format!("compile: {}", e))?;
    let name_a = find_tensor_name(&graph, "A");
    let name_b = find_tensor_name(&graph, "B");
    let inputs: HashMap<&str, f64> = [(name_a.as_str(), a), (name_b.as_str(), b)]
        .iter()
        .copied()
        .collect();
    execute_graph_with_scalars(&graph, &inputs)
}
/// Compiles OR(NOT(A(x)), NOT(B(x))) under `config` and evaluates it on scalars.
fn eval_or_not_not(a: f64, b: f64, config: &CompilationConfig) -> Result<f64, String> {
    let expr = TLExpr::or(TLExpr::negate(pred_a()), TLExpr::negate(pred_b()));
    let graph = compile_to_einsum_with_config(&expr, config)
        .map_err(|e| format!("compile: {}", e))?;
    let name_a = find_tensor_name(&graph, "A");
    let name_b = find_tensor_name(&graph, "B");
    let inputs: HashMap<&str, f64> = [(name_a.as_str(), a), (name_b.as_str(), b)]
        .iter()
        .copied()
        .collect();
    execute_graph_with_scalars(&graph, &inputs)
}
/// Compiles NOT(OR(A(x), B(x))) under `config` and evaluates it on scalars.
fn eval_not_or(a: f64, b: f64, config: &CompilationConfig) -> Result<f64, String> {
    let graph =
        compile_to_einsum_with_config(&TLExpr::negate(TLExpr::or(pred_a(), pred_b())), config)
            .map_err(|e| format!("compile: {}", e))?;
    let name_a = find_tensor_name(&graph, "A");
    let name_b = find_tensor_name(&graph, "B");
    let inputs: HashMap<&str, f64> = [(name_a.as_str(), a), (name_b.as_str(), b)]
        .iter()
        .copied()
        .collect();
    execute_graph_with_scalars(&graph, &inputs)
}
/// Compiles AND(NOT(A(x)), NOT(B(x))) under `config` and evaluates it on scalars.
fn eval_and_not_not(a: f64, b: f64, config: &CompilationConfig) -> Result<f64, String> {
    let expr = TLExpr::and(TLExpr::negate(pred_a()), TLExpr::negate(pred_b()));
    let graph = compile_to_einsum_with_config(&expr, config)
        .map_err(|e| format!("compile: {}", e))?;
    let name_a = find_tensor_name(&graph, "A");
    let name_b = find_tensor_name(&graph, "B");
    let inputs: HashMap<&str, f64> = [(name_a.as_str(), a), (name_b.as_str(), b)]
        .iter()
        .copied()
        .collect();
    execute_graph_with_scalars(&graph, &inputs)
}
/// Compiles AND(A(x), OR(B(x), C(x))) under `config` and evaluates it on scalars.
fn eval_and_a_or_b_c(a: f64, b: f64, c: f64, config: &CompilationConfig) -> Result<f64, String> {
    let expr = TLExpr::and(pred_a(), TLExpr::or(pred_b(), pred_c()));
    let graph = compile_to_einsum_with_config(&expr, config)
        .map_err(|e| format!("compile: {}", e))?;
    let name_a = find_tensor_name(&graph, "A");
    let name_b = find_tensor_name(&graph, "B");
    let name_c = find_tensor_name(&graph, "C");
    let inputs: HashMap<&str, f64> = [
        (name_a.as_str(), a),
        (name_b.as_str(), b),
        (name_c.as_str(), c),
    ]
    .iter()
    .copied()
    .collect();
    execute_graph_with_scalars(&graph, &inputs)
}
fn eval_or_and_a_b_and_a_c(
a: f64,
b: f64,
c: f64,
config: &CompilationConfig,
) -> Result<f64, String> {
let expr = TLExpr::or(
TLExpr::and(pred_a(), pred_b()),
TLExpr::and(
TLExpr::pred("A", vec![Term::var("x")]),
TLExpr::pred("C", vec![Term::var("x")]),
),
);
let graph =
compile_to_einsum_with_config(&expr, config).map_err(|e| format!("compile: {}", e))?;
let tensor_a_name = find_tensor_name(&graph, "A");
let tensor_b_name = find_tensor_name(&graph, "B");
let tensor_c_name = find_tensor_name(&graph, "C");
let mut inputs = HashMap::new();
inputs.insert(tensor_a_name.as_str(), a);
inputs.insert(tensor_b_name.as_str(), b);
inputs.insert(tensor_c_name.as_str(), c);
execute_graph_with_scalars(&graph, &inputs)
}
/// Compiles OR(A, AND(A, B)) — the absorption-law shape — and evaluates it on scalars.
fn eval_or_a_and_a_b(a: f64, b: f64, config: &CompilationConfig) -> Result<f64, String> {
    // Consistency: use the shared pred_a() helper for the inner occurrence of
    // A(x) instead of an inline TLExpr::pred duplicate.
    let expr = TLExpr::or(pred_a(), TLExpr::and(pred_a(), pred_b()));
    let graph =
        compile_to_einsum_with_config(&expr, config).map_err(|e| format!("compile: {}", e))?;
    let tensor_a_name = find_tensor_name(&graph, "A");
    let tensor_b_name = find_tensor_name(&graph, "B");
    let mut inputs = HashMap::new();
    inputs.insert(tensor_a_name.as_str(), a);
    inputs.insert(tensor_b_name.as_str(), b);
    execute_graph_with_scalars(&graph, &inputs)
}
/// Compiles AND(A, OR(A, B)) — the dual absorption-law shape — and evaluates it on scalars.
fn eval_and_a_or_a_b(a: f64, b: f64, config: &CompilationConfig) -> Result<f64, String> {
    // Consistency: use the shared pred_a() helper for the inner occurrence of
    // A(x) instead of an inline TLExpr::pred duplicate.
    let expr = TLExpr::and(pred_a(), TLExpr::or(pred_a(), pred_b()));
    let graph =
        compile_to_einsum_with_config(&expr, config).map_err(|e| format!("compile: {}", e))?;
    let tensor_a_name = find_tensor_name(&graph, "A");
    let tensor_b_name = find_tensor_name(&graph, "B");
    let mut inputs = HashMap::new();
    inputs.insert(tensor_a_name.as_str(), a);
    inputs.insert(tensor_b_name.as_str(), b);
    execute_graph_with_scalars(&graph, &inputs)
}
/// Tensor names are rendered as "Pred[args…]"; returns the first one whose
/// prefix matches `pred`, falling back to a canonical "Pred[a]" guess when
/// the graph contains none.
fn find_tensor_name(graph: &EinsumGraph, pred: &str) -> String {
    let prefix = format!("{}[", pred);
    match graph.tensors.iter().find(|name| name.starts_with(&prefix)) {
        Some(name) => name.clone(),
        None => format!("{}[a]", pred),
    }
}
/// Panics unless `actual` is within `tol` of `expected`; `msg` prefixes the
/// failure report.
fn assert_close(actual: f64, expected: f64, tol: f64, msg: &str) {
    let diff = (actual - expected).abs();
    assert!(
        diff < tol,
        "{}: actual={:.8} expected={:.8} diff={:.2e}",
        msg,
        actual,
        expected,
        diff
    );
}
/// All six compilation strategies exercised by these tests, each paired with
/// the label used in assertion/panic messages.
fn all_configs() -> Vec<(&'static str, CompilationConfig)> {
    vec![
        (
            "soft_differentiable",
            CompilationConfig::soft_differentiable(),
        ),
        ("hard_boolean", CompilationConfig::hard_boolean()),
        ("fuzzy_godel", CompilationConfig::fuzzy_godel()),
        ("fuzzy_product", CompilationConfig::fuzzy_product()),
        ("fuzzy_lukasiewicz", CompilationConfig::fuzzy_lukasiewicz()),
        ("probabilistic", CompilationConfig::probabilistic()),
    ]
}
/// Representative sample of the unit interval, including both endpoints.
fn fuzzy_test_values() -> Vec<f64> {
    [0.0, 0.1, 0.3, 0.5, 0.7, 0.9, 1.0].to_vec()
}
/// Cartesian product {0,1} × {0,1}, in lexicographic order.
fn all_boolean_pairs() -> Vec<(f64, f64)> {
    let bits = [0.0, 1.0];
    bits.iter()
        .flat_map(|&a| bits.iter().map(move |&b| (a, b)))
        .collect()
}
#[test]
fn test_and_true_true_all_strategies_returns_high() {
    // Every strategy should classify AND(1,1) on the "true" side of 0.5.
    for (name, config) in all_configs() {
        let value = match eval_and(1.0, 1.0, &config) {
            Ok(v) => v,
            Err(e) => panic!("config={} error: {}", name, e),
        };
        assert!(
            value >= 0.5,
            "config={}: AND(1,1)={:.4} expected >= 0.5",
            name,
            value
        );
    }
}
#[test]
fn test_and_true_false_all_strategies_returns_low() {
    // Only the t-norm-like strategies are required to drive AND(1,0) low.
    let t_norm_configs: Vec<(&str, CompilationConfig)> = vec![
        (
            "soft_differentiable",
            CompilationConfig::soft_differentiable(),
        ),
        ("hard_boolean", CompilationConfig::hard_boolean()),
        ("fuzzy_godel", CompilationConfig::fuzzy_godel()),
        ("fuzzy_lukasiewicz", CompilationConfig::fuzzy_lukasiewicz()),
    ];
    for (name, config) in t_norm_configs {
        let value = match eval_and(1.0, 0.0, &config) {
            Ok(v) => v,
            Err(e) => panic!("config={} error: {}", name, e),
        };
        assert!(
            value <= 0.5,
            "config={}: AND(1,0)={:.4} expected <= 0.5",
            name,
            value
        );
    }
}
#[test]
fn test_and_soft_product_formula() {
    // Soft-differentiable AND is the product t-norm: AND(a,b) = a*b.
    let config = CompilationConfig::soft_differentiable();
    for &(a, b) in &[(0.3, 0.7), (0.8, 0.9), (0.0, 0.5), (1.0, 1.0)] {
        let got = match eval_and(a, b, &config) {
            Ok(v) => v,
            Err(e) => panic!("a={} b={} error: {}", a, b, e),
        };
        assert_close(got, a * b, 1e-9, &format!("soft AND({},{})", a, b));
    }
}
#[test]
fn test_and_lukasiewicz_formula() {
    // Łukasiewicz AND is the bounded-difference t-norm: max(a + b - 1, 0).
    let config = CompilationConfig::fuzzy_lukasiewicz();
    for &(a, b) in &[(0.3_f64, 0.7_f64), (0.8, 0.9), (0.2, 0.3), (1.0, 1.0)] {
        let got = match eval_and(a, b, &config) {
            Ok(v) => v,
            Err(e) => panic!("a={} b={} error: {}", a, b, e),
        };
        let want = (a + b - 1.0_f64).max(0.0);
        assert_close(got, want, 1e-9, &format!("luka AND({},{})", a, b));
    }
}
#[test]
fn test_and_hard_boolean_min_formula() {
    // Hard-boolean AND is the minimum t-norm: AND(a,b) = min(a,b).
    let config = CompilationConfig::hard_boolean();
    for &(a, b) in &[(0.3_f64, 0.7_f64), (0.8, 0.4), (0.0, 1.0), (1.0, 1.0)] {
        let got = match eval_and(a, b, &config) {
            Ok(v) => v,
            Err(e) => panic!("a={} b={} error: {}", a, b, e),
        };
        assert_close(got, a.min(b), 1e-9, &format!("hard AND({},{})", a, b));
    }
}
#[test]
fn test_or_false_false_all_strategies_returns_low() {
    // Every strategy should classify OR(0,0) on the "false" side of 0.5.
    for (name, config) in all_configs() {
        let value = match eval_or(0.0, 0.0, &config) {
            Ok(v) => v,
            Err(e) => panic!("config={} error: {}", name, e),
        };
        assert!(
            value <= 0.5,
            "config={}: OR(0,0)={:.4} expected <= 0.5",
            name,
            value
        );
    }
}
#[test]
fn test_or_soft_prob_sum_formula() {
    // Soft-differentiable OR is the probabilistic sum: a + b - a*b.
    let config = CompilationConfig::soft_differentiable();
    for &(a, b) in &[(0.3_f64, 0.7_f64), (0.5, 0.5), (0.0, 1.0), (1.0, 1.0)] {
        let got = match eval_or(a, b, &config) {
            Ok(v) => v,
            Err(e) => panic!("a={} b={} error: {}", a, b, e),
        };
        assert_close(got, a + b - a * b, 1e-9, &format!("soft OR({},{})", a, b));
    }
}
#[test]
fn test_or_hard_boolean_max_formula() {
    // Hard-boolean OR is the maximum s-norm: OR(a,b) = max(a,b).
    let config = CompilationConfig::hard_boolean();
    for &(a, b) in &[(0.3_f64, 0.7_f64), (0.8, 0.4), (0.0, 0.0), (1.0, 1.0)] {
        let got = match eval_or(a, b, &config) {
            Ok(v) => v,
            Err(e) => panic!("a={} b={} error: {}", a, b, e),
        };
        assert_close(got, a.max(b), 1e-9, &format!("hard OR({},{})", a, b));
    }
}
#[test]
fn test_or_lukasiewicz_formula() {
    // Łukasiewicz OR is the bounded sum: min(a + b, 1).
    let config = CompilationConfig::fuzzy_lukasiewicz();
    for &(a, b) in &[(0.3_f64, 0.7_f64), (0.6, 0.8), (0.0, 0.0), (1.0, 0.5)] {
        let got = match eval_or(a, b, &config) {
            Ok(v) => v,
            Err(e) => panic!("a={} b={} error: {}", a, b, e),
        };
        assert_close(got, (a + b).min(1.0), 1e-9, &format!("luka OR({},{})", a, b));
    }
}
#[test]
fn test_not_complement_formula() {
    // Every strategy implements NOT as the standard complement: 1 - a.
    for (name, config) in all_configs() {
        for a in fuzzy_test_values() {
            let got = match eval_not(a, &config) {
                Ok(v) => v,
                Err(e) => panic!("config={} a={} error: {}", name, a, e),
            };
            assert_close(
                got,
                1.0 - a,
                1e-9,
                &format!("config={} NOT({})", name, a),
            );
        }
    }
}
#[test]
fn test_not_double_negation_identity() {
    // Complement negation is involutive: NOT(NOT(a)) == a for all strategies.
    for (name, config) in all_configs() {
        for a in fuzzy_test_values() {
            let got = match eval_not_not(a, &config) {
                Ok(v) => v,
                Err(e) => panic!("config={} a={} error: {}", name, a, e),
            };
            assert_close(got, a, 1e-9, &format!("config={} NOT(NOT({}))", name, a));
        }
    }
}
#[test]
fn test_implication_relu_truth_table_hard_boolean() {
    // Pins the compiler's relu(b - a) implication encoding, NOT material
    // implication: note the (0,0) -> 0.0 row, where material F→F would be 1.
    let config = CompilationConfig::hard_boolean();
    // Each tuple is (a, b, expected relu(b - a)).
    let cases: &[(f64, f64, f64)] = &[
        (1.0, 1.0, 0.0), (1.0, 0.0, 0.0), (0.0, 1.0, 1.0), (0.0, 0.0, 0.0), ];
    let tol = 1e-9;
    for &(a, b, expected) in cases {
        let result =
            eval_imply(a, b, &config).unwrap_or_else(|e| panic!("imply({},{}) error: {}", a, b, e));
        assert_close(result, expected, tol, &format!("hard imply({},{})", a, b));
    }
}
#[test]
fn test_implication_lukasiewicz_uses_relu_formula() {
    // NOTE(review): the standard Łukasiewicz residuum is min(1, 1 - a + b);
    // this test instead pins relu(b - a), i.e. the same formula as the soft
    // implication. Confirm this is the intended behavior of the Łukasiewicz
    // config rather than a missing specialization.
    let config = CompilationConfig::fuzzy_lukasiewicz();
    let tol = 1e-9;
    for (a, b) in [(0.3_f64, 0.7_f64), (0.8, 0.3), (0.0, 0.0), (1.0, 1.0)] {
        let result =
            eval_imply(a, b, &config).unwrap_or_else(|e| panic!("a={} b={} error: {}", a, b, e));
        let expected = (b - a).max(0.0);
        assert_close(
            result,
            expected,
            tol,
            &format!("luka config imply({},{}) actual=relu", a, b),
        );
    }
}
#[test]
fn test_implication_relu_formula() {
    // Soft implication compiles to relu(b - a).
    let config = CompilationConfig::soft_differentiable();
    for &(a, b) in &[(0.3_f64, 0.7_f64), (0.8, 0.3), (0.5, 0.5), (0.0, 1.0)] {
        let got = match eval_imply(a, b, &config) {
            Ok(v) => v,
            Err(e) => panic!("a={} b={} error: {}", a, b, e),
        };
        let want = (b - a).max(0.0);
        assert_close(got, want, 1e-9, &format!("soft imply({},{})", a, b));
    }
}
#[test]
fn test_implication_soft_agrees_with_material_on_booleans() {
    // On crisp booleans, T→F must read false-ish and F→T true-ish everywhere.
    for (name, config) in all_configs() {
        let tf = match eval_imply(1.0, 0.0, &config) {
            Ok(v) => v,
            Err(e) => panic!("config={} error: {}", name, e),
        };
        let ft = match eval_imply(0.0, 1.0, &config) {
            Ok(v) => v,
            Err(e) => panic!("config={} error: {}", name, e),
        };
        assert!(
            tf <= 0.5,
            "config={}: (T→F)={:.4} should be <= 0.5",
            name,
            tf
        );
        assert!(
            ft >= 0.5,
            "config={}: (F→T)={:.4} should be >= 0.5",
            name,
            ft
        );
    }
}
#[test]
fn test_demorgan_and_soft_booleans() {
    // De Morgan on booleans: NOT(a AND b) == (NOT a) OR (NOT b), soft strategy.
    let config = CompilationConfig::soft_differentiable();
    for (a, b) in all_boolean_pairs() {
        let lhs = match eval_not_and(a, b, &config) {
            Ok(v) => v,
            Err(e) => panic!("NOT(AND) a={} b={} error: {}", a, b, e),
        };
        let rhs = match eval_or_not_not(a, b, &config) {
            Ok(v) => v,
            Err(e) => panic!("OR(NOT,NOT) a={} b={} error: {}", a, b, e),
        };
        assert_close(lhs, rhs, 1e-9, &format!("De Morgan AND soft ({},{})", a, b));
    }
}
#[test]
fn test_demorgan_or_soft_booleans() {
    // De Morgan on booleans: NOT(a OR b) == (NOT a) AND (NOT b), soft strategy.
    let config = CompilationConfig::soft_differentiable();
    for (a, b) in all_boolean_pairs() {
        let lhs = match eval_not_or(a, b, &config) {
            Ok(v) => v,
            Err(e) => panic!("NOT(OR) a={} b={} error: {}", a, b, e),
        };
        let rhs = match eval_and_not_not(a, b, &config) {
            Ok(v) => v,
            Err(e) => panic!("AND(NOT,NOT) a={} b={} error: {}", a, b, e),
        };
        assert_close(lhs, rhs, 1e-9, &format!("De Morgan OR soft ({},{})", a, b));
    }
}
#[test]
fn test_demorgan_and_hard() {
    // min/max logic satisfies De Morgan on all fuzzy values, not just booleans.
    let config = CompilationConfig::hard_boolean();
    for a in fuzzy_test_values() {
        for b in fuzzy_test_values() {
            let lhs = match eval_not_and(a, b, &config) {
                Ok(v) => v,
                Err(e) => panic!("NOT(AND) a={} b={} error: {}", a, b, e),
            };
            let rhs = match eval_or_not_not(a, b, &config) {
                Ok(v) => v,
                Err(e) => panic!("OR(NOT,NOT) a={} b={} error: {}", a, b, e),
            };
            assert_close(lhs, rhs, 1e-9, &format!("De Morgan AND hard ({},{})", a, b));
        }
    }
}
#[test]
fn test_demorgan_or_hard() {
    // Dual De Morgan law under min/max logic, over all fuzzy values.
    let config = CompilationConfig::hard_boolean();
    for a in fuzzy_test_values() {
        for b in fuzzy_test_values() {
            let lhs = match eval_not_or(a, b, &config) {
                Ok(v) => v,
                Err(e) => panic!("NOT(OR) a={} b={} error: {}", a, b, e),
            };
            let rhs = match eval_and_not_not(a, b, &config) {
                Ok(v) => v,
                Err(e) => panic!("AND(NOT,NOT) a={} b={} error: {}", a, b, e),
            };
            assert_close(lhs, rhs, 1e-9, &format!("De Morgan OR hard ({},{})", a, b));
        }
    }
}
#[test]
fn test_distributive_and_over_or_hard_boolean() {
    // min/max logic is fully distributive: a AND (b OR c) == (a AND b) OR (a AND c).
    let config = CompilationConfig::hard_boolean();
    let values = [0.0, 0.5, 1.0];
    for &a in &values {
        for &b in &values {
            for &c in &values {
                let lhs = match eval_and_a_or_b_c(a, b, c, &config) {
                    Ok(v) => v,
                    Err(e) => panic!("AND(a,OR(b,c)) a={} b={} c={} error: {}", a, b, c, e),
                };
                let rhs = match eval_or_and_a_b_and_a_c(a, b, c, &config) {
                    Ok(v) => v,
                    Err(e) => {
                        panic!("OR(AND(a,b),AND(a,c)) a={} b={} c={} error: {}", a, b, c, e)
                    }
                };
                assert_close(
                    lhs,
                    rhs,
                    1e-9,
                    &format!("distributive AND/OR hard ({},{},{})", a, b, c),
                );
            }
        }
    }
}
#[test]
fn test_distributive_and_over_or_soft_formula() {
    // Soft semantics: a AND (b OR c) compiles to a * (b + c - b*c).
    let config = CompilationConfig::soft_differentiable();
    for &(a, b, c) in &[
        (0.3_f64, 0.4_f64, 0.5_f64),
        (0.8, 0.2, 0.6),
        (1.0, 0.0, 1.0),
    ] {
        let got = match eval_and_a_or_b_c(a, b, c, &config) {
            Ok(v) => v,
            Err(e) => panic!("AND(a,OR(b,c)) a={} b={} c={} error: {}", a, b, c, e),
        };
        assert_close(
            got,
            a * (b + c - b * c),
            1e-9,
            &format!("soft AND(a,OR(b,c)) ({},{},{})", a, b, c),
        );
    }
}
#[test]
fn test_distributive_and_over_or_lukasiewicz_booleans() {
    // Łukasiewicz semantics distribute on crisp boolean inputs.
    let config = CompilationConfig::fuzzy_lukasiewicz();
    let values = [0.0_f64, 1.0_f64];
    for &a in &values {
        for &b in &values {
            for &c in &values {
                let lhs = match eval_and_a_or_b_c(a, b, c, &config) {
                    Ok(v) => v,
                    Err(e) => panic!("AND(a,OR(b,c)) a={} b={} c={} error: {}", a, b, c, e),
                };
                let rhs = match eval_or_and_a_b_and_a_c(a, b, c, &config) {
                    Ok(v) => v,
                    Err(e) => {
                        panic!("OR(AND(a,b),AND(a,c)) a={} b={} c={} error: {}", a, b, c, e)
                    }
                };
                assert_close(
                    lhs,
                    rhs,
                    1e-9,
                    &format!("distributive luka AND/OR booleans ({},{},{})", a, b, c),
                );
            }
        }
    }
}
#[test]
fn test_absorption_or_and_hard() {
    // Absorption law: a OR (a AND b) == a under min/max semantics.
    let config = CompilationConfig::hard_boolean();
    for a in fuzzy_test_values() {
        for b in fuzzy_test_values() {
            let got = match eval_or_a_and_a_b(a, b, &config) {
                Ok(v) => v,
                Err(e) => panic!("OR(a,AND(a,b)) a={} b={} error: {}", a, b, e),
            };
            assert_close(
                got,
                a,
                1e-9,
                &format!("absorption OR(a,AND(a,b)) hard ({},{})", a, b),
            );
        }
    }
}
#[test]
fn test_absorption_and_or_hard() {
    // Dual absorption law: a AND (a OR b) == a under min/max semantics.
    let config = CompilationConfig::hard_boolean();
    for a in fuzzy_test_values() {
        for b in fuzzy_test_values() {
            let got = match eval_and_a_or_a_b(a, b, &config) {
                Ok(v) => v,
                Err(e) => panic!("AND(a,OR(a,b)) a={} b={} error: {}", a, b, e),
            };
            assert_close(
                got,
                a,
                1e-9,
                &format!("absorption AND(a,OR(a,b)) hard ({},{})", a, b),
            );
        }
    }
}
#[test]
fn test_absorption_soft_at_boolean_inputs() {
    // Soft semantics only satisfy absorption when `a` is a crisp boolean.
    let config = CompilationConfig::soft_differentiable();
    for &a in [0.0_f64, 1.0_f64].iter() {
        for b in fuzzy_test_values() {
            let got = match eval_and_a_or_a_b(a, b, &config) {
                Ok(v) => v,
                Err(e) => panic!("AND(a,OR(a,b)) a={} b={} error: {}", a, b, e),
            };
            assert_close(
                got,
                a,
                1e-9,
                &format!("absorption AND(a,OR(a,b)) soft boolean ({},{})", a, b),
            );
        }
    }
}
#[test]
fn test_all_six_strategies_compile_without_error() {
    // Compilation itself must succeed for every strategy on a simple conjunction.
    let expr = TLExpr::and(pred_a(), pred_b());
    for (name, config) in all_configs() {
        if let Err(e) = compile_to_einsum_with_config(&expr, &config) {
            panic!("config={} failed to compile: {}", name, e);
        }
    }
}
#[test]
fn test_boolean_inputs_consistent_across_strategies() {
    // On crisp booleans, AND(0,0)=0 and AND(1,1)=1 must hold exactly everywhere.
    for (name, config) in all_configs() {
        let lo = match eval_and(0.0, 0.0, &config) {
            Ok(v) => v,
            Err(e) => panic!("config={} error: {}", name, e),
        };
        assert_close(lo, 0.0, 1e-9, &format!("config={} AND(0,0)", name));
        let hi = match eval_and(1.0, 1.0, &config) {
            Ok(v) => v,
            Err(e) => panic!("config={} error: {}", name, e),
        };
        assert_close(hi, 1.0, 1e-9, &format!("config={} AND(1,1)", name));
    }
}
#[test]
fn test_soft_and_leq_hard_and_for_high_confidence() {
    // Product t-norm is dominated by min on [0,1]: a*b <= min(a,b).
    let soft = CompilationConfig::soft_differentiable();
    let hard = CompilationConfig::hard_boolean();
    let highs = [0.7_f64, 0.8, 0.9, 1.0];
    for &a in &highs {
        for &b in &highs {
            let soft_val = match eval_and(a, b, &soft) {
                Ok(v) => v,
                Err(e) => panic!("soft AND({},{}) error: {}", a, b, e),
            };
            let hard_val = match eval_and(a, b, &hard) {
                Ok(v) => v,
                Err(e) => panic!("hard AND({},{}) error: {}", a, b, e),
            };
            assert!(
                soft_val <= hard_val + 1e-9,
                "product({},{})={:.6} should be <= min({},{})={:.6}",
                a,
                b,
                soft_val,
                a,
                b,
                hard_val
            );
        }
    }
}
#[test]
fn test_lukasiewicz_and_triangle_inequality() {
    // Sanity bound for the Łukasiewicz t-norm: AND(a,b) + AND(b,c) + 1 >= AND(a,c).
    let config = CompilationConfig::fuzzy_lukasiewicz();
    let values = [0.0_f64, 0.2, 0.4, 0.6, 0.8, 1.0];
    for &a in &values {
        for &b in &values {
            for &c in &values {
                let ab = match eval_and(a, b, &config) {
                    Ok(v) => v,
                    Err(e) => panic!("AND({},{}) error: {}", a, b, e),
                };
                let bc = match eval_and(b, c, &config) {
                    Ok(v) => v,
                    Err(e) => panic!("AND({},{}) error: {}", b, c, e),
                };
                let ac = match eval_and(a, c, &config) {
                    Ok(v) => v,
                    Err(e) => panic!("AND({},{}) error: {}", a, c, e),
                };
                assert!(
                    ab + bc + 1.0 >= ac - 1e-9,
                    "Łukasiewicz triangle: AND({},{})={:.4} + AND({},{})={:.4} + 1 < AND({},{})={:.4}",
                    a, b, ab, b, c, bc, a, c, ac
                );
            }
        }
    }
}
#[test]
fn test_not_output_in_unit_interval() {
    // NOT must stay inside [0,1] (up to numerical slack) for every strategy.
    for (name, config) in all_configs() {
        for a in fuzzy_test_values() {
            let got = match eval_not(a, &config) {
                Ok(v) => v,
                Err(e) => panic!("config={} NOT({}) error: {}", name, a, e),
            };
            assert!(
                got >= -1e-9 && got <= 1.0 + 1e-9,
                "config={}: NOT({})={:.6} out of [0,1]",
                name,
                a,
                got
            );
        }
    }
}
#[test]
fn test_and_output_in_unit_interval() {
    // AND must stay inside [0,1] (up to numerical slack) for every strategy.
    for (name, config) in all_configs() {
        for &(a, b) in &[(0.3_f64, 0.7_f64), (0.5, 0.5), (0.0, 1.0), (0.9, 0.9)] {
            let got = match eval_and(a, b, &config) {
                Ok(v) => v,
                Err(e) => panic!("config={} AND({},{}) error: {}", name, a, b, e),
            };
            assert!(
                got >= -1e-9 && got <= 1.0 + 1e-9,
                "config={}: AND({},{})={:.6} out of [0,1]",
                name,
                a,
                b,
                got
            );
        }
    }
}
#[test]
fn test_or_output_in_unit_interval() {
    // OR must stay inside [0,1] (up to numerical slack) for every strategy.
    for (name, config) in all_configs() {
        for &(a, b) in &[(0.3_f64, 0.7_f64), (0.5, 0.5), (0.0, 1.0), (0.9, 0.9)] {
            let got = match eval_or(a, b, &config) {
                Ok(v) => v,
                Err(e) => panic!("config={} OR({},{}) error: {}", name, a, b, e),
            };
            assert!(
                got >= -1e-9 && got <= 1.0 + 1e-9,
                "config={}: OR({},{})={:.6} out of [0,1]",
                name,
                a,
                b,
                got
            );
        }
    }
}
#[test]
fn test_godel_and_equals_min() {
    // Gödel AND is exactly the minimum t-norm.
    let godel = CompilationConfig::fuzzy_godel();
    for a in fuzzy_test_values() {
        for b in fuzzy_test_values() {
            let got = match eval_and(a, b, &godel) {
                Ok(v) => v,
                Err(e) => panic!("Gödel AND({},{}) error: {}", a, b, e),
            };
            assert_close(got, a.min(b), 1e-9, &format!("Gödel AND({},{})", a, b));
        }
    }
}
#[test]
fn test_godel_or_equals_max() {
    // Gödel OR is exactly the maximum s-norm.
    let godel = CompilationConfig::fuzzy_godel();
    for a in fuzzy_test_values() {
        for b in fuzzy_test_values() {
            let got = match eval_or(a, b, &godel) {
                Ok(v) => v,
                Err(e) => panic!("Gödel OR({},{}) error: {}", a, b, e),
            };
            assert_close(got, a.max(b), 1e-9, &format!("Gödel OR({},{})", a, b));
        }
    }
}
#[test]
fn test_probabilistic_and_formula() {
    // NOTE(review): the expected value here is a + b - a*b, which is the
    // probabilistic *sum* (noisy-OR) form — not the independent-events
    // product a*b usually associated with a probabilistic AND. This pins the
    // compiler's current behavior; confirm against the `probabilistic`
    // strategy's specification that AND is really meant to compile this way.
    let config = CompilationConfig::probabilistic();
    let tol = 1e-9;
    for (a, b) in [(0.3_f64, 0.7_f64), (0.5, 0.5), (0.0, 1.0), (0.2, 0.8)] {
        let result = eval_and(a, b, &config)
            .unwrap_or_else(|e| panic!("prob AND({},{}) error: {}", a, b, e));
        let expected = a + b - a * b;
        assert_close(
            result,
            expected,
            tol,
            &format!("probabilistic AND({},{})", a, b),
        );
    }
}