/// Evaluation mode for the logical tensor operations in this module.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum LogicMode {
    /// Values are thresholded to {0.0, 1.0} (crisp logic).
    Boolean,
    /// Values are treated as degrees of truth (fuzzy/probabilistic logic).
    Continuous,
}

/// Nonlinearities applied elementwise, or per row for `Softmax` and
/// `BooleanAttention`.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum Nonlinearity {
    Step,
    Sigmoid,
    Relu,
    Softmax,
    Tanh,
    BooleanAttention,
    Identity,
}

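/// Composes two relations by matrix multiplication over the shared middle
/// dimension; in `Boolean` mode the product is thresholded back to
/// {0.0, 1.0}. `t2` must have at least as many rows as `t1` has columns.
///
/// # Examples
///
/// A minimal sketch (`ignore`d because this module's public path is assumed
/// rather than known from this file):
///
/// ```ignore
/// let r = vec![vec![1.0, 0.0], vec![0.0, 1.0]];
/// let s = vec![vec![0.0, 1.0], vec![1.0, 0.0]];
/// assert_eq!(
///     logical_join(&r, &s, LogicMode::Boolean),
///     vec![vec![0.0, 1.0], vec![1.0, 0.0]],
/// );
/// ```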
#[must_use]
pub fn logical_join(t1: &[Vec<f64>], t2: &[Vec<f64>], mode: LogicMode) -> Vec<Vec<f64>> {
    let rows = t1.len();
    let inner = if t1.is_empty() { 0 } else { t1[0].len() };
    let cols = if t2.is_empty() { 0 } else { t2[0].len() };
    let mut result = vec![vec![0.0; cols]; rows];
    // Plain O(rows * inner * cols) matrix multiplication.
    for i in 0..rows {
        for k in 0..cols {
            let mut sum = 0.0;
            for j in 0..inner {
                sum += t1[i][j] * t2[j][k];
            }
            result[i][k] = sum;
        }
    }
    match mode {
        LogicMode::Boolean => threshold_matrix(&mut result),
        LogicMode::Continuous => {}
    }
    result
}

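/// Projects a 2D tensor to a vector along `dim`: `dim = 0` collapses rows
/// (one value per column), `dim = 1` collapses columns (one value per row).
/// `Boolean` mode takes a thresholded max (existential quantification);
/// `Continuous` mode sums.
///
/// # Examples
///
/// A sketch under the same assumed-path caveat as above:
///
/// ```ignore
/// let t = vec![vec![1.0, 0.0], vec![0.0, 0.0]];
/// assert_eq!(logical_project(&t, 0, LogicMode::Boolean), vec![1.0, 0.0]);
/// assert_eq!(logical_project(&t, 1, LogicMode::Continuous), vec![1.0, 0.0]);
/// ```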
#[must_use]
pub fn logical_project(tensor: &[Vec<f64>], dim: usize, mode: LogicMode) -> Vec<f64> {
    match dim {
        0 => {
            // Collapse rows: one output value per column.
            let cols = if tensor.is_empty() { 0 } else { tensor[0].len() };
            let mut result = vec![0.0; cols];
            for j in 0..cols {
                match mode {
                    LogicMode::Boolean => {
                        let max_val = tensor.iter().map(|row| row[j]).fold(0.0, f64::max);
                        result[j] = if max_val > 0.5 { 1.0 } else { 0.0 };
                    }
                    LogicMode::Continuous => {
                        result[j] = tensor.iter().map(|row| row[j]).sum();
                    }
                }
            }
            result
        }
        1 => {
            // Collapse columns: one output value per row.
            let mut result = Vec::with_capacity(tensor.len());
            for row in tensor {
                match mode {
                    LogicMode::Boolean => {
                        let max_val = row.iter().copied().fold(0.0, f64::max);
                        result.push(if max_val > 0.5 { 1.0 } else { 0.0 });
                    }
                    LogicMode::Continuous => {
                        result.push(row.iter().sum());
                    }
                }
            }
            result
        }
        _ => panic!("Invalid dimension {dim} for 2D tensor projection"),
    }
}

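/// Elementwise disjunction of two same-shape tensors: max of thresholded
/// values in `Boolean` mode, probabilistic sum `a + b - a*b` in
/// `Continuous` mode.
///
/// # Examples
///
/// ```ignore
/// let a = vec![vec![0.5]];
/// let b = vec![vec![0.5]];
/// // 0.5 + 0.5 - 0.25 = 0.75
/// assert_eq!(logical_union(&a, &b, LogicMode::Continuous), vec![vec![0.75]]);
/// ```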
#[must_use]
pub fn logical_union(t1: &[Vec<f64>], t2: &[Vec<f64>], mode: LogicMode) -> Vec<Vec<f64>> {
    let rows = t1.len();
    let cols = if t1.is_empty() { 0 } else { t1[0].len() };
    let mut result = vec![vec![0.0; cols]; rows];
    for i in 0..rows {
        for j in 0..cols {
            match mode {
                LogicMode::Boolean => {
                    let a = if t1[i][j] > 0.5 { 1.0 } else { 0.0 };
                    let b = if t2[i][j] > 0.5 { 1.0 } else { 0.0 };
                    result[i][j] = f64::max(a, b);
                }
                LogicMode::Continuous => {
                    // Probabilistic sum: a OR b = a + b - a*b.
                    let a = t1[i][j];
                    let b = t2[i][j];
                    result[i][j] = a + b - a * b;
                }
            }
        }
    }
    result
}

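/// Elementwise complement: `1.0 - x`, after thresholding in `Boolean` mode.
///
/// # Examples
///
/// ```ignore
/// let t = vec![vec![0.9, 0.1]];
/// assert_eq!(logical_negation(&t, LogicMode::Boolean), vec![vec![0.0, 1.0]]);
/// ```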
#[must_use]
pub fn logical_negation(tensor: &[Vec<f64>], mode: LogicMode) -> Vec<Vec<f64>> {
    let rows = tensor.len();
    let cols = if tensor.is_empty() { 0 } else { tensor[0].len() };
    let mut result = vec![vec![0.0; cols]; rows];
    for i in 0..rows {
        for j in 0..cols {
            match mode {
                LogicMode::Boolean => {
                    let val = if tensor[i][j] > 0.5 { 1.0 } else { 0.0 };
                    result[i][j] = 1.0 - val;
                }
                LogicMode::Continuous => {
                    result[i][j] = 1.0 - tensor[i][j];
                }
            }
        }
    }
    result
}

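/// Gates `tensor` elementwise by `condition` (same shape): a hard 0/1 gate
/// in `Boolean` mode, a soft multiplicative gate in `Continuous` mode.
///
/// # Examples
///
/// ```ignore
/// let t = vec![vec![3.0, 4.0]];
/// let cond = vec![vec![1.0, 0.0]];
/// assert_eq!(logical_select(&t, &cond, LogicMode::Boolean), vec![vec![3.0, 0.0]]);
/// ```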
#[must_use]
pub fn logical_select(
    tensor: &[Vec<f64>],
    condition: &[Vec<f64>],
    mode: LogicMode,
) -> Vec<Vec<f64>> {
    let rows = tensor.len();
    let cols = if tensor.is_empty() { 0 } else { tensor[0].len() };
    let mut result = vec![vec![0.0; cols]; rows];
    for i in 0..rows {
        for j in 0..cols {
            let cond = match mode {
                LogicMode::Boolean => {
                    if condition[i][j] > 0.5 { 1.0 } else { 0.0 }
                }
                LogicMode::Continuous => condition[i][j],
            };
            result[i][j] = tensor[i][j] * cond;
        }
    }
    result
}

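/// Applies `func` elementwise (or per row for `Softmax` and
/// `BooleanAttention`) with a temperature of 1.0.
///
/// # Examples
///
/// ```ignore
/// let t = vec![vec![-1.0, 2.0]];
/// assert_eq!(apply_nonlinearity(&t, Nonlinearity::Relu), vec![vec![0.0, 2.0]]);
/// ```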
#[must_use]
pub fn apply_nonlinearity(tensor: &[Vec<f64>], func: Nonlinearity) -> Vec<Vec<f64>> {
    apply_nonlinearity_with_temperature(tensor, func, 1.0)
}

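/// Like [`apply_nonlinearity`], but scales logits by `1.0 / temperature`
/// before `Softmax` (sharper for `temperature < 1.0`, flatter above it).
/// `temperature` must be nonzero; only `Softmax` uses it.
///
/// # Examples
///
/// ```ignore
/// let t = vec![vec![1.0, 2.0]];
/// // A low temperature pushes softmax toward a one-hot distribution.
/// let sharp = apply_nonlinearity_with_temperature(&t, Nonlinearity::Softmax, 0.1);
/// assert!(sharp[0][1] > 0.99);
/// ```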
#[must_use]
pub fn apply_nonlinearity_with_temperature(
    tensor: &[Vec<f64>],
    func: Nonlinearity,
    temperature: f64,
) -> Vec<Vec<f64>> {
    tensor
        .iter()
        .map(|row| match func {
            Nonlinearity::Softmax => {
                // Rescale the logits before the softmax; `temperature` must
                // be nonzero.
                let scaled: Vec<f64> = row.iter().map(|x| x / temperature).collect();
                softmax_row(&scaled)
            }
            Nonlinearity::BooleanAttention => {
                // One-hot vector at the argmax; empty rows stay empty rather
                // than panicking on an out-of-bounds write.
                let mut result = vec![0.0; row.len()];
                let max_idx = row
                    .iter()
                    .enumerate()
                    .max_by(|(_, a), (_, b)| {
                        a.partial_cmp(b).unwrap_or(std::cmp::Ordering::Equal)
                    })
                    .map(|(i, _)| i);
                if let Some(i) = max_idx {
                    result[i] = 1.0;
                }
                result
            }
            _ => apply_nonlinearity_row(row, func, None),
        })
        .collect()
}

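/// Like [`apply_nonlinearity`], but positions where `mask` is `true` are
/// treated as negative infinity for `Softmax` and `BooleanAttention`
/// (zero weight, never the argmax). Other nonlinearities ignore the mask.
///
/// # Examples
///
/// ```ignore
/// let t = vec![vec![5.0, 1.0]];
/// let mask = vec![vec![true, false]];
/// let out = apply_nonlinearity_with_mask(&t, Nonlinearity::BooleanAttention, Some(&mask));
/// assert_eq!(out, vec![vec![0.0, 1.0]]);
/// ```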
#[must_use]
pub fn apply_nonlinearity_with_mask(
    tensor: &[Vec<f64>],
    func: Nonlinearity,
    mask: Option<&[Vec<bool>]>,
) -> Vec<Vec<f64>> {
    tensor
        .iter()
        .enumerate()
        .map(|(i, row)| {
            let row_mask = mask.map(|m| m[i].as_slice());
            apply_nonlinearity_row(row, func, row_mask)
        })
        .collect()
}

fn apply_nonlinearity_row(row: &[f64], func: Nonlinearity, mask: Option<&[bool]>) -> Vec<f64> {
    // Masked positions are pushed to negative infinity so that softmax gives
    // them ~zero weight and argmax never selects them.
    fn mask_row(row: &[f64], mask: Option<&[bool]>) -> Vec<f64> {
        match mask {
            Some(m) => row
                .iter()
                .zip(m.iter())
                .map(|(&x, &is_masked)| if is_masked { f64::NEG_INFINITY } else { x })
                .collect(),
            None => row.to_vec(),
        }
    }
    match func {
        Nonlinearity::Step => row
            .iter()
            .map(|&x| if x > 0.0 { 1.0 } else { 0.0 })
            .collect(),
        Nonlinearity::Sigmoid => row.iter().map(|&x| 1.0 / (1.0 + (-x).exp())).collect(),
        Nonlinearity::Relu => row.iter().map(|&x| f64::max(0.0, x)).collect(),
        Nonlinearity::Tanh => row.iter().map(|&x| x.tanh()).collect(),
        Nonlinearity::Identity => row.to_vec(),
        Nonlinearity::Softmax => softmax_row(&mask_row(row, mask)),
        Nonlinearity::BooleanAttention => {
            // One-hot vector at the masked argmax; empty rows stay empty.
            let masked = mask_row(row, mask);
            let mut result = vec![0.0; row.len()];
            let max_idx = masked
                .iter()
                .enumerate()
                .max_by(|(_, a), (_, b)| a.partial_cmp(b).unwrap_or(std::cmp::Ordering::Equal))
                .map(|(i, _)| i);
            if let Some(i) = max_idx {
                result[i] = 1.0;
            }
            result
        }
    }
}

/// Softmax over one row, delegated to the shared implementation in
/// `crate::nn::functional`.
fn softmax_row(row: &[f64]) -> Vec<f64> {
    crate::nn::functional::softmax_1d_f64(row)
}

/// Clamps every entry to {0.0, 1.0} using a 0.5 threshold, in place.
fn threshold_matrix(matrix: &mut [Vec<f64>]) {
    for row in matrix.iter_mut() {
        for val in row.iter_mut() {
            *val = if *val > 0.5 { 1.0 } else { 0.0 };
        }
    }
}

#[cfg(test)]
#[path = "ops_tests.rs"]
mod tests;
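
// A couple of runnable sanity checks kept inline as usage sketches; the
// exhaustive suite lives in `ops_tests.rs`. These exercise only behavior
// visible in this file.
#[cfg(test)]
mod inline_examples {
    use super::*;

    #[test]
    fn continuous_union_is_probabilistic_sum() {
        let a = vec![vec![0.5]];
        let b = vec![vec![0.5]];
        // 0.5 + 0.5 - 0.5 * 0.5 = 0.75
        assert!((logical_union(&a, &b, LogicMode::Continuous)[0][0] - 0.75).abs() < 1e-12);
    }

    #[test]
    fn boolean_attention_is_one_hot_at_the_argmax() {
        let t = vec![vec![0.1, 0.9, 0.3]];
        let out = apply_nonlinearity(&t, Nonlinearity::BooleanAttention);
        assert_eq!(out, vec![vec![0.0, 1.0, 0.0]]);
    }
}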