use crate::tensor::Tensor;
/// Element-wise rectified linear unit: each entry becomes `v` if `v > 0`,
/// otherwise `0.0` (NaN inputs therefore also map to `0.0`).
pub fn relu(x: &Tensor) -> Tensor {
    // Match on the comparison rather than `f32::max` so the NaN/-0.0
    // behavior is exactly the strict `v > 0.0` test.
    x.map(|v| match v > 0.0 {
        true => v,
        false => 0.0,
    })
}
/// Element-wise derivative of ReLU w.r.t. its *input* `x`:
/// `1.0` where `x > 0`, else `0.0` (the subgradient at 0 is taken as 0,
/// consistent with `relu`).
pub fn relu_grad(x: &Tensor) -> Tensor {
    // bool -> 0/1 -> f32 produces exactly 0.0 or 1.0, same as the
    // branch-based original.
    x.map(|v| ((v > 0.0) as i32) as f32)
}
pub fn sigmoid(x: &Tensor) -> Tensor {
x.map(|v| 1.0 / (1.0 + (-v).exp()))
}
/// Element-wise sigmoid derivative computed from the sigmoid *output*:
/// `s * (1 - s)`, where `sig_out` is assumed to already hold `sigmoid(x)`.
pub fn sigmoid_grad_from_out(sig_out: &Tensor) -> Tensor {
    sig_out.map(|s| {
        // Keep the exact `s * (1.0 - s)` operation order; `s - s*s`
        // would round differently.
        let complement = 1.0 - s;
        s * complement
    })
}
/// Element-wise hyperbolic tangent of every entry of `x`.
pub fn tanh(x: &Tensor) -> Tensor {
    x.map(|value| value.tanh())
}
/// Element-wise tanh derivative computed from the tanh *output*:
/// `1 - t^2`, where `tanh_out` is assumed to already hold `tanh(x)`.
pub fn tanh_grad_from_out(tanh_out: &Tensor) -> Tensor {
    tanh_out.map(|t| {
        let squared = t * t;
        1.0 - squared
    })
}
/// Row-wise softmax: each row of the result sums to 1.
///
/// Uses the standard max-subtraction trick for numerical stability —
/// subtracting the row maximum before exponentiating prevents `exp`
/// overflow without changing the mathematical result.
pub fn softmax(x: &Tensor) -> Tensor {
    let mut result = x.clone();
    for r in 0..x.rows {
        // Largest entry in the row; NEG_INFINITY is the identity for max,
        // so an empty row degenerates harmlessly (no inner iterations run).
        let row_max = (0..x.cols).fold(f32::NEG_INFINITY, |m, c| m.max(x.get(r, c)));

        // First pass: store shifted exponentials and accumulate their sum.
        let mut total = 0.0;
        for c in 0..x.cols {
            let e = (x.get(r, c) - row_max).exp();
            result.set(r, c, e);
            total += e;
        }

        // Second pass: normalize the row so it sums to 1.
        for c in 0..x.cols {
            result.set(r, c, result.get(r, c) / total);
        }
    }
    result
}