use linalg::{Matrix, BaseMatrix, BaseMatrixMut};
use linalg::Vector;
/// Trait for cost (loss) functions used when training a model.
///
/// `T` is the container holding the model outputs and targets
/// (e.g. `Matrix<f64>` or `Vector<f64>`). Implementors supply
/// both the scalar cost and its gradient with respect to the outputs.
pub trait CostFunc<T> {
/// Computes the scalar cost of `outputs` against `targets`.
fn cost(outputs: &T, targets: &T) -> f64;
/// Computes the gradient of the cost with respect to `outputs`.
fn grad_cost(outputs: &T, targets: &T) -> T;
}
/// The mean squared error cost function (halved: sum of squared
/// residuals divided by `2n` — see the `CostFunc` impls below).
#[derive(Clone, Copy, Debug)]
pub struct MeanSqError;
impl CostFunc<Matrix<f64>> for MeanSqError {
    /// Half mean squared error: `Σ (o - t)² / (2 * rows)`.
    ///
    /// The `1/2` factor makes the gradient come out as a plain
    /// residual, without a stray factor of two.
    fn cost(outputs: &Matrix<f64>, targets: &Matrix<f64>) -> f64 {
        let residuals = outputs - targets;
        let row_count = residuals.rows() as f64;
        // Element-wise square, then total.
        let total_sq = residuals.elemul(&residuals).sum();
        total_sq / (2f64 * row_count)
    }

    /// Gradient of the half-MSE with respect to the outputs: `o - t`.
    fn grad_cost(outputs: &Matrix<f64>, targets: &Matrix<f64>) -> Matrix<f64> {
        outputs - targets
    }
}
impl CostFunc<Vector<f64>> for MeanSqError {
    /// Half mean squared error: `Σ (o - t)² / (2 * size)`.
    fn cost(outputs: &Vector<f64>, targets: &Vector<f64>) -> f64 {
        let residuals = outputs - targets;
        let len = residuals.size() as f64;
        // Element-wise square, then total.
        let total_sq = residuals.elemul(&residuals).sum();
        total_sq / (2f64 * len)
    }

    /// Gradient of the half-MSE with respect to the outputs: `o - t`.
    fn grad_cost(outputs: &Vector<f64>, targets: &Vector<f64>) -> Vector<f64> {
        outputs - targets
    }
}
/// The binary cross-entropy cost function
/// (see the `CostFunc` impls below for the exact formula).
#[derive(Clone, Copy, Debug)]
pub struct CrossEntropyError;
impl CostFunc<Matrix<f64>> for CrossEntropyError {
    /// Binary cross-entropy averaged over the number of rows:
    /// `-Σ [t·ln(o) + (1 - t)·ln(1 - o)] / rows`.
    ///
    /// NOTE(review): assumes outputs lie strictly in (0, 1);
    /// values at 0 or 1 would produce infinities via `ln`.
    fn cost(outputs: &Matrix<f64>, targets: &Matrix<f64>) -> f64 {
        // ln(o) and ln(1 - o), element-wise.
        let ln_out = outputs.clone().apply(&ln);
        let ln_one_minus_out = (-outputs + 1f64).apply(&ln);
        // t .* ln(o) + (1 - t) .* ln(1 - o)
        let per_elem = targets.elemul(&ln_out) + (-targets + 1f64).elemul(&ln_one_minus_out);
        -per_elem.sum() / (outputs.rows() as f64)
    }

    /// Gradient of the cross-entropy wrt the outputs:
    /// `(o - t) ./ (o .* (1 - o))`.
    fn grad_cost(outputs: &Matrix<f64>, targets: &Matrix<f64>) -> Matrix<f64> {
        let denom = outputs.elemul(&(-outputs + 1f64));
        (outputs - targets).elediv(&denom)
    }
}
impl CostFunc<Vector<f64>> for CrossEntropyError {
    /// Binary cross-entropy averaged over the vector length:
    /// `-Σ [t·ln(o) + (1 - t)·ln(1 - o)] / size`.
    ///
    /// NOTE(review): assumes outputs lie strictly in (0, 1);
    /// values at 0 or 1 would produce infinities via `ln`.
    fn cost(outputs: &Vector<f64>, targets: &Vector<f64>) -> f64 {
        // ln(o) and ln(1 - o), element-wise.
        let ln_out = outputs.clone().apply(&ln);
        let ln_one_minus_out = (-outputs + 1f64).apply(&ln);
        // t .* ln(o) + (1 - t) .* ln(1 - o)
        let per_elem = targets.elemul(&ln_out) + (-targets + 1f64).elemul(&ln_one_minus_out);
        -per_elem.sum() / (outputs.size() as f64)
    }

    /// Gradient of the cross-entropy wrt the outputs:
    /// `(o - t) ./ (o .* (1 - o))`.
    fn grad_cost(outputs: &Vector<f64>, targets: &Vector<f64>) -> Vector<f64> {
        let denom = outputs.elemul(&(-outputs + 1f64));
        (outputs - targets).elediv(&denom)
    }
}
/// Natural-logarithm helper in free-function form so it can be
/// passed by reference to the element-wise `apply` calls above.
fn ln(x: f64) -> f64 {
    f64::ln(x)
}