use crate::autograd::Variable;
use crate::tensor::{Result, TensorError};
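
// The integer argument threaded through the tensor-level loss ops below
// selects the reduction mode; the convention appears to be PyTorch-style
// (1 = mean, 2 = sum), since `kl_div_loss` passes 2 and then divides the
// resulting sum by the batch size itself.

/// Mean squared error loss: the mean of `(pred - target)^2` over all
/// elements.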
pub fn mse_loss(pred: &Variable, target: &Variable) -> Result<Variable> {
    let result = pred.data().mse_loss(&target.data(), 1)?;
    Ok(Variable::wrap(result))
}
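
/// Cross-entropy loss over class logits. Expects `pred` of shape
/// `[batch, classes]`; `target` carries the per-sample class labels.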
pub fn cross_entropy_loss(pred: &Variable, target: &Variable) -> Result<Variable> {
    let pred_shape = pred.shape();
    if pred_shape.len() != 2 {
        return Err(TensorError::new(
            "cross_entropy_loss: pred must be 2D [batch, classes]",
        ));
    }
    // Trailing arguments (assumed PyTorch-style): reduction = 1 (mean),
    // ignore_index = -100, label_smoothing = 0.0 (disabled).
    let result = pred.data().cross_entropy_loss(&target.data(), 1, -100, 0.0)?;
    Ok(Variable::wrap(result))
}
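
/// Binary cross-entropy on raw logits. Fusing the sigmoid into the loss
/// (conventionally `max(x, 0) - x*y + ln(1 + e^{-|x|})`) is the standard
/// way to keep this numerically stable, which is the usual reason to
/// prefer it over a separate sigmoid followed by plain BCE.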
pub fn bce_with_logits_loss(pred: &Variable, target: &Variable) -> Result<Variable> {
    let result = pred.data().bce_with_logits_loss(&target.data(), 1)?;
    Ok(Variable::wrap(result))
}
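
/// L1 loss (mean absolute error): the mean of `|pred - target|` over all
/// elements.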
pub fn l1_loss(pred: &Variable, target: &Variable) -> Result<Variable> {
    let result = pred.data().l1_loss(&target.data(), 1)?;
    Ok(Variable::wrap(result))
}
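
/// Smooth L1 (Huber-style) loss with transition point `beta`: for
/// `d = pred - target`, the per-element loss is `0.5 * d^2 / beta` when
/// `|d| < beta` and `|d| - 0.5 * beta` otherwise.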
pub fn smooth_l1_loss(pred: &Variable, target: &Variable, beta: f64) -> Result<Variable> {
    if beta <= 0.0 {
        return Err(TensorError::new("smooth_l1_loss: beta must be positive"));
    }
    let result = pred.data().smooth_l1_loss(&target.data(), 1, beta)?;
    Ok(Variable::wrap(result))
}
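
/// KL divergence with "batchmean" semantics: the tensor op sum-reduces
/// the divergence (reduction mode 2) and the result is divided by the
/// batch size here. Following the usual `kl_div` convention, `input` is
/// expected to hold log-probabilities and `target` probabilities; the
/// trailing `false` is passed through as-is (plausibly a `log_target`
/// flag, but that is an assumption).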
pub fn kl_div_loss(input: &Variable, target: &Variable) -> Result<Variable> {
    let shape = input.shape();
    if shape.is_empty() || shape[0] == 0 {
        return Err(TensorError::new(
            "kl_div_loss: input must be at least 1D with a nonzero batch dimension",
        ));
    }
    let batch = shape[0] as f64;
    // Sum-reduce (mode 2), then divide by the batch size for batchmean behavior.
    let sum = input.data().kl_div_loss(&target.data(), 2, false)?;
    let result = sum.mul_scalar(1.0 / batch)?;
    Ok(Variable::wrap(result))
}