use crate::manual::TensorFloat;
use crate::manual::tensors::{Tensor, WithGrad};
use tensor_optim::TensorOps;
#[cfg(feature = "dyntensor")]
/// Performs one in-place SGD step on `w`: `param -= lr * grad` for every
/// element, then resets the accumulated gradients to zero so the next
/// backward pass starts fresh.
///
/// * `w`  - parameter tensor paired with its gradient buffer.
/// * `lr` - learning rate (step size).
pub fn sgd(w: &mut WithGrad<Tensor<TensorFloat>>, lr: TensorFloat) {
    // `split_mut` hands out disjoint mutable views of params and grads,
    // so we can update and zero in a single fused pass instead of
    // traversing the gradient buffer twice.
    let (params, grads) = w.split_mut();
    for (param, grad) in params.data_mut().iter_mut().zip(grads.data_mut().iter_mut()) {
        *param -= lr * *grad;
        *grad = 0.0; // clear gradient for the next accumulation
    }
}
#[cfg(not(feature = "dyntensor"))]
/// Performs one in-place SGD step on `w`: `param -= lr * grad` for every
/// element, then resets the accumulated gradients to zero so the next
/// backward pass starts fresh.
///
/// Const-generic variant for statically-shaped tensors (`N` elements,
/// rank `D`); mirrors the `dyntensor` implementation exactly.
///
/// * `w`  - parameter tensor paired with its gradient buffer.
/// * `lr` - learning rate (step size).
pub fn sgd<const N: usize, const D: usize>(
    w: &mut WithGrad<Tensor<TensorFloat, N, D>>,
    lr: TensorFloat,
) {
    // `split_mut` hands out disjoint mutable views of params and grads,
    // so we can update and zero in a single fused pass instead of
    // traversing the gradient buffer twice.
    let (params, grads) = w.split_mut();
    for (param, grad) in params.data_mut().iter_mut().zip(grads.data_mut().iter_mut()) {
        *param -= lr * *grad;
        *grad = 0.0; // clear gradient for the next accumulation
    }
}