use ag::tensor_ops as T;
use scirs2_autograd as ag;
use scirs2_core::ndarray::array;
#[allow(dead_code)]
fn main() {
    // Debug harness: checks the Frobenius-norm gradient of a fixed 2x2 matrix
    // against the analytic result dl/dA = A / ||A||_F.
    ag::run(|ctx| {
        println!("=== Debug Norm Gradient Test ===");

        // Fixed input: [[3, 4], [5, 12]] -> ||A||_F = sqrt(9 + 16 + 25 + 144) = sqrt(194).
        let input = T::convert_to_tensor(array![[3.0, 4.0], [5.0, 12.0]], ctx);
        let input_rendered = input.eval(ctx).expect("Operation failed");
        println!("Input matrix A: {:?}", input_rendered);

        // Forward pass: scalar Frobenius norm.
        let frob = T::frobenius_norm(input);
        let frob_value = frob.eval(ctx).expect("Operation failed");
        println!("Frobenius norm: {}", frob_value[[]]);

        let analytic_norm = (194.0_f64).sqrt();
        println!("Expected norm: {}", analytic_norm);

        // Graph diagnostics: tensor ids and the (scalar) shape of the norm node.
        println!("Norm tensor id: {}", frob.id());
        println!("Input tensor id: {}", input.id());
        let frob_shape = frob.shape();
        println!("Norm shape: {:?}", frob_shape);

        // Backward pass: d(norm)/d(input).
        println!("Computing gradient...");
        let grads = T::grad(&[frob], &[&input]);
        let grad_node = grads[0];
        println!("Gradient tensor id: {}", grad_node.id());

        println!("Evaluating gradient...");
        let grad_values = grad_node.eval(ctx).expect("Operation failed");
        println!("Gradient result: {:?}", grad_values);

        // Analytic reference gradient: each entry is a_ij / ||A||_F.
        let input_values = input.eval(ctx).expect("Operation failed");
        let analytic_grad = input_values.mapv(|x| x / analytic_norm);
        println!("Expected gradient: {:?}", analytic_grad);
    });
}