#![allow(clippy::disallowed_methods)]
use aprender::autograd::{clear_graph, Tensor};
use aprender::nn::{loss::MSELoss, optim::SGD, Linear, Module, Sequential};
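// Migration example: print a PyTorch -> aprender API mapping table, then
// train a small two-layer network for 100 epochs to demonstrate the full
// aprender training loop (forward, loss, backward, optimizer step).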
fn main() {
println!("=== Switch From PyTorch ===");
println!();
println!("| PyTorch | aprender |");
println!("|----------------------------|------------------------------------|");
println!("| torch.tensor([...]) | Tensor::new(&[...], &shape) |");
println!("| torch.nn.Linear(in, out) | Linear::new(in, out) |");
println!("| torch.nn.Sequential(...) | Sequential::new().add(...) |");
println!("| loss.backward() | loss.backward() |");
println!("| optimizer.step() | optimizer.step_with_params(&mut p) |");
println!("| optimizer.zero_grad() | clear_graph() |");
println!("| torch.nn.MSELoss() | MSELoss::new() |");
println!();
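// Two-layer model, 2 inputs -> 4 hidden -> 1 output: the aprender
// counterpart of torch.nn.Sequential(Linear(2, 4), Linear(4, 1)).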
let mut model = Sequential::new()
.add(Linear::new(2, 4))
.add(Linear::new(4, 1));
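// Toy regression data: 4 samples x 2 features; the targets follow
// y = x1 + x2 (e.g. [1, 2] -> 3), so a linear mapping is recoverable.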
let x = Tensor::new(&[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0], &[4, 2]);
let y = Tensor::new(&[3.0, 7.0, 11.0, 15.0], &[4, 1]);
let loss_fn = MSELoss::new();
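// SGD with learning rate 0.01, mirroring torch.optim.SGD(params, lr=0.01).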
let mut optimizer = SGD::new(model.parameters_mut(), 0.01);
let mut initial_loss = 0.0_f32;
let mut final_loss = 0.0_f32;
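// Each epoch: clear the autograd tape, rebuild the graph with a fresh
// forward pass, backprop, then apply the SGD update. clear_graph() resets
// the recorded tape rather than zeroing per-parameter gradients.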
for epoch in 0..100 {
clear_graph();
let x_g = x.clone().requires_grad();
let pred = model.forward(&x_g);
let loss = loss_fn.forward(&pred, &y);
let l = loss.item();
if epoch == 0 {
initial_loss = l;
}
loss.backward();
let mut p = model.parameters_mut();
optimizer.step_with_params(&mut p);
final_loss = l;
}
println!("Training loop (100 epochs):");
println!(" Initial loss: {initial_loss:.4}");
println!(" Final loss: {final_loss:.4}");
assert!(final_loss < initial_loss, "Training must reduce loss");
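// Held-out check (a sketch beyond the chapter's contract): reuse the same
// forward + MSELoss path on one unseen point. Assumes, as in the loop
// above, that inputs are wrapped with requires_grad() before forward()
// and that item() extracts the scalar loss.
clear_graph();
let x_test = Tensor::new(&[2.0, 3.0], &[1, 2]).requires_grad();
let y_test = Tensor::new(&[5.0], &[1, 1]); // target: 2 + 3 = 5
let test_pred = model.forward(&x_test);
let test_loss = loss_fn.forward(&test_pred, &y_test).item();
println!("  Held-out MSE for input [2, 3] (target 5): {test_loss:.4}");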
println!();
println!("Key differences:");
println!(" 1. Ownership: aprender uses Rust ownership (no GC)");
println!(" 2. clear_graph() replaces zero_grad() (tape-based autograd)");
println!(" 3. step_with_params() takes &mut — Rust borrow checker enforced");
println!(" 4. No CUDA/Python interop overhead — pure Rust + SIMD");
println!();
println!("Chapter 24 contracts: PASSED");
}