#![allow(clippy::disallowed_methods)]
use aprender::autograd::Tensor;
use aprender::nn::{
loss::CrossEntropyLoss, optim::Adam, Linear, Module, Optimizer, ReLU, Sequential, Softmax,
};
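// Example: train a small MLP to classify 2D points by the quadrant they
// fall in. Flow: build a Sequential model, train with Adam and
// cross-entropy for 300 epochs, then inspect Softmax probabilities.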
#[allow(clippy::too_many_lines)]
fn main() {
println!("โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ");
println!("โ Neural Network Classification with Aprender โ");
println!("โ Classifying 2D Points into 4 Quadrants โ");
println!("โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ\n");
println!("๐ Dataset: Quadrant Classification");
println!(" Class 0: Q1 (+x, +y) | Class 1: Q2 (-x, +y)");
println!(" Class 2: Q3 (-x, -y) | Class 3: Q4 (+x, -y)\n");
let x_data = vec![
    1.0, 1.0, 0.5, 0.8,
    -1.0, 1.0, -0.7, 0.6,
    -1.0, -1.0, -0.8, -0.5,
    1.0, -1.0, 0.6, -0.9,
];
let x = Tensor::new(&x_data, &[8, 2]);
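// Labels are class indices 0..=3 stored as floats, one per row of x.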
let y_data = vec![0.0, 0.0, 1.0, 1.0, 2.0, 2.0, 3.0, 3.0];
let y = Tensor::new(&y_data, &[8]);
println!("๐๏ธ Building Model: MLP Classifier");
let mut model = Sequential::new()
.add(Linear::with_seed(2, 16, Some(42))) .add(ReLU::new())
.add(Linear::with_seed(16, 16, Some(43))) .add(ReLU::new())
.add(Linear::with_seed(16, 4, Some(44)));
println!(" Architecture: 2 โ 16 โ 16 โ 4");
println!(" Activation: ReLU (hidden)");
println!(" Output: 4 classes (quadrants)\n");
let loss_fn = CrossEntropyLoss::new();
let mut optimizer = Adam::new(model.parameters_mut(), 0.05);
println!("โ๏ธ Training Configuration:");
println!(" Loss: CrossEntropyLoss");
println!(" Optimizer: Adam (lr=0.05)");
println!(" Epochs: 300\n");
println!("๐ Training...\n");
println!(" Epoch Loss Accuracy");
println!(" โโโโโโโโโโโโโโโโโโโโโโโโโโโโโ");
let epochs = 300;
for epoch in 0..epochs {
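// Forward pass over the full batch, then backprop from the scalar loss.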
let logits = model.forward(&x);
let loss = loss_fn.forward(&logits, &y);
let loss_val = loss.data()[0];
loss.backward();
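// step_with_params takes a fresh mutable borrow of the parameters; the
// scope ends it before the next iteration's forward pass.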
{
let mut params = model.parameters_mut();
optimizer.step_with_params(&mut params);
}
optimizer.zero_grad();
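// Accuracy for this epoch, computed from the pre-update logits.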
let accuracy = compute_accuracy(&logits, &y);
if epoch % 30 == 0 || epoch == epochs - 1 {
println!(
" {:>5} {:.4} {:.0}%",
epoch,
loss_val,
accuracy * 100.0
);
}
}
println!("\n๐ Training Complete!");
model.eval();
let final_logits = model.forward(&x);
let softmax = Softmax::new(-1);
let probs = softmax.forward(&final_logits);
println!("\n๐ Predictions:");
println!(" Point Target Predicted Confidence");
println!(" โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ");
let points = [
    (1.0, 1.0),
    (0.5, 0.8),
    (-1.0, 1.0),
    (-0.7, 0.6),
    (-1.0, -1.0),
    (-0.8, -0.5),
    (1.0, -1.0),
    (0.6, -0.9),
];
let targets = [0, 0, 1, 1, 2, 2, 3, 3];
let quadrant_names = ["Q1", "Q2", "Q3", "Q4"];
let mut correct = 0;
for (i, ((px, py), &target)) in points.iter().zip(targets.iter()).enumerate() {
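// Each row of `probs` holds 4 class probabilities; argmax is the prediction.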
let row_start = i * 4;
let prob_slice = &probs.data()[row_start..row_start + 4];
let (pred_class, &max_prob) = prob_slice
.iter()
.enumerate()
.max_by(|(_, a), (_, b)| a.partial_cmp(b).unwrap_or(std::cmp::Ordering::Equal))
.expect("probability slice should not be empty");
let check = if pred_class == target {
    correct += 1;
    "✓"
} else {
    "✗"
};
println!(
" ({:>4.1}, {:>4.1}) {} {} {:.1}% {}",
px,
py,
quadrant_names[target],
quadrant_names[pred_class],
max_prob * 100.0,
check
);
}
println!(
"\n Final Accuracy: {}/8 ({:.0}%)",
correct,
(correct as f32 / 8.0) * 100.0
);
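// Generalization check: four unseen points, one per quadrant, including
// a near-origin point (-0.1, -0.1) close to all decision boundaries.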
println!("\n๐งช Testing on New Points:");
let test_points = vec![
2.0, 2.0, -3.0, 0.5, -0.1, -0.1, 0.5, -2.0, ];
let test_x = Tensor::new(&test_points, &[4, 2]);
let test_targets = [0, 1, 2, 3];
let test_logits = model.forward(&test_x);
let test_probs = softmax.forward(&test_logits);
println!(" Point Expected Predicted Confidence");
println!(" โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ");
let test_coords = [(2.0, 2.0), (-3.0, 0.5), (-0.1, -0.1), (0.5, -2.0)];
for (i, ((px, py), &expected)) in test_coords.iter().zip(test_targets.iter()).enumerate() {
let row_start = i * 4;
let prob_slice = &test_probs.data()[row_start..row_start + 4];
let (pred_class, &max_prob) = prob_slice
.iter()
.enumerate()
.max_by(|(_, a), (_, b)| a.partial_cmp(b).unwrap_or(std::cmp::Ordering::Equal))
.expect("probability slice should not be empty");
let check = if pred_class == expected { "✓" } else { "✗" };
println!(
" ({:>4.1}, {:>4.1}) {} {} {:.1}% {}",
px,
py,
quadrant_names[expected],
quadrant_names[pred_class],
max_prob * 100.0,
check
);
}
println!("\nโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ");
println!("โ Summary โ");
println!("โ โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโฃ");
println!("โ โ Built MLP classifier with 4 output classes โ");
println!("โ โ Trained with CrossEntropyLoss โ");
println!("โ โ Used Softmax for probability outputs โ");
println!("โ โ Achieved classification of 2D quadrants โ");
println!("โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ\n");
}
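/// Fraction of rows whose argmax logit matches the target class.
/// Assumes row-major `(batch, classes)` logits and float-encoded
/// class indices in `targets`.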
fn compute_accuracy(logits: &Tensor, targets: &Tensor) -> f32 {
let batch_size = logits.shape()[0];
let num_classes = logits.shape()[1];
let mut correct = 0;
for i in 0..batch_size {
let row_start = i * num_classes;
let logit_slice = &logits.data()[row_start..row_start + num_classes];
let pred_class = logit_slice
.iter()
.enumerate()
.max_by(|(_, a), (_, b)| a.partial_cmp(b).unwrap_or(std::cmp::Ordering::Equal))
.map(|(idx, _)| idx)
.expect("logit slice should not be empty");
let target_class = targets.data()[i] as usize;
if pred_class == target_class {
correct += 1;
}
}
correct as f32 / batch_size as f32
}