use flodl::*;
use flodl::monitor::Monitor;
/// Trains a small feed-forward flow model on synthetic (random) data,
/// reporting per-epoch timing/metrics through a `Monitor`.
///
/// # Errors
/// Propagates any layer-construction, forward/backward, or optimizer
/// error from the `flodl` crate via `?`.
fn main() -> Result<()> {
    // Fixed seed so weights and synthetic data are reproducible run-to-run.
    manual_seed(42);

    // 2 -> 16 (GELU + LayerNorm) -> 16 (side branch via `also`) -> 2.
    // NOTE(review): `also` presumably adds a parallel/residual branch —
    // confirm against FlowBuilder's docs.
    let model = FlowBuilder::from(Linear::new(2, 16)?)
        .through(GELU)
        .through(LayerNorm::new(16)?)
        .also(Linear::new(16, 16)?)
        .through(Linear::new(16, 2)?)
        .build()?;

    let params = model.parameters();
    // Fixed: `¶ms` was a mojibake of `&params` (`&para;` -> `¶`) and
    // did not compile. Learning rate 0.01.
    let mut optimizer = Adam::new(&params, 0.01);
    model.train();

    // 32 synthetic batches of 16 samples, 2 features in / 2 targets out,
    // matching the model's input/output widths above.
    let opts = TensorOptions::default();
    let batches: Vec<(Tensor, Tensor)> = (0..32)
        .map(|_| {
            let x = Tensor::randn(&[16, 2], opts).unwrap();
            let y = Tensor::randn(&[16, 2], opts).unwrap();
            (x, y)
        })
        .collect();

    let num_epochs = 50;
    let mut monitor = Monitor::new(num_epochs);
    for epoch in 0..num_epochs {
        let t = std::time::Instant::now();
        for (input_t, target_t) in &batches {
            // Inputs need gradients for backprop; targets do not.
            let input = Variable::new(input_t.clone(), true);
            let target = Variable::new(target_t.clone(), false);
            let pred = model.forward(&input)?;
            let loss = mse_loss(&pred, &target)?;

            // Standard step: clear grads, backprop, clip, update.
            optimizer.zero_grad();
            loss.backward()?;
            // Fixed: same `¶ms` mojemake -> `&params`. Clip to max norm 1.0.
            clip_grad_norm(&params, 1.0)?;
            optimizer.step()?;

            model.record_scalar("loss", loss.item()?);
        }
        model.flush(&[]);
        monitor.log(epoch, t.elapsed(), &model);
    }
    monitor.finish();
    Ok(())
}