use ndarray::Array2;
use rand::prelude::*;
use rand::rngs::SmallRng;
use scirs2_neural::callbacks::{CosineAnnealingLR, ScheduleMethod};
use scirs2_neural::error::Result;
use scirs2_neural::layers::Dense;
use scirs2_neural::losses::MeanSquaredError;
use scirs2_neural::models::{sequential::Sequential, Model};
use scirs2_neural::optimizers::{with_cosine_annealing, with_step_decay, Adam, Optimizer};
use std::time::Instant;

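/// Builds the four-sample XOR dataset: inputs of shape (4, 2) and
/// targets of shape (4, 1).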
fn create_xor_dataset() -> (Array2<f32>, Array2<f32>) {
    // XOR truth table inputs, one (x1, x2) pair per row.
    let x = Array2::from_shape_vec(
        (4, 2),
        vec![
            0.0, 0.0, // (0, 0) -> 0
            0.0, 1.0, // (0, 1) -> 1
            1.0, 0.0, // (1, 0) -> 1
            1.0, 1.0, // (1, 1) -> 0
        ],
    )
    .unwrap();

    // Targets: XOR is 1 exactly when the two inputs differ.
    let y = Array2::from_shape_vec((4, 1), vec![0.0, 1.0, 1.0, 0.0]).unwrap();

    (x, y)
}

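/// Builds a small 2-8-4-1 MLP for XOR: two ReLU hidden layers and a
/// sigmoid output layer.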
fn create_xor_model(rng: &mut SmallRng) -> Result<Sequential<f32>> {
    let mut model = Sequential::new();

    // Hidden layer 1: 2 inputs -> 8 units, ReLU activation.
    let dense1 = Dense::new(2, 8, Some("relu"), rng)?;
    model.add_layer(dense1);

    // Hidden layer 2: 8 -> 4 units, ReLU activation.
    let dense2 = Dense::new(8, 4, Some("relu"), rng)?;
    model.add_layer(dense2);

    // Output layer: 4 -> 1 unit, sigmoid keeps predictions in (0, 1).
    let dense3 = Dense::new(4, 1, Some("sigmoid"), rng)?;
    model.add_layer(dense3);

    Ok(model)
}

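/// Runs a forward pass over the dataset, prints a per-sample prediction
/// table, and returns the accuracy after thresholding at 0.5.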
fn evaluate_model(model: &Sequential<f32>, x: &Array2<f32>, y: &Array2<f32>) -> Result<f32> {
    let predictions = model.forward(&x.clone().into_dyn())?;
    let binary_thresh = 0.5;

    println!("\nModel predictions:");
    println!("-----------------");
    println!(" X₁ | X₂ | Target | Prediction | Binary");
    println!("----------------------------------------------");

    let mut correct = 0;
    for i in 0..x.shape()[0] {
        let pred = predictions[[i, 0]];
        let binary_pred = pred > binary_thresh;
        let target = y[[i, 0]];
        // Compare the thresholded prediction against the 0.0/1.0 target.
        let is_correct = (binary_pred as i32 as f32 - target).abs() < 1e-6;

        if is_correct {
            correct += 1;
        }

        println!(
            " {:.4} | {:.4} | {:.4} | {:.4} | {} {}",
            x[[i, 0]],
            x[[i, 1]],
            target,
            pred,
            binary_pred as i32,
            if is_correct { "✓" } else { "✗" }
        );
    }

    let accuracy = correct as f32 / x.shape()[0] as f32;
    println!(
        "\nAccuracy: {:.2}% ({}/{})",
        accuracy * 100.0,
        correct,
        x.shape()[0]
    );

    Ok(accuracy)
}

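/// Runs the same XOR training task under three learning-rate strategies:
/// a step-decay optimizer wrapper, a cosine-annealing optimizer wrapper,
/// and a manually driven scheduler.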
fn main() -> Result<()> {
    println!("Learning Rate Scheduler Integration Example");
    println!("===========================================\n");

    // Fixed seed so all three runs are reproducible.
    let mut rng = SmallRng::seed_from_u64(42);

    let (x, y) = create_xor_dataset();
    println!("Dataset created (XOR problem)");

    train_with_step_decay(&mut rng, &x, &y)?;
    train_with_cosine_annealing(&mut rng, &x, &y)?;
    train_with_manual_scheduler_integration(&mut rng, &x, &y)?;

    println!("\nAll training examples completed successfully!");
    Ok(())
}

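/// Trains with a step-decay schedule wrapped around Adam: the learning
/// rate starts at 0.1 and is halved every 50 epochs. Assuming the usual
/// step-decay rule, this gives lr = 0.1 * 0.5^floor(epoch / 50), floored
/// at the configured minimum of 0.001.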
fn train_with_step_decay(rng: &mut SmallRng, x: &Array2<f32>, y: &Array2<f32>) -> Result<()> {
    println!("\n1. Training with Step Decay Learning Rate Scheduling");
    println!("--------------------------------------------------");

    let mut model = create_xor_model(rng)?;
    println!("Created model with {} layers", model.num_layers());

    let loss_fn = MeanSquaredError::new();

    let epochs = 300;
    let mut optimizer = with_step_decay(
        Adam::new(0.1, 0.9, 0.999, 1e-8),
        0.1,    // initial learning rate
        0.5,    // decay factor
        50,     // step size (epochs)
        0.001,  // minimum learning rate
        epochs, // total number of epochs
    );

    println!("Starting training with step decay LR scheduling...");
    println!("Initial LR: 0.1, Factor: 0.5, Step size: 50 epochs");
    let start_time = Instant::now();

    let x_dyn = x.clone().into_dyn();
    let y_dyn = y.clone().into_dyn();

    let mut lr_history = Vec::<(usize, f32)>::new();

    for epoch in 0..epochs {
        let loss = model.train_batch(&x_dyn, &y_dyn, &loss_fn, &mut optimizer)?;

        let current_lr = optimizer.get_learning_rate();

        // Record the learning rate whenever it changes.
        if lr_history.last().map_or(true, |&(_, lr)| lr != current_lr) {
            lr_history.push((epoch, current_lr));
        }

        if epoch % 50 == 0 || epoch == epochs - 1 {
            println!(
                "Epoch {}/{}: loss = {:.6}, lr = {:.6}",
                epoch + 1,
                epochs,
                loss,
                current_lr
            );
        }
    }

    let elapsed = start_time.elapsed();
    println!("Training completed in {:.2}s", elapsed.as_secs_f32());

    println!("\nLearning rate changes:");
    for (epoch, lr) in lr_history {
        println!("Epoch {}: lr = {:.6}", epoch + 1, lr);
    }

    evaluate_model(&model, x, y)?;

    Ok(())
}

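/// Trains with a cosine-annealing schedule wrapped around Adam. Assuming
/// the standard rule, within each 50-epoch cycle the learning rate follows
/// lr = min + 0.5 * (max - min) * (1 + cos(pi * t / cycle_length)),
/// sweeping smoothly from 0.01 down to 0.0001 before restarting.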
fn train_with_cosine_annealing(rng: &mut SmallRng, x: &Array2<f32>, y: &Array2<f32>) -> Result<()> {
    println!("\n2. Training with Cosine Annealing Learning Rate Scheduling");
    println!("--------------------------------------------------------");

    let mut model = create_xor_model(rng)?;
    println!("Created model with {} layers", model.num_layers());

    let loss_fn = MeanSquaredError::new();

    let epochs = 300;
    let cycle_length = 50;
    let mut optimizer = with_cosine_annealing(
        Adam::new(0.01, 0.9, 0.999, 1e-8),
        0.01,         // maximum learning rate
        0.0001,       // minimum learning rate
        cycle_length, // cycle length (epochs)
        epochs,       // total number of epochs
    );

    println!("Starting training with cosine annealing LR scheduling...");
    println!(
        "Max LR: 0.01, Min LR: 0.0001, Cycle length: {} epochs",
        cycle_length
    );
    let start_time = Instant::now();

    let x_dyn = x.clone().into_dyn();
    let y_dyn = y.clone().into_dyn();

    let mut lr_samples = Vec::<(usize, f32)>::new();

    for epoch in 0..epochs {
        let loss = model.train_batch(&x_dyn, &y_dyn, &loss_fn, &mut optimizer)?;

        let current_lr = optimizer.get_learning_rate();

        // Sample the learning rate every 10 epochs to trace the cosine curve.
        if epoch % 10 == 0 || epoch == epochs - 1 {
            lr_samples.push((epoch, current_lr));
        }

        if epoch % 50 == 0 || epoch == epochs - 1 {
            println!(
                "Epoch {}/{}: loss = {:.6}, lr = {:.6}",
                epoch + 1,
                epochs,
                loss,
                current_lr
            );
        }
    }

    let elapsed = start_time.elapsed();
    println!("Training completed in {:.2}s", elapsed.as_secs_f32());

    println!("\nLearning rate samples (showing cosine curve):");
    for (epoch, lr) in lr_samples {
        println!("Epoch {}: lr = {:.6}", epoch + 1, lr);
    }

    evaluate_model(&model, x, y)?;

    Ok(())
}

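/// Drives the scheduler by hand instead of wrapping the optimizer: each
/// epoch, the learning rate is computed from a standalone CosineAnnealingLR
/// and pushed into Adam via set_learning_rate. Useful when you need direct
/// control over when and how the schedule is applied.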
fn train_with_manual_scheduler_integration(
    rng: &mut SmallRng,
    x: &Array2<f32>,
    y: &Array2<f32>,
) -> Result<()> {
    println!("\n3. Training with Manual Scheduler Integration");
    println!("-------------------------------------------");

    let mut model = create_xor_model(rng)?;
    println!("Created model with {} layers", model.num_layers());

    let loss_fn = MeanSquaredError::new();
    let mut optimizer = Adam::new(0.01, 0.9, 0.999, 1e-8);

    let epochs = 300;
    let scheduler = CosineAnnealingLR::new(
        0.01,                  // maximum learning rate
        0.0001,                // minimum learning rate
        100,                   // cycle length (epochs)
        ScheduleMethod::Epoch, // step the schedule once per epoch
        epochs,                // total number of epochs
    );

    println!("Starting training with manual scheduler integration...");
    println!("Max LR: 0.01, Min LR: 0.0001, Cycle length: 100 epochs");
    let start_time = Instant::now();

    let x_dyn = x.clone().into_dyn();
    let y_dyn = y.clone().into_dyn();

    for epoch in 0..epochs {
        // Compute this epoch's learning rate and apply it before training.
        let current_lr = scheduler.calculate_lr(epoch);
        optimizer.set_learning_rate(current_lr);

        let loss = model.train_batch(&x_dyn, &y_dyn, &loss_fn, &mut optimizer)?;

        if epoch % 50 == 0 || epoch == epochs - 1 {
            println!(
                "Epoch {}/{}: loss = {:.6}, lr = {:.6}",
                epoch + 1,
                epochs,
                loss,
                current_lr
            );
        }
    }

    let elapsed = start_time.elapsed();
    println!("Training completed in {:.2}s", elapsed.as_secs_f32());

    evaluate_model(&model, x, y)?;

    Ok(())
}