pub struct Sequential<F: Float + Debug + ScalarOperand + 'static> { /* private fields */ }
A sequential model that chains layers together in a linear sequence
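For example, a small multilayer perceptron is assembled by chaining Dense layers, as in the repository examples below. A condensed sketch (assumes Sequential, Dense, and a seeded SmallRng are in scope, inside a function that returns a compatible Result):

// Two hidden layers followed by a softmax output layer (sketch).
let mut rng = SmallRng::seed_from_u64(42);
let mut model: Sequential<f32> = Sequential::new();
model.add_layer(Dense::new(784, 128, Some("relu"), &mut rng)?);
model.add_layer(Dense::new(128, 64, Some("relu"), &mut rng)?);
model.add_layer(Dense::new(64, 10, Some("softmax"), &mut rng)?);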
Implementations
impl<F: Float + Debug + ScalarOperand + 'static> Sequential<F>
pub fn new() -> Self
Create a new empty sequential model
Examples found in repository
examples/model_visualization_simple.rs (line 48)
47fn create_mlp_model<R: rand::Rng>(rng: &mut R) -> Result<Sequential<f64>> {
48 let mut model = Sequential::new();
49
50 // Input layer is implicitly defined by the first layer
51 // Hidden layers
52 model.add_layer(Dense::new(784, 128, Some("relu"), rng)?);
53 model.add_layer(Dense::new(128, 64, Some("relu"), rng)?);
54
55 // Output layer
56 model.add_layer(Dense::new(64, 10, Some("softmax"), rng)?);
57
58 Ok(model)
59}
More examples
examples/advanced_callbacks.rs (line 40)
39fn create_regression_model(input_dim: usize, rng: &mut SmallRng) -> Result<Sequential<f32>> {
40 let mut model = Sequential::new();
41
42 // Input layer
43 let dense1 = Dense::new(input_dim, 16, Some("relu"), rng)?;
44 model.add_layer(dense1);
45
46 // Hidden layers
47 let dense2 = Dense::new(16, 8, Some("relu"), rng)?;
48 model.add_layer(dense2);
49
50 // Output layer (linear activation for regression)
51 let dense3 = Dense::new(8, 1, None, rng)?;
52 model.add_layer(dense3);
53
54 Ok(model)
55}
examples/scheduler_optimizer.rs (line 43)
42fn create_xor_model(rng: &mut SmallRng) -> Result<Sequential<f32>> {
43 let mut model = Sequential::new();
44
45 // Input layer with 2 neurons (XOR has 2 inputs)
46 let dense1 = Dense::new(2, 8, Some("relu"), rng)?;
47 model.add_layer(dense1);
48
49 // Hidden layer
50 let dense2 = Dense::new(8, 4, Some("relu"), rng)?;
51 model.add_layer(dense2);
52
53 // Output layer with 1 neuron (XOR has 1 output)
54 let dense3 = Dense::new(4, 1, Some("sigmoid"), rng)?;
55 model.add_layer(dense3);
56
57 Ok(model)
58}
examples/training_callbacks.rs (line 44)
43fn create_xor_model(rng: &mut SmallRng) -> Result<Sequential<f32>> {
44 let mut model = Sequential::new();
45
46 // Input layer with 2 neurons (XOR has 2 inputs)
47 let dense1 = Dense::new(2, 8, Some("relu"), rng)?;
48 model.add_layer(dense1);
49
50 // Hidden layer
51 let dense2 = Dense::new(8, 4, Some("relu"), rng)?;
52 model.add_layer(dense2);
53
54 // Output layer with 1 neuron (XOR has 1 output)
55 let dense3 = Dense::new(4, 1, Some("sigmoid"), rng)?;
56 model.add_layer(dense3);
57
58 Ok(model)
59}
examples/neural_network_xor.rs (line 86)
84fn create_model<R: Rng>(rng: &mut R) -> Result<impl Model<f32>> {
85 // Create a sequential model
86 let mut model = Sequential::new();
87
88 // First layer: 2 inputs -> 4 hidden neurons with ReLU activation
89 // 2 inputs (x1, x2)
90 let layer1 = Dense::new(2, 4, Some("relu"), rng)?;
91 model.add_layer(layer1);
92
93 // Output layer: 4 hidden neurons -> 1 output (no activation for regression)
94 // No activation for simple regression
95 let layer2 = Dense::new(4, 1, None, rng)?;
96 model.add_layer(layer2);
97
98 Ok(model)
99}
examples/visualize_training_progress.rs (line 66)
65fn create_regression_model(input_dim: usize, rng: &mut SmallRng) -> Result<Sequential<f32>> {
66 let mut model = Sequential::new();
67
68 // Hidden layer with 16 neurons and ReLU activation
69 let dense1 = Dense::new(input_dim, 16, Some("relu"), rng)?;
70 model.add_layer(dense1);
71
72 // Hidden layer with 8 neurons and ReLU activation
73 let dense2 = Dense::new(16, 8, Some("relu"), rng)?;
74 model.add_layer(dense2);
75
76 // Output layer with 1 neuron and linear activation
77 let dense3 = Dense::new(8, 1, None, rng)?;
78 model.add_layer(dense3);
79
80 Ok(model)
81}
pub fn from_layers(layers: Vec<Box<dyn Layer<F> + Send + Sync>>) -> Self
Create a new sequential model from existing layers
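A minimal sketch, assuming the same Dense constructor and Layer trait shown elsewhere on this page; the only new step is boxing each layer as Box<dyn Layer<F> + Send + Sync> before passing the Vec:

// Build the layer list first, then create the model in one call (sketch).
let layers: Vec<Box<dyn Layer<f32> + Send + Sync>> = vec![
    Box::new(Dense::new(2, 8, Some("relu"), &mut rng)?),
    Box::new(Dense::new(8, 1, Some("sigmoid"), &mut rng)?),
];
let model = Sequential::from_layers(layers);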
pub fn add_layer<L: Layer<F> + 'static + Send + Sync>(
    &mut self,
    layer: L,
) -> &mut Self
Add a layer to the model
Examples found in repository
examples/model_visualization_simple.rs (line 52)
47fn create_mlp_model<R: rand::Rng>(rng: &mut R) -> Result<Sequential<f64>> {
48 let mut model = Sequential::new();
49
50 // Input layer is implicitly defined by the first layer
51 // Hidden layers
52 model.add_layer(Dense::new(784, 128, Some("relu"), rng)?);
53 model.add_layer(Dense::new(128, 64, Some("relu"), rng)?);
54
55 // Output layer
56 model.add_layer(Dense::new(64, 10, Some("softmax"), rng)?);
57
58 Ok(model)
59}
pub fn num_layers(&self) -> usize
Get the number of layers in the model
Examples found in repository
examples/scheduler_optimizer.rs (line 129)
124fn train_with_step_decay(rng: &mut SmallRng, x: &Array2<f32>, y: &Array2<f32>) -> Result<()> {
125 println!("\n1. Training with Step Decay Learning Rate Scheduling");
126 println!("--------------------------------------------------");
127
128 let mut model = create_xor_model(rng)?;
129 println!("Created model with {} layers", model.num_layers());
130
131 // Setup loss function and optimizer with step decay scheduling
132 let loss_fn = MeanSquaredError::new();
133
134 // Option 1: Using the helper function
135 let epochs = 300;
136 let mut optimizer = with_step_decay(
137 Adam::new(0.1, 0.9, 0.999, 1e-8),
138 0.1, // Initial LR
139 0.5, // Factor (reduce by half)
140 50, // Step size (every 50 epochs)
141 0.001, // Min LR
142 epochs, // Total steps
143 );
144
145 println!("Starting training with step decay LR scheduling...");
146 println!("Initial LR: 0.1, Factor: 0.5, Step size: 50 epochs");
147 let start_time = Instant::now();
148
149 // Convert to dynamic arrays
150 let x_dyn = x.clone().into_dyn();
151 let y_dyn = y.clone().into_dyn();
152
153 // Training loop with learning rate tracking
154 let mut lr_history = Vec::<(usize, f32)>::new();
155
156 for epoch in 0..epochs {
157 // Train one batch
158 let loss = model.train_batch(&x_dyn, &y_dyn, &loss_fn, &mut optimizer)?;
159
160 // Record current learning rate
161 let current_lr = optimizer.get_learning_rate();
162
163 // Track learning rate changes
164 if epoch == 0 || lr_history.is_empty() || lr_history.last().unwrap().1 != current_lr {
165 lr_history.push((epoch, current_lr));
166 }
167
168 // Print progress
169 if epoch % 50 == 0 || epoch == epochs - 1 {
170 println!(
171 "Epoch {}/{}: loss = {:.6}, lr = {:.6}",
172 epoch + 1,
173 epochs,
174 loss,
175 current_lr
176 );
177 }
178 }
179
180 let elapsed = start_time.elapsed();
181 println!("Training completed in {:.2}s", elapsed.as_secs_f32());
182
183 // Print learning rate history
184 println!("\nLearning rate changes:");
185 for (epoch, lr) in lr_history {
186 println!("Epoch {}: lr = {:.6}", epoch + 1, lr);
187 }
188
189 // Evaluate the model
190 evaluate_model(&model, x, y)?;
191
192 Ok(())
193}
194
195// Train with cosine annealing learning rate scheduling
196fn train_with_cosine_annealing(rng: &mut SmallRng, x: &Array2<f32>, y: &Array2<f32>) -> Result<()> {
197 println!("\n2. Training with Cosine Annealing Learning Rate Scheduling");
198 println!("--------------------------------------------------------");
199
200 let mut model = create_xor_model(rng)?;
201 println!("Created model with {} layers", model.num_layers());
202
203 // Setup loss function and optimizer with cosine annealing scheduling
204 let loss_fn = MeanSquaredError::new();
205
206 // Using the helper function for cosine annealing
207 let epochs = 300;
208 let cycle_length = 50;
209 let mut optimizer = with_cosine_annealing(
210 Adam::new(0.01, 0.9, 0.999, 1e-8),
211 0.01, // Max LR
212 0.0001, // Min LR
213 cycle_length, // Cycle length
214 epochs, // Total steps
215 );
216
217 println!("Starting training with cosine annealing LR scheduling...");
218 println!(
219 "Max LR: 0.01, Min LR: 0.0001, Cycle length: {} epochs",
220 cycle_length
221 );
222 let start_time = Instant::now();
223
224 // Convert to dynamic arrays
225 let x_dyn = x.clone().into_dyn();
226 let y_dyn = y.clone().into_dyn();
227
228 // Training loop with learning rate tracking
229 let mut lr_samples = Vec::<(usize, f32)>::new();
230
231 for epoch in 0..epochs {
232 // Train one batch
233 let loss = model.train_batch(&x_dyn, &y_dyn, &loss_fn, &mut optimizer)?;
234
235 // Get current learning rate
236 let current_lr = optimizer.get_learning_rate();
237
238 // Record learning rate at specific points to show the cycle
239 if epoch % 10 == 0 || epoch == epochs - 1 {
240 lr_samples.push((epoch, current_lr));
241 }
242
243 // Print progress
244 if epoch % 50 == 0 || epoch == epochs - 1 {
245 println!(
246 "Epoch {}/{}: loss = {:.6}, lr = {:.6}",
247 epoch + 1,
248 epochs,
249 loss,
250 current_lr
251 );
252 }
253 }
254
255 let elapsed = start_time.elapsed();
256 println!("Training completed in {:.2}s", elapsed.as_secs_f32());
257
258 // Print learning rate samples to demonstrate the cosine curve
259 println!("\nLearning rate samples (showing cosine curve):");
260 for (epoch, lr) in lr_samples {
261 println!("Epoch {}: lr = {:.6}", epoch + 1, lr);
262 }
263
264 // Evaluate the model
265 evaluate_model(&model, x, y)?;
266
267 Ok(())
268}
269
270// Train with manual scheduler integration
271fn train_with_manual_scheduler_integration(
272 rng: &mut SmallRng,
273 x: &Array2<f32>,
274 y: &Array2<f32>,
275) -> Result<()> {
276 println!("\n3. Training with Manual Scheduler Integration");
277 println!("-------------------------------------------");
278
279 let mut model = create_xor_model(rng)?;
280 println!("Created model with {} layers", model.num_layers());
281
282 // Setup loss function and optimizer
283 let loss_fn = MeanSquaredError::new();
284 let mut optimizer = Adam::new(0.01, 0.9, 0.999, 1e-8);
285
286 // Create scheduler manually
287 let epochs = 300;
288 let scheduler = CosineAnnealingLR::new(
289 0.01, // Max LR
290 0.0001, // Min LR
291 100, // Cycle length
292 ScheduleMethod::Epoch,
293 epochs, // Total steps
294 );
295
296 println!("Starting training with manual scheduler integration...");
297 println!("Max LR: 0.01, Min LR: 0.0001, Cycle length: 100 epochs");
298 let start_time = Instant::now();
299
300 // Convert to dynamic arrays
301 let x_dyn = x.clone().into_dyn();
302 let y_dyn = y.clone().into_dyn();
303
304 // Training loop with manual scheduler updates
305 for epoch in 0..epochs {
306 // Update learning rate using scheduler
307 let current_lr = scheduler.calculate_lr(epoch);
308 optimizer.set_learning_rate(current_lr);
309
310 // Train one batch
311 let loss = model.train_batch(&x_dyn, &y_dyn, &loss_fn, &mut optimizer)?;
312
313 // Print progress
314 if epoch % 50 == 0 || epoch == epochs - 1 {
315 println!(
316 "Epoch {}/{}: loss = {:.6}, lr = {:.6}",
317 epoch + 1,
318 epochs,
319 loss,
320 current_lr
321 );
322 }
323 }
324
325 let elapsed = start_time.elapsed();
326 println!("Training completed in {:.2}s", elapsed.as_secs_f32());
327
328 // Evaluate the model
329 evaluate_model(&model, x, y)?;
330
331 Ok(())
332}
More examples
examples/training_callbacks.rs (line 132)
127fn train_with_early_stopping(rng: &mut SmallRng, x: &Array2<f32>, y: &Array2<f32>) -> Result<()> {
128 println!("\n1. Training with Early Stopping");
129 println!("------------------------------");
130
131 let mut model = create_xor_model(rng)?;
132 println!("Created model with {} layers", model.num_layers());
133
134 // Setup loss function and optimizer
135 let loss_fn = MeanSquaredError::new();
136 let mut optimizer = Adam::new(0.01, 0.9, 0.999, 1e-8);
137
138 // Setup early stopping callback
139 // Stop training if loss doesn't improve for 20 epochs
140 // with a minimum improvement of 0.0001
141 let early_stopping = EarlyStopping::new(20, 0.0001, false);
142 let mut callback_manager = CallbackManager::<f32>::new();
143 callback_manager.add_callback(Box::new(early_stopping));
144
145 println!("Starting training with early stopping (patience = 20 epochs)...");
146 let start_time = Instant::now();
147
148 // Set up batch training parameters
149 let x_dyn = x.clone().into_dyn();
150 let y_dyn = y.clone().into_dyn();
151 let max_epochs = 200;
152
153 // Training loop
154 let mut epoch_metrics = HashMap::new();
155 let mut stop_training = false;
156
157 for epoch in 0..max_epochs {
158 // Call callbacks before epoch
159 callback_manager.on_epoch_begin(epoch)?;
160
161 // Train one batch
162 let loss = model.train_batch(&x_dyn, &y_dyn, &loss_fn, &mut optimizer)?;
163
164 // Update metrics
165 epoch_metrics.insert("loss".to_string(), loss);
166
167 // Call callbacks after epoch
168 let should_stop = callback_manager.on_epoch_end(epoch, &epoch_metrics)?;
169
170 if should_stop {
171 println!("Early stopping triggered after {} epochs", epoch + 1);
172 stop_training = true;
173 }
174
175 // Print progress
176 if epoch % 20 == 0 || epoch == max_epochs - 1 || stop_training {
177 println!("Epoch {}/{}: loss = {:.6}", epoch + 1, max_epochs, loss);
178 }
179
180 if stop_training {
181 break;
182 }
183 }
184
185 let elapsed = start_time.elapsed();
186 println!(
187 "Training completed in {:.2}s{}",
188 elapsed.as_secs_f32(),
189 if stop_training {
190 " (early stopped)"
191 } else {
192 ""
193 }
194 );
195
196 // Evaluate the model
197 evaluate_model(&model, x, y)?;
198
199 Ok(())
200}
examples/advanced_callbacks.rs (line 129)
121fn train_with_early_stopping(
122 rng: &mut SmallRng,
123 x_train: &Array2<f32>,
124 y_train: &Array2<f32>,
125 x_val: &Array2<f32>,
126 y_val: &Array2<f32>,
127) -> Result<Sequential<f32>> {
128 let mut model = create_regression_model(x_train.ncols(), rng)?;
129 println!("Created model with {} layers", model.num_layers());
130
131 // Setup loss function and optimizer
132 let loss_fn = MeanSquaredError::new();
133 let mut optimizer = Adam::new(0.01, 0.9, 0.999, 1e-8);
134
135 // Setup early stopping callback
136 // Stop training if validation loss doesn't improve for 30 epochs
137 let early_stopping = EarlyStopping::new(30, 0.0001, true);
138 let mut callback_manager = CallbackManager::<f32>::new();
139 callback_manager.add_callback(Box::new(early_stopping));
140
141 println!("Starting training with early stopping (patience = 30 epochs)...");
142 let start_time = Instant::now();
143
144 // Convert to dynamic arrays
145 let x_train_dyn = x_train.clone().into_dyn();
146 let y_train_dyn = y_train.clone().into_dyn();
147
148 // Set up training parameters
149 let max_epochs = 500;
150
151 // Training loop with validation
152 let mut epoch_metrics = HashMap::new();
153 let mut best_val_loss = f32::MAX;
154 let mut stop_training = false;
155
156 for epoch in 0..max_epochs {
157 // Call callbacks before epoch
158 callback_manager.on_epoch_begin(epoch)?;
159
160 // Train one epoch
161 let train_loss = model.train_batch(&x_train_dyn, &y_train_dyn, &loss_fn, &mut optimizer)?;
162
163 // Validate
164 let val_loss = calculate_mse(&model, x_val, y_val)?;
165
166 // Update metrics
167 epoch_metrics.insert("loss".to_string(), train_loss);
168 epoch_metrics.insert("val_loss".to_string(), val_loss);
169
170 // Call callbacks after epoch
171 let should_stop = callback_manager.on_epoch_end(epoch, &epoch_metrics)?;
172
173 if should_stop {
174 println!("Early stopping triggered after {} epochs", epoch + 1);
175 stop_training = true;
176 }
177
178 // Track best validation loss
179 if val_loss < best_val_loss {
180 best_val_loss = val_loss;
181 }
182
183 // Print progress
184 if epoch % 50 == 0 || epoch == max_epochs - 1 || stop_training {
185 println!(
186 "Epoch {}/{}: train_loss = {:.6}, val_loss = {:.6}",
187 epoch + 1,
188 max_epochs,
189 train_loss,
190 val_loss
191 );
192 }
193
194 if stop_training {
195 break;
196 }
197 }
198
199 let elapsed = start_time.elapsed();
200 println!("Training completed in {:.2}s", elapsed.as_secs_f32());
201 println!("Best validation MSE: {:.6}", best_val_loss);
202
203 Ok(model)
204}
examples/model_serialization_example.rs (line 46)
9fn main() -> Result<(), Box<dyn std::error::Error>> {
10 println!("Model Serialization Example");
11
12 // Initialize random number generator
13 let mut rng = SmallRng::seed_from_u64(42);
14
15 // 1. Create a simple neural network model
16 let mut model = Sequential::new();
17
18 // Add layers
19 let input_dim = 784; // MNIST image size: 28x28 = 784
20 let hidden_dim_1 = 256;
21 let hidden_dim_2 = 128;
22 let output_dim = 10; // 10 classes for digits 0-9
23
24 // Input layer to first hidden layer
25 let dense1 = Dense::new(input_dim, hidden_dim_1, Some("relu"), &mut rng)?;
26 model.add_layer(dense1);
27
28 // Dropout for regularization
29 let dropout1 = Dropout::new(0.2, &mut rng)?;
30 model.add_layer(dropout1);
31
32 // First hidden layer to second hidden layer
33 let dense2 = Dense::new(hidden_dim_1, hidden_dim_2, Some("relu"), &mut rng)?;
34 model.add_layer(dense2);
35
36 // Layer normalization
37 let layer_norm = LayerNorm::new(hidden_dim_2, 1e-5, &mut rng)?;
38 model.add_layer(layer_norm);
39
40 // Second hidden layer to output layer
41 let dense3 = Dense::new(hidden_dim_2, output_dim, Some("softmax"), &mut rng)?;
42 model.add_layer(dense3);
43
44 println!(
45 "Created a neural network with {} layers",
46 model.num_layers()
47 );
48
49 // 2. Test the model with some dummy input
50 let batch_size = 2;
51 let input = Array2::<f32>::from_elem((batch_size, input_dim), 0.1);
52 let output = model.forward(&input.clone().into_dyn())?;
53
54 println!("Model output shape: {:?}", output.shape());
55 println!("First few output values:");
56 for i in 0..batch_size {
57 print!("Sample {}: [ ", i);
58 for j in 0..5 {
59 // Print first 5 values
60 print!("{:.6} ", output[[i, j]]);
61 }
62 println!("... ]");
63 }
64
65 // 3. Save the model to a file
66 let model_path = Path::new("mnist_model.json");
67 serialization::save_model(&model, model_path, SerializationFormat::JSON)?;
68
69 println!("\nModel saved to {}", model_path.display());
70
71 // 4. Load the model from the file
72 let loaded_model = serialization::load_model::<f32, _>(model_path, SerializationFormat::JSON)?;
73
74 println!(
75 "Model loaded successfully with {} layers",
76 loaded_model.num_layers()
77 );
78
79 // 5. Test the loaded model with the same input
80 let loaded_output = loaded_model.forward(&input.into_dyn())?;
81
82 println!("\nLoaded model output shape: {:?}", loaded_output.shape());
83 println!("First few output values:");
84 for i in 0..batch_size {
85 print!("Sample {}: [ ", i);
86 for j in 0..5 {
87 // Print first 5 values
88 print!("{:.6} ", loaded_output[[i, j]]);
89 }
90 println!("... ]");
91 }
92
93 // 6. Compare original and loaded model outputs
94 let mut max_diff = 0.0;
95 for i in 0..batch_size {
96 for j in 0..output_dim {
97 let diff = (output[[i, j]] - loaded_output[[i, j]]).abs();
98 if diff > max_diff {
99 max_diff = diff;
100 }
101 }
102 }
103
104 println!(
105 "\nMaximum difference between original and loaded model outputs: {:.6}",
106 max_diff
107 );
108
109 if max_diff < 1e-6 {
110 println!("Models are identical! Serialization and deserialization worked correctly.");
111 } else {
112 println!("Warning: There are differences between the original and loaded models.");
113 println!(
114 "This might be due to numerical precision issues or a problem with serialization."
115 );
116 }
117
118 Ok(())
119}
examples/improved_model_serialization.rs (line 178)
165fn main() -> Result<()> {
166 println!("Improved Model Serialization and Loading Example");
167 println!("===============================================\n");
168
169 // Initialize random number generator
170 let mut rng = SmallRng::seed_from_u64(42);
171
172 // 1. Create XOR datasets
173 let (x_train, y_train) = create_xor_dataset();
174 println!("XOR dataset created");
175
176 // 2. Create and train the model
177 let mut model = create_xor_model(&mut rng)?;
178 println!("Model created with {} layers", model.num_layers());
179
180 // Train the model
181 train_model(&mut model, &x_train, &y_train, 2000)?;
182
183 // 3. Evaluate the model before saving
184 println!("\nEvaluating model before saving:");
185 evaluate_model(&model, &x_train, &y_train)?;
186
187 // 4. Save the model in different formats
188 println!("\nSaving model in different formats...");
189
190 // Save in JSON format (human-readable)
191 let json_path = Path::new("xor_model.json");
192 serialization::save_model(&model, json_path, SerializationFormat::JSON)?;
193 println!("Model saved to {} in JSON format", json_path.display());
194
195 // Save in CBOR format (compact binary)
196 let cbor_path = Path::new("xor_model.cbor");
197 serialization::save_model(&model, cbor_path, SerializationFormat::CBOR)?;
198 println!("Model saved to {} in CBOR format", cbor_path.display());
199
200 // Save in MessagePack format (efficient binary)
201 let msgpack_path = Path::new("xor_model.msgpack");
202 serialization::save_model(&model, msgpack_path, SerializationFormat::MessagePack)?;
203 println!(
204 "Model saved to {} in MessagePack format",
205 msgpack_path.display()
206 );
207
208 // 5. Load models from each format and evaluate
209 println!("\nLoading and evaluating models from each format:");
210
211 // Load and evaluate JSON model
212 println!("\n--- JSON Format ---");
213 let json_model = serialization::load_model::<f32, _>(json_path, SerializationFormat::JSON)?;
214 println!("JSON model loaded with {} layers", json_model.num_layers());
215 evaluate_model(&json_model, &x_train, &y_train)?;
216
217 // Load and evaluate CBOR model
218 println!("\n--- CBOR Format ---");
219 let cbor_model = serialization::load_model::<f32, _>(cbor_path, SerializationFormat::CBOR)?;
220 println!("CBOR model loaded with {} layers", cbor_model.num_layers());
221 evaluate_model(&cbor_model, &x_train, &y_train)?;
222
223 // Load and evaluate MessagePack model
224 println!("\n--- MessagePack Format ---");
225 let msgpack_model =
226 serialization::load_model::<f32, _>(msgpack_path, SerializationFormat::MessagePack)?;
227 println!(
228 "MessagePack model loaded with {} layers",
229 msgpack_model.num_layers()
230 );
231 evaluate_model(&msgpack_model, &x_train, &y_train)?;
232
233 // 6. Test with a larger, noisy dataset to verify model works with unseen data
234 println!("\nTesting with larger, noisy dataset:");
235 let (x_test, y_test) = create_noisy_xor_dataset(100, 0.2, &mut rng);
236 evaluate_model(&model, &x_test, &y_test)?;
237
238 // File sizes for comparison
239 let json_size = std::fs::metadata(json_path)?.len();
240 let cbor_size = std::fs::metadata(cbor_path)?.len();
241 let msgpack_size = std::fs::metadata(msgpack_path)?.len();
242
243 println!("\nSerialization Format Comparison:");
244 println!(" JSON: {} bytes", json_size);
245 println!(
246 " CBOR: {} bytes ({:.1}% of JSON)",
247 cbor_size,
248 (cbor_size as f64 / json_size as f64) * 100.0
249 );
250 println!(
251 " MessagePack: {} bytes ({:.1}% of JSON)",
252 msgpack_size,
253 (msgpack_size as f64 / json_size as f64) * 100.0
254 );
255
256 println!("\nModel serialization and loading example completed successfully!");
257 Ok(())
258}
examples/visualize_training_progress.rs (line 105)
83fn main() -> Result<()> {
84 println!("Training Visualization Example");
85 println!("==============================\n");
86
87 // Initialize RNG with a fixed seed for reproducibility
88 let mut rng = SmallRng::seed_from_u64(42);
89
90 // Generate synthetic data
91 let num_samples = 200;
92 let (x, y) = generate_nonlinear_data(num_samples, &mut rng);
93 println!("Generated synthetic dataset with {} samples", num_samples);
94
95 // Split data into training and validation sets
96 let (x_train, y_train, x_val, y_val) = train_val_split(&x, &y, 0.8);
97 println!(
98 "Split data: {} training samples, {} validation samples",
99 x_train.shape()[0],
100 x_val.shape()[0]
101 );
102
103 // Create model
104 let mut model = create_regression_model(1, &mut rng)?;
105 println!("Created model with {} layers", model.num_layers());
106
107 // Setup loss function and optimizer
108 let loss_fn = MeanSquaredError::new();
109 let mut optimizer = Adam::new(0.01, 0.9, 0.999, 1e-8);
110
111 // Training parameters
112 let epochs = 100;
113
114 // Configure learning rate scheduler
115 let mut scheduler = StepDecay::new(
116 0.01, // Initial learning rate
117 0.5, // Decay factor
118 30, // Step size
119 ScheduleMethod::Epoch,
120 1e-4, // Min learning rate
121 );
122
123 // Add visualization callback
124 let mut visualization_cb =
125 VisualizationCallback::new(5) // Show every 5 epochs
126 .with_tracked_metrics(vec!["train_loss".to_string(), "val_loss".to_string()])
127 .with_plot_options(PlotOptions {
128 width: 80,
129 height: 15,
130 max_x_ticks: 10,
131 max_y_ticks: 5,
132 line_char: '─',
133 point_char: '●',
134 background_char: ' ',
135 show_grid: true,
136 show_legend: true,
137 })
138 .with_save_path("training_plot.txt");
139
140 // Train the model manually
141 let mut epoch_history = HashMap::new();
142 epoch_history.insert("train_loss".to_string(), Vec::new());
143 epoch_history.insert("val_loss".to_string(), Vec::new());
144
145 // Convert data to dynamic arrays and ensure they are owned (not views)
146 let x_train_dyn = x_train.clone().into_dyn();
147 let y_train_dyn = y_train.clone().into_dyn();
148 let x_val_dyn = x_val.clone().into_dyn();
149 let y_val_dyn = y_val.clone().into_dyn();
150
151 println!("\nStarting training with visualization...");
152
153 // Manual training loop
154 println!("\nStarting training loop...");
155 for epoch in 0..epochs {
156 // Update learning rate with scheduler
157 let current_lr = scheduler.get_lr();
158 optimizer.set_learning_rate(current_lr);
159
160 // Train for one epoch (batch size = full dataset in this example)
161 let train_loss = model.train_batch(&x_train_dyn, &y_train_dyn, &loss_fn, &mut optimizer)?;
162
163 // Compute validation loss
164 let predictions = model.forward(&x_val_dyn)?;
165 let val_loss = loss_fn.forward(&predictions, &y_val_dyn)?;
166
167 // Store metrics
168 epoch_history
169 .get_mut("train_loss")
170 .unwrap()
171 .push(train_loss);
172 epoch_history.get_mut("val_loss").unwrap().push(val_loss);
173
174 // Update the scheduler
175 scheduler.update_lr(epoch);
176
177 // Print progress
178 if epoch % 10 == 0 || epoch == epochs - 1 {
179 println!(
180 "Epoch {}/{}: train_loss = {:.6}, val_loss = {:.6}, lr = {:.6}",
181 epoch + 1,
182 epochs,
183 train_loss,
184 val_loss,
185 current_lr
186 );
187 }
188
189 // Visualize progress (manually calling the visualization callback)
190 if epoch % 5 == 0 || epoch == epochs - 1 {
191 let mut context = CallbackContext {
192 epoch,
193 total_epochs: epochs,
194 batch: 0,
195 total_batches: 1,
196 batch_loss: None,
197 epoch_loss: Some(train_loss),
198 val_loss: Some(val_loss),
199 metrics: vec![],
200 history: &epoch_history,
201 stop_training: false,
202 model: None,
203 };
204
205 visualization_cb.on_event(CallbackTiming::AfterEpoch, &mut context)?;
206 }
207 }
208
209 // Final visualization
210 let mut context = CallbackContext {
211 epoch: epochs - 1,
212 total_epochs: epochs,
213 batch: 0,
214 total_batches: 1,
215 batch_loss: None,
216 epoch_loss: Some(*epoch_history.get("train_loss").unwrap().last().unwrap()),
217 val_loss: Some(*epoch_history.get("val_loss").unwrap().last().unwrap()),
218 metrics: vec![],
219 history: &epoch_history,
220 stop_training: false,
221 model: None,
222 };
223
224 visualization_cb.on_event(CallbackTiming::AfterTraining, &mut context)?;
225
226 println!("\nTraining complete!");
227
228 // Export history to CSV
229 export_history_to_csv(&epoch_history, "training_history.csv")?;
230 println!("Exported training history to training_history.csv");
231
232 // Analyze training results
233 let analysis = analyze_training_history(&epoch_history);
234 println!("\nTraining Analysis:");
235 println!("------------------");
236 for issue in analysis {
237 println!("{}", issue);
238 }
239
240 // Make predictions on validation data
241 println!("\nMaking predictions on validation data...");
242 let predictions = model.forward(&x_val_dyn)?;
243
244 // Calculate and display final metrics
245 let mse = loss_fn.forward(&predictions, &y_val_dyn)?;
246 println!("Final validation MSE: {:.6}", mse);
247
248 // Display a few sample predictions
249 println!("\nSample predictions:");
250 println!("------------------");
251 println!(" X | True Y | Predicted Y ");
252 println!("---------------------------");
253
254 // Show first 5 predictions
255 let num_samples_to_show = std::cmp::min(5, x_val.shape()[0]);
256 for i in 0..num_samples_to_show {
257 println!(
258 "{:.4} | {:.4} | {:.4}",
259 x_val[[i, 0]],
260 y_val[[i, 0]],
261 predictions[[i, 0]]
262 );
263 }
264
265 println!("\nVisualization demonstration complete!");
266 Ok(())
267}
Trait Implementations
impl<F: Float + Debug + ScalarOperand + 'static> Clone for Sequential<F>
impl<F: Float + Debug + ScalarOperand + 'static> Default for Sequential<F>
impl<F: Float + Debug + ScalarOperand + 'static> Model<F> for Sequential<F>
fn forward(&self, input: &Array<F, IxDyn>) -> Result<Array<F, IxDyn>>
Forward pass through the model
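Inputs and outputs are dynamic-dimensional ndarray arrays, so a 2-D batch is converted with into_dyn before the call. A short sketch, mirroring the serialization example above (a 784-input model is assumed):

// Run a batch of two samples through the model (sketch).
let input = Array2::<f32>::from_elem((2, 784), 0.1);
let output = model.forward(&input.into_dyn())?;
println!("Output shape: {:?}", output.shape());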
fn backward(
    &self,
    input: &Array<F, IxDyn>,
    grad_output: &Array<F, IxDyn>,
) -> Result<Array<F, IxDyn>>
Backward pass to compute gradients
fn update(&mut self, learning_rate: F) -> Result<()>
Update the model parameters with the given learning rate
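The repository examples drive training through train_batch (below) rather than calling backward and update directly, so the following is only an illustrative sketch of the intended call order; the mean-squared-error gradient is computed by hand rather than assuming any particular Loss method, and whether update consumes gradients cached by backward is an implementation detail of this crate:

// Hand-rolled gradient step (illustrative sketch; prefer train_batch in practice).
let predictions = model.forward(&x_dyn)?;               // x_dyn, y_dyn: Array<f32, IxDyn>
let n = predictions.len() as f32;
let grad_output = (&predictions - &y_dyn) * (2.0 / n);  // d(MSE)/d(prediction), computed manually
model.backward(&x_dyn, &grad_output)?;                  // propagate gradients through the layers
model.update(0.01)?;                                    // apply an update with learning rate 0.01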
fn train_batch(
    &mut self,
    inputs: &Array<F, IxDyn>,
    targets: &Array<F, IxDyn>,
    loss_fn: &dyn Loss<F>,
    optimizer: &mut dyn Optimizer<F>,
) -> Result<F>
Train the model on a batch of data
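The examples above all follow the same pattern: convert the batch to dynamic arrays once, then call train_batch each epoch. A condensed sketch (MeanSquaredError, Adam, and an existing model, x, and y are assumed in scope):

// Minimal training loop over a fixed batch (condensed from the XOR examples above).
let loss_fn = MeanSquaredError::new();
let mut optimizer = Adam::new(0.01, 0.9, 0.999, 1e-8);
let x_dyn = x.clone().into_dyn(); // x, y: Array2<f32>
let y_dyn = y.clone().into_dyn();
for epoch in 0..200 {
    let loss = model.train_batch(&x_dyn, &y_dyn, &loss_fn, &mut optimizer)?;
    if epoch % 50 == 0 {
        println!("Epoch {}: loss = {:.6}", epoch, loss);
    }
}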
Auto Trait Implementations
impl<F> Freeze for Sequential<F>
impl<F> !RefUnwindSafe for Sequential<F>
impl<F> Send for Sequential<F>
where
    F: Send,
impl<F> Sync for Sequential<F>
where
    F: Sync,
impl<F> Unpin for Sequential<F>
impl<F> !UnwindSafe for Sequential<F>
Blanket Implementations
impl<T> BorrowMut<T> for T
where
    T: ?Sized,
fn borrow_mut(&mut self) -> &mut T
Mutably borrows from an owned value.
impl<T> CloneToUninit for T
where
    T: Clone,
impl<T> IntoEither for T
fn into_either(self, into_left: bool) -> Either<Self, Self>
Converts self into a Left variant of Either<Self, Self> if into_left is true. Converts self into a Right variant of Either<Self, Self> otherwise.
fn into_either_with<F>(self, into_left: F) -> Either<Self, Self>
Converts self into a Left variant of Either<Self, Self> if into_left(&self) returns true. Converts self into a Right variant of Either<Self, Self> otherwise.