pub struct Adam { /* private fields */ }
The Adam (Adaptive Moment Estimation) optimizer. Adam keeps exponentially decaying averages of past gradients and past squared gradients, and uses them to give each parameter its own adaptive step size.
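For reference, the standard Adam update (Kingma & Ba, 2015) is sketched below in LaTeX; here η is the learning_rate passed to new. Whether this implementation uses the conventional defaults β₁ = 0.9, β₂ = 0.999, ε = 1e-8 is not stated on this page, so treat those as assumptions.

\begin{aligned}
m_t &= \beta_1 m_{t-1} + (1 - \beta_1)\, g_t \\
v_t &= \beta_2 v_{t-1} + (1 - \beta_2)\, g_t^2 \\
\hat{m}_t &= m_t / (1 - \beta_1^t), \qquad \hat{v}_t = v_t / (1 - \beta_2^t) \\
\theta_t &= \theta_{t-1} - \eta\, \hat{m}_t / \bigl(\sqrt{\hat{v}_t} + \epsilon\bigr)
\end{aligned}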
Implementations
impl Adam
pub fn new(learning_rate: f64) -> Self
Creates a new Adam optimizer with the given learning rate.
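Every repository example below follows the same pattern: construct the optimizer, then lend it to a training routine by mutable reference. A minimal sketch, copied from examples/few_shot_learning.rs (the learner, episodes, and train signature come from that example, not from this page):

let mut optimizer = Adam::new(0.01); // the learning rate is the only constructor argument
let accuracies = learner.train(&episodes, &mut optimizer, 20)?; // the trainer borrows the optimizer mutably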
Examples found in repository
examples/few_shot_learning.rs (line 114)
93fn test_prototypical_networks(
94 data: &Array2<f64>,
95 labels: &Array1<usize>,
96 qnn: QuantumNeuralNetwork,
97) -> Result<()> {
98 let mut learner = FewShotLearner::new(FewShotMethod::PrototypicalNetworks, qnn);
99
100 // Generate episodes for training
101 let num_episodes = 10;
102 let mut episodes = Vec::new();
103
104 for _ in 0..num_episodes {
105 let episode = FewShotLearner::generate_episode(
106 data, labels, 5, // 5-way
107 3, // 3-shot
108 5, // 5 query examples per class
109 )?;
110 episodes.push(episode);
111 }
112
113 // Train
114 let mut optimizer = Adam::new(0.01);
115 let accuracies = learner.train(&episodes, &mut optimizer, 20)?;
116
117 // Print results
118 println!(" Training completed:");
119 println!(" - Initial accuracy: {:.2}%", accuracies[0] * 100.0);
120 println!(
121 " - Final accuracy: {:.2}%",
122 accuracies.last().unwrap() * 100.0
123 );
124 println!(
125 " - Improvement: {:.2}%",
126 (accuracies.last().unwrap() - accuracies[0]) * 100.0
127 );
128
129 Ok(())
130}
131
132/// Test MAML
133fn test_maml(data: &Array2<f64>, labels: &Array1<usize>, qnn: QuantumNeuralNetwork) -> Result<()> {
134 let mut learner = FewShotLearner::new(
135 FewShotMethod::MAML {
136 inner_steps: 5,
137 inner_lr: 0.01,
138 },
139 qnn,
140 );
141
142 // Generate meta-training tasks
143 let num_tasks = 20;
144 let mut tasks = Vec::new();
145
146 for _ in 0..num_tasks {
147 let task = FewShotLearner::generate_episode(
148 data, labels, 3, // 3-way (fewer classes for MAML)
149 5, // 5-shot
150 5, // 5 query examples
151 )?;
152 tasks.push(task);
153 }
154
155 // Meta-train
156 let mut meta_optimizer = Adam::new(0.001);
157 let losses = learner.train(&tasks, &mut meta_optimizer, 10)?;
158
159 println!(" Meta-training completed:");
160 println!(" - Initial loss: {:.4}", losses[0]);
161 println!(" - Final loss: {:.4}", losses.last().unwrap());
162 println!(
163 " - Convergence rate: {:.2}%",
164 (1.0 - losses.last().unwrap() / losses[0]) * 100.0
165 );
166
167 Ok(())
168}
169
170/// Compare performance across different K-shot values
171fn compare_shot_performance(
172 data: &Array2<f64>,
173 labels: &Array1<usize>,
174 qnn: QuantumNeuralNetwork,
175) -> Result<()> {
176 let k_values = vec![1, 3, 5, 10];
177
178 for k in k_values {
179 println!("\n Testing {k}-shot learning:");
180
181 let mut learner = FewShotLearner::new(FewShotMethod::PrototypicalNetworks, qnn.clone());
182
183 // Generate episodes
184 let mut episodes = Vec::new();
185 for _ in 0..5 {
186 let episode = FewShotLearner::generate_episode(
187 data, labels, 3, // 3-way
188 k, // k-shot
189 5, // 5 query
190 )?;
191 episodes.push(episode);
192 }
193
194 // Quick training
195 let mut optimizer = Adam::new(0.01);
196 let accuracies = learner.train(&episodes, &mut optimizer, 10)?;
197
198 println!(
199 " Final accuracy: {:.2}%",
200 accuracies.last().unwrap() * 100.0
201 );
202 }
203
204 Ok(())
205}
More examples
examples/quantum_diffusion.rs (line 116)
94fn train_diffusion_model() -> Result<()> {
95 // Generate synthetic 2D data (two moons)
96 let num_samples = 200;
97 let data = generate_two_moons(num_samples);
98
99 println!(" Generated {num_samples} samples of 2D two-moons data");
100
101 // Create diffusion model
102 let mut model = QuantumDiffusionModel::new(
103 2, // data dimension
104 4, // num qubits
105 50, // timesteps
106 NoiseSchedule::Cosine { s: 0.008 },
107 )?;
108
109 println!(" Created quantum diffusion model:");
110 println!(" - Data dimension: 2");
111 println!(" - Qubits: 4");
112 println!(" - Timesteps: 50");
113 println!(" - Schedule: Cosine");
114
115 // Train model
116 let mut optimizer = Adam::new(0.001);
117 let epochs = 100;
118 let batch_size = 32;
119
120 println!("\n Training for {epochs} epochs...");
121 let losses = model.train(&data, &mut optimizer, epochs, batch_size)?;
122
123 // Print training statistics
124 println!("\n Training Statistics:");
125 println!(" - Initial loss: {:.4}", losses[0]);
126 println!(" - Final loss: {:.4}", losses.last().unwrap());
127 println!(
128 " - Improvement: {:.2}%",
129 (1.0 - losses.last().unwrap() / losses[0]) * 100.0
130 );
131
132 Ok(())
133}
examples/continuous_rl.rs (line 97)
77fn train_qddpg_pendulum() -> Result<()> {
78 let state_dim = 3;
79 let action_dim = 1;
80 let action_bounds = vec![(-2.0, 2.0)];
81 let num_qubits = 4;
82 let buffer_capacity = 10000;
83
84 // Create QDDPG agent
85 let mut agent = QuantumDDPG::new(
86 state_dim,
87 action_dim,
88 action_bounds,
89 num_qubits,
90 buffer_capacity,
91 )?;
92
93 // Create environment
94 let mut env = PendulumEnvironment::new();
95
96 // Create optimizers
97 let mut actor_optimizer = Adam::new(0.001);
98 let mut critic_optimizer = Adam::new(0.001);
99
100 // Train for a few episodes (reduced for demo)
101 let episodes = 50;
102 println!(" Training QDDPG for {episodes} episodes...");
103
104 let rewards = agent.train(
105 &mut env,
106 episodes,
107 &mut actor_optimizer,
108 &mut critic_optimizer,
109 )?;
110
111 // Print training statistics
112 let avg_initial = rewards[..10].iter().sum::<f64>() / 10.0;
113 let avg_final = rewards[rewards.len() - 10..].iter().sum::<f64>() / 10.0;
114
115 println!("\n Training Statistics:");
116 println!(" - Average initial reward: {avg_initial:.2}");
117 println!(" - Average final reward: {avg_final:.2}");
118 println!(" - Improvement: {:.2}", avg_final - avg_initial);
119
120 // Test trained agent
121 println!("\n Testing trained agent...");
122 test_trained_agent(&agent, &mut env)?;
123
124 Ok(())
125}
examples/quantum_meta_learning.rs (line 94)
56fn maml_demo() -> Result<()> {
57 // Create quantum model
58 let layers = vec![
59 QNNLayerType::EncodingLayer { num_features: 4 },
60 QNNLayerType::VariationalLayer { num_params: 12 },
61 QNNLayerType::EntanglementLayer {
62 connectivity: "circular".to_string(),
63 },
64 QNNLayerType::VariationalLayer { num_params: 12 },
65 QNNLayerType::MeasurementLayer {
66 measurement_basis: "computational".to_string(),
67 },
68 ];
69
70 let qnn = QuantumNeuralNetwork::new(layers, 4, 4, 3)?;
71
72 // Create MAML learner
73 let algorithm = MetaLearningAlgorithm::MAML {
74 inner_steps: 5,
75 inner_lr: 0.01,
76 first_order: true, // Use first-order approximation for efficiency
77 };
78
79 let mut meta_learner = QuantumMetaLearner::new(algorithm, qnn);
80
81 println!(" Created MAML meta-learner:");
82 println!(" - Inner steps: 5");
83 println!(" - Inner learning rate: 0.01");
84 println!(" - Using first-order approximation");
85
86 // Generate tasks
87 let generator = TaskGenerator::new(4, 3);
88 let tasks: Vec<MetaTask> = (0..20)
89 .map(|_| generator.generate_rotation_task(30))
90 .collect();
91
92 // Meta-train
93 println!("\n Meta-training on 20 rotation tasks...");
94 let mut optimizer = Adam::new(0.001);
95 meta_learner.meta_train(&tasks, &mut optimizer, 50, 5)?;
96
97 // Test adaptation
98 let test_task = generator.generate_rotation_task(20);
99 println!("\n Testing adaptation to new task...");
100
101 let adapted_params = meta_learner.adapt_to_task(&test_task)?;
102 println!(" Successfully adapted to new task");
103 println!(
104 " Parameter adaptation magnitude: {:.4}",
105 (&adapted_params - meta_learner.meta_params())
106 .mapv(f64::abs)
107 .mean()
108 .unwrap()
109 );
110
111 Ok(())
112}
113
114/// Reptile algorithm demonstration
115fn reptile_demo() -> Result<()> {
116 let layers = vec![
117 QNNLayerType::EncodingLayer { num_features: 2 },
118 QNNLayerType::VariationalLayer { num_params: 8 },
119 QNNLayerType::MeasurementLayer {
120 measurement_basis: "Pauli-Z".to_string(),
121 },
122 ];
123
124 let qnn = QuantumNeuralNetwork::new(layers, 4, 2, 2)?;
125
126 let algorithm = MetaLearningAlgorithm::Reptile {
127 inner_steps: 10,
128 inner_lr: 0.1,
129 };
130
131 let mut meta_learner = QuantumMetaLearner::new(algorithm, qnn);
132
133 println!(" Created Reptile meta-learner:");
134 println!(" - Inner steps: 10");
135 println!(" - Inner learning rate: 0.1");
136
137 // Generate sinusoid tasks
138 let generator = TaskGenerator::new(2, 2);
139 let tasks: Vec<MetaTask> = (0..15)
140 .map(|_| generator.generate_sinusoid_task(40))
141 .collect();
142
143 println!("\n Meta-training on 15 sinusoid tasks...");
144 let mut optimizer = Adam::new(0.001);
145 meta_learner.meta_train(&tasks, &mut optimizer, 30, 3)?;
146
147 println!(" Reptile training complete");
148
149 // Analyze task similarities
150 println!("\n Task parameter statistics:");
151 for (i, task) in tasks.iter().take(3).enumerate() {
152 if let Some(amplitude) = task.metadata.get("amplitude") {
153 if let Some(phase) = task.metadata.get("phase") {
154 println!(" Task {i}: amplitude={amplitude:.2}, phase={phase:.2}");
155 }
156 }
157 }
158
159 Ok(())
160}
161
162/// `ProtoMAML` demonstration
163fn protomaml_demo() -> Result<()> {
164 let layers = vec![
165 QNNLayerType::EncodingLayer { num_features: 8 },
166 QNNLayerType::VariationalLayer { num_params: 16 },
167 QNNLayerType::EntanglementLayer {
168 connectivity: "full".to_string(),
169 },
170 QNNLayerType::MeasurementLayer {
171 measurement_basis: "computational".to_string(),
172 },
173 ];
174
175 let qnn = QuantumNeuralNetwork::new(layers, 4, 8, 16)?;
176
177 let algorithm = MetaLearningAlgorithm::ProtoMAML {
178 inner_steps: 5,
179 inner_lr: 0.01,
180 proto_weight: 0.5, // Weight for prototype regularization
181 };
182
183 let mut meta_learner = QuantumMetaLearner::new(algorithm, qnn);
184
185 println!(" Created ProtoMAML meta-learner:");
186 println!(" - Combines MAML with prototypical networks");
187 println!(" - Prototype weight: 0.5");
188
189 // Generate classification tasks
190 let generator = TaskGenerator::new(8, 4);
191 let tasks: Vec<MetaTask> = (0..10)
192 .map(|_| generator.generate_rotation_task(50))
193 .collect();
194
195 println!("\n Meta-training on 4-way classification tasks...");
196 let mut optimizer = Adam::new(0.001);
197 meta_learner.meta_train(&tasks, &mut optimizer, 40, 2)?;
198
199 println!(" ProtoMAML leverages both gradient-based and metric-based learning");
200
201 Ok(())
202}
203
204/// Meta-SGD demonstration
205fn metasgd_demo() -> Result<()> {
206 let layers = vec![
207 QNNLayerType::EncodingLayer { num_features: 4 },
208 QNNLayerType::VariationalLayer { num_params: 12 },
209 QNNLayerType::MeasurementLayer {
210 measurement_basis: "Pauli-XYZ".to_string(),
211 },
212 ];
213
214 let qnn = QuantumNeuralNetwork::new(layers, 4, 4, 3)?;
215
216 let algorithm = MetaLearningAlgorithm::MetaSGD { inner_steps: 3 };
217
218 let mut meta_learner = QuantumMetaLearner::new(algorithm, qnn);
219
220 println!(" Created Meta-SGD learner:");
221 println!(" - Learns per-parameter learning rates");
222 println!(" - Inner steps: 3");
223
224 // Generate diverse tasks
225 let generator = TaskGenerator::new(4, 3);
226 let mut tasks = Vec::new();
227
228 // Mix different task types
229 for i in 0..12 {
230 if i % 2 == 0 {
231 tasks.push(generator.generate_rotation_task(30));
232 } else {
233 tasks.push(generator.generate_sinusoid_task(30));
234 }
235 }
236
237 println!("\n Meta-training on mixed task distribution...");
238 let mut optimizer = Adam::new(0.0005);
239 meta_learner.meta_train(&tasks, &mut optimizer, 50, 4)?;
240
241 if let Some(lr) = meta_learner.per_param_lr() {
242 println!("\n Learned per-parameter learning rates:");
243 println!(
244 " - Min LR: {:.4}",
245 lr.iter().copied().fold(f64::INFINITY, f64::min)
246 );
247 println!(
248 " - Max LR: {:.4}",
249 lr.iter().copied().fold(f64::NEG_INFINITY, f64::max)
250 );
251 println!(" - Mean LR: {:.4}", lr.mean().unwrap());
252 }
253
254 Ok(())
255}
256
257/// ANIL demonstration
258fn anil_demo() -> Result<()> {
259 let layers = vec![
260 QNNLayerType::EncodingLayer { num_features: 6 },
261 QNNLayerType::VariationalLayer { num_params: 12 },
262 QNNLayerType::EntanglementLayer {
263 connectivity: "circular".to_string(),
264 },
265 QNNLayerType::VariationalLayer { num_params: 12 },
266 QNNLayerType::VariationalLayer { num_params: 6 }, // Final layer (adapted)
267 QNNLayerType::MeasurementLayer {
268 measurement_basis: "computational".to_string(),
269 },
270 ];
271
272 let qnn = QuantumNeuralNetwork::new(layers, 4, 6, 2)?;
273
274 let algorithm = MetaLearningAlgorithm::ANIL {
275 inner_steps: 10,
276 inner_lr: 0.1,
277 };
278
279 let mut meta_learner = QuantumMetaLearner::new(algorithm, qnn);
280
281 println!(" Created ANIL (Almost No Inner Loop) learner:");
282 println!(" - Only adapts final layer during inner loop");
283 println!(" - More parameter efficient than MAML");
284 println!(" - Inner steps: 10");
285
286 // Generate binary classification tasks
287 let generator = TaskGenerator::new(6, 2);
288 let tasks: Vec<MetaTask> = (0..15)
289 .map(|_| generator.generate_rotation_task(40))
290 .collect();
291
292 println!("\n Meta-training on binary classification tasks...");
293 let mut optimizer = Adam::new(0.001);
294 meta_learner.meta_train(&tasks, &mut optimizer, 40, 5)?;
295
296 println!(" ANIL reduces computational cost while maintaining performance");
297
298 Ok(())
299}
examples/quantum_adversarial.rs (line 280)
233fn adversarial_training_demo() -> Result<()> {
234 // Create model and trainer
235 let layers = vec![
236 QNNLayerType::EncodingLayer { num_features: 4 },
237 QNNLayerType::VariationalLayer { num_params: 12 },
238 QNNLayerType::EntanglementLayer {
239 connectivity: "circular".to_string(),
240 },
241 QNNLayerType::MeasurementLayer {
242 measurement_basis: "computational".to_string(),
243 },
244 ];
245
246 let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;
247
248 let defense = QuantumDefenseStrategy::AdversarialTraining {
249 attack_types: vec![
250 QuantumAttackType::FGSM { epsilon: 0.08 },
251 QuantumAttackType::PGD {
252 epsilon: 0.08,
253 alpha: 0.01,
254 num_steps: 7,
255 },
256 ],
257 adversarial_ratio: 0.4,
258 };
259
260 let mut config = create_default_adversarial_config();
261 config.epochs = 20; // Reduced for demo
262 config.eval_interval = 5;
263
264 let mut trainer = QuantumAdversarialTrainer::new(model, defense, config);
265
266 println!(" Adversarial training configuration:");
267 println!(" - Attack types: FGSM + PGD");
268 println!(" - Adversarial ratio: 40%");
269 println!(" - Training epochs: 20");
270
271 // Generate synthetic training data
272 let train_data = generate_quantum_dataset(200, 4);
273 let train_labels = Array1::from_shape_fn(200, |i| i % 2);
274
275 let val_data = generate_quantum_dataset(50, 4);
276 let val_labels = Array1::from_shape_fn(50, |i| i % 2);
277
278 // Train with adversarial examples
279 println!("\n Starting adversarial training...");
280 let mut optimizer = Adam::new(0.001);
281 let losses = trainer.train(
282 &train_data,
283 &train_labels,
284 &val_data,
285 &val_labels,
286 &mut optimizer,
287 )?;
288
289 println!(" Training completed!");
290 println!(" Final loss: {:.4}", losses.last().unwrap_or(&0.0));
291
292 // Show final robustness metrics
293 let metrics = trainer.get_robustness_metrics();
294 println!("\n Final robustness metrics:");
295 println!(" - Clean accuracy: {:.3}", metrics.clean_accuracy);
296 println!(" - Robust accuracy: {:.3}", metrics.robust_accuracy);
297 println!(
298 " - Attack success rate: {:.3}",
299 metrics.attack_success_rate
300 );
301
302 Ok(())
303}
examples/quantum_continual_learning.rs (line 95)
62fn ewc_demo() -> Result<()> {
63 // Create quantum model
64 let layers = vec![
65 QNNLayerType::EncodingLayer { num_features: 4 },
66 QNNLayerType::VariationalLayer { num_params: 12 },
67 QNNLayerType::EntanglementLayer {
68 connectivity: "circular".to_string(),
69 },
70 QNNLayerType::VariationalLayer { num_params: 8 },
71 QNNLayerType::MeasurementLayer {
72 measurement_basis: "computational".to_string(),
73 },
74 ];
75
76 let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;
77
78 // Create EWC strategy
79 let strategy = ContinualLearningStrategy::ElasticWeightConsolidation {
80 importance_weight: 1000.0,
81 fisher_samples: 200,
82 };
83
84 let mut learner = QuantumContinualLearner::new(model, strategy);
85
86 println!(" Created EWC continual learner:");
87 println!(" - Importance weight: 1000.0");
88 println!(" - Fisher samples: 200");
89
90 // Generate task sequence
91 let tasks = generate_task_sequence(3, 100, 4);
92
93 println!("\n Learning sequence of {} tasks...", tasks.len());
94
95 let mut optimizer = Adam::new(0.001);
96 let mut task_accuracies = Vec::new();
97
98 for (i, task) in tasks.iter().enumerate() {
99 println!(" \n Training on {}...", task.task_id);
100
101 let metrics = learner.learn_task(task.clone(), &mut optimizer, 30)?;
102 task_accuracies.push(metrics.current_accuracy);
103
104 println!(" - Current accuracy: {:.3}", metrics.current_accuracy);
105
106 // Evaluate forgetting on previous tasks
107 if i > 0 {
108 let all_accuracies = learner.evaluate_all_tasks()?;
109 let avg_prev_accuracy = all_accuracies
110 .iter()
111 .take(i)
112 .map(|(_, &acc)| acc)
113 .sum::<f64>()
114 / i as f64;
115
116 println!(" - Average accuracy on previous tasks: {avg_prev_accuracy:.3}");
117 }
118 }
119
120 // Final evaluation
121 let forgetting_metrics = learner.get_forgetting_metrics();
122 println!("\n EWC Results:");
123 println!(
124 " - Average accuracy: {:.3}",
125 forgetting_metrics.average_accuracy
126 );
127 println!(
128 " - Forgetting measure: {:.3}",
129 forgetting_metrics.forgetting_measure
130 );
131 println!(
132 " - Continual learning score: {:.3}",
133 forgetting_metrics.continual_learning_score
134 );
135
136 Ok(())
137}
138
139/// Demonstrate Experience Replay
140fn experience_replay_demo() -> Result<()> {
141 let layers = vec![
142 QNNLayerType::EncodingLayer { num_features: 4 },
143 QNNLayerType::VariationalLayer { num_params: 8 },
144 QNNLayerType::MeasurementLayer {
145 measurement_basis: "computational".to_string(),
146 },
147 ];
148
149 let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;
150
151 let strategy = ContinualLearningStrategy::ExperienceReplay {
152 buffer_size: 500,
153 replay_ratio: 0.3,
154 memory_selection: MemorySelectionStrategy::Random,
155 };
156
157 let mut learner = QuantumContinualLearner::new(model, strategy);
158
159 println!(" Created Experience Replay learner:");
160 println!(" - Buffer size: 500");
161 println!(" - Replay ratio: 30%");
162 println!(" - Selection: Random");
163
164 // Generate diverse tasks
165 let tasks = generate_diverse_tasks(4, 80, 4);
166
167 println!("\n Learning {} diverse tasks...", tasks.len());
168
169 let mut optimizer = Adam::new(0.002);
170
171 for (i, task) in tasks.iter().enumerate() {
172 println!(" \n Learning {}...", task.task_id);
173
174 let metrics = learner.learn_task(task.clone(), &mut optimizer, 25)?;
175
176 println!(" - Task accuracy: {:.3}", metrics.current_accuracy);
177
178 // Show memory buffer status
179 println!(" - Memory buffer usage: replay experiences stored");
180
181 if i > 0 {
182 let all_accuracies = learner.evaluate_all_tasks()?;
183 let retention_rate = all_accuracies.values().sum::<f64>() / all_accuracies.len() as f64;
184 println!(" - Average retention: {retention_rate:.3}");
185 }
186 }
187
188 let final_metrics = learner.get_forgetting_metrics();
189 println!("\n Experience Replay Results:");
190 println!(
191 " - Final average accuracy: {:.3}",
192 final_metrics.average_accuracy
193 );
194 println!(
195 " - Forgetting reduction: {:.3}",
196 1.0 - final_metrics.forgetting_measure
197 );
198
199 Ok(())
200}
201
202/// Demonstrate Progressive Networks
203fn progressive_networks_demo() -> Result<()> {
204 let layers = vec![
205 QNNLayerType::EncodingLayer { num_features: 4 },
206 QNNLayerType::VariationalLayer { num_params: 6 },
207 QNNLayerType::MeasurementLayer {
208 measurement_basis: "computational".to_string(),
209 },
210 ];
211
212 let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;
213
214 let strategy = ContinualLearningStrategy::ProgressiveNetworks {
215 lateral_connections: true,
216 adaptation_layers: 2,
217 };
218
219 let mut learner = QuantumContinualLearner::new(model, strategy);
220
221 println!(" Created Progressive Networks learner:");
222 println!(" - Lateral connections: enabled");
223 println!(" - Adaptation layers: 2");
224
225 // Generate related tasks for transfer learning
226 let tasks = generate_related_tasks(3, 60, 4);
227
228 println!("\n Learning {} related tasks...", tasks.len());
229
230 let mut optimizer = Adam::new(0.001);
231 let mut learning_speeds = Vec::new();
232
233 for (i, task) in tasks.iter().enumerate() {
234 println!(" \n Adding column for {}...", task.task_id);
235
236 let start_time = std::time::Instant::now();
237 let metrics = learner.learn_task(task.clone(), &mut optimizer, 20)?;
238 let learning_time = start_time.elapsed();
239
240 learning_speeds.push(learning_time);
241
242 println!(" - Task accuracy: {:.3}", metrics.current_accuracy);
243 println!(" - Learning time: {learning_time:.2?}");
244
245 if i > 0 {
246 let speedup = learning_speeds[0].as_secs_f64() / learning_time.as_secs_f64();
247 println!(" - Learning speedup: {speedup:.2}x");
248 }
249 }
250
251 println!("\n Progressive Networks Results:");
252 println!(" - No catastrophic forgetting (by design)");
253 println!(" - Lateral connections enable knowledge transfer");
254 println!(" - Model capacity grows with new tasks");
255
256 Ok(())
257}
258
259/// Demonstrate Learning without Forgetting
260fn lwf_demo() -> Result<()> {
261 let layers = vec![
262 QNNLayerType::EncodingLayer { num_features: 4 },
263 QNNLayerType::VariationalLayer { num_params: 10 },
264 QNNLayerType::EntanglementLayer {
265 connectivity: "circular".to_string(),
266 },
267 QNNLayerType::MeasurementLayer {
268 measurement_basis: "computational".to_string(),
269 },
270 ];
271
272 let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;
273
274 let strategy = ContinualLearningStrategy::LearningWithoutForgetting {
275 distillation_weight: 0.5,
276 temperature: 3.0,
277 };
278
279 let mut learner = QuantumContinualLearner::new(model, strategy);
280
281 println!(" Created Learning without Forgetting learner:");
282 println!(" - Distillation weight: 0.5");
283 println!(" - Temperature: 3.0");
284
285 // Generate task sequence
286 let tasks = generate_task_sequence(4, 70, 4);
287
288 println!("\n Learning with knowledge distillation...");
289
290 let mut optimizer = Adam::new(0.001);
291 let mut distillation_losses = Vec::new();
292
293 for (i, task) in tasks.iter().enumerate() {
294 println!(" \n Learning {}...", task.task_id);
295
296 let metrics = learner.learn_task(task.clone(), &mut optimizer, 25)?;
297
298 println!(" - Task accuracy: {:.3}", metrics.current_accuracy);
299
300 if i > 0 {
301 // Simulate distillation loss tracking
302 let distillation_loss = 0.3f64.mul_add(fastrand::f64(), 0.1);
303 distillation_losses.push(distillation_loss);
304 println!(" - Distillation loss: {distillation_loss:.3}");
305
306 let all_accuracies = learner.evaluate_all_tasks()?;
307 let stability = all_accuracies
308 .values()
309 .map(|&acc| if acc > 0.6 { 1.0 } else { 0.0 })
310 .sum::<f64>()
311 / all_accuracies.len() as f64;
312
313 println!(" - Knowledge retention: {:.1}%", stability * 100.0);
314 }
315 }
316
317 println!("\n LwF Results:");
318 println!(" - Knowledge distillation preserves previous task performance");
319 println!(" - Temperature scaling provides soft targets");
320 println!(" - Balances plasticity and stability");
321
322 Ok(())
323}
324
325/// Demonstrate Parameter Isolation
326fn parameter_isolation_demo() -> Result<()> {
327 let layers = vec![
328 QNNLayerType::EncodingLayer { num_features: 4 },
329 QNNLayerType::VariationalLayer { num_params: 16 },
330 QNNLayerType::EntanglementLayer {
331 connectivity: "full".to_string(),
332 },
333 QNNLayerType::MeasurementLayer {
334 measurement_basis: "computational".to_string(),
335 },
336 ];
337
338 let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;
339
340 let strategy = ContinualLearningStrategy::ParameterIsolation {
341 allocation_strategy: ParameterAllocationStrategy::Masking,
342 growth_threshold: 0.8,
343 };
344
345 let mut learner = QuantumContinualLearner::new(model, strategy);
346
347 println!(" Created Parameter Isolation learner:");
348 println!(" - Allocation strategy: Masking");
349 println!(" - Growth threshold: 0.8");
350
351 // Generate tasks with different requirements
352 let tasks = generate_varying_complexity_tasks(3, 90, 4);
353
354 println!("\n Learning with parameter isolation...");
355
356 let mut optimizer = Adam::new(0.001);
357 let mut parameter_usage = Vec::new();
358
359 for (i, task) in tasks.iter().enumerate() {
360 println!(" \n Allocating parameters for {}...", task.task_id);
361
362 let metrics = learner.learn_task(task.clone(), &mut optimizer, 30)?;
363
364 // Simulate parameter usage tracking
365 let used_params = 16 * (i + 1) / tasks.len(); // Gradually use more parameters
366 parameter_usage.push(used_params);
367
368 println!(" - Task accuracy: {:.3}", metrics.current_accuracy);
369 println!(" - Parameters allocated: {}/{}", used_params, 16);
370 println!(
371 " - Parameter efficiency: {:.1}%",
372 used_params as f64 / 16.0 * 100.0
373 );
374
375 if i > 0 {
376 let all_accuracies = learner.evaluate_all_tasks()?;
377 let interference = 1.0
378 - all_accuracies
379 .values()
380 .take(i)
381 .map(|&acc| if acc > 0.7 { 1.0 } else { 0.0 })
382 .sum::<f64>()
383 / i as f64;
384
385 println!(" - Task interference: {:.1}%", interference * 100.0);
386 }
387 }
388
389 println!("\n Parameter Isolation Results:");
390 println!(" - Dedicated parameters prevent interference");
391 println!(" - Scalable to many tasks");
392 println!(" - Maintains task-specific knowledge");
393
394 Ok(())
395}
396
397/// Demonstrate comprehensive task sequence evaluation
398fn task_sequence_demo() -> Result<()> {
399 println!(" Comprehensive continual learning evaluation...");
400
401 // Compare different strategies
402 let strategies = vec![
403 (
404 "EWC",
405 ContinualLearningStrategy::ElasticWeightConsolidation {
406 importance_weight: 500.0,
407 fisher_samples: 100,
408 },
409 ),
410 (
411 "Experience Replay",
412 ContinualLearningStrategy::ExperienceReplay {
413 buffer_size: 300,
414 replay_ratio: 0.2,
415 memory_selection: MemorySelectionStrategy::Random,
416 },
417 ),
418 (
419 "Quantum Regularization",
420 ContinualLearningStrategy::QuantumRegularization {
421 entanglement_preservation: 0.1,
422 parameter_drift_penalty: 0.5,
423 },
424 ),
425 ];
426
427 // Generate challenging task sequence
428 let tasks = generate_challenging_sequence(5, 60, 4);
429
430 println!(
431 "\n Comparing strategies on {} challenging tasks:",
432 tasks.len()
433 );
434
435 for (strategy_name, strategy) in strategies {
436 println!("\n --- {strategy_name} ---");
437
438 let layers = vec![
439 QNNLayerType::EncodingLayer { num_features: 4 },
440 QNNLayerType::VariationalLayer { num_params: 8 },
441 QNNLayerType::MeasurementLayer {
442 measurement_basis: "computational".to_string(),
443 },
444 ];
445
446 let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;
447 let mut learner = QuantumContinualLearner::new(model, strategy);
448 let mut optimizer = Adam::new(0.001);
449
450 for task in &tasks {
451 learner.learn_task(task.clone(), &mut optimizer, 20)?;
452 }
453
454 let final_metrics = learner.get_forgetting_metrics();
455 println!(
456 " - Average accuracy: {:.3}",
457 final_metrics.average_accuracy
458 );
459 println!(
460 " - Forgetting measure: {:.3}",
461 final_metrics.forgetting_measure
462 );
463 println!(
464 " - CL score: {:.3}",
465 final_metrics.continual_learning_score
466 );
467 }
468
469 Ok(())
470}
471
472/// Demonstrate forgetting analysis
473fn forgetting_analysis_demo() -> Result<()> {
474 println!(" Detailed forgetting analysis...");
475
476 let layers = vec![
477 QNNLayerType::EncodingLayer { num_features: 4 },
478 QNNLayerType::VariationalLayer { num_params: 12 },
479 QNNLayerType::MeasurementLayer {
480 measurement_basis: "computational".to_string(),
481 },
482 ];
483
484 let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;
485
486 let strategy = ContinualLearningStrategy::ElasticWeightConsolidation {
487 importance_weight: 1000.0,
488 fisher_samples: 150,
489 };
490
491 let mut learner = QuantumContinualLearner::new(model, strategy);
492
493 // Create tasks with increasing difficulty
494 let tasks = generate_increasing_difficulty_tasks(4, 80, 4);
495
496 println!("\n Learning tasks with increasing difficulty...");
497
498 let mut optimizer = Adam::new(0.001);
499 let mut accuracy_matrix = Vec::new();
500
501 for (i, task) in tasks.iter().enumerate() {
502 println!(
503 " \n Learning {} (difficulty level {})...",
504 task.task_id,
505 i + 1
506 );
507
508 learner.learn_task(task.clone(), &mut optimizer, 25)?;
509
510 // Evaluate on all tasks learned so far
511 let all_accuracies = learner.evaluate_all_tasks()?;
512 let mut current_row = Vec::new();
513
514 for j in 0..=i {
515 let task_id = &tasks[j].task_id;
516 let accuracy = all_accuracies.get(task_id).unwrap_or(&0.0);
517 current_row.push(*accuracy);
518 }
519
520 accuracy_matrix.push(current_row.clone());
521
522 // Print current performance
523 for (j, &acc) in current_row.iter().enumerate() {
524 println!(" - Task {}: {:.3}", j + 1, acc);
525 }
526 }
527
528 println!("\n Forgetting Analysis Results:");
529
530 // Compute backward transfer
531 for i in 1..accuracy_matrix.len() {
532 for j in 0..i {
533 let current_acc = accuracy_matrix[i][j];
534 let original_acc = accuracy_matrix[j][j];
535 let forgetting = (original_acc - current_acc).max(0.0);
536
537 if forgetting > 0.1 {
538 println!(" - Significant forgetting detected for Task {} after learning Task {}: {:.3}",
539 j + 1, i + 1, forgetting);
540 }
541 }
542 }
543
544 // Compute average forgetting
545 let mut total_forgetting = 0.0;
546 let mut num_comparisons = 0;
547
548 for i in 1..accuracy_matrix.len() {
549 for j in 0..i {
550 let current_acc = accuracy_matrix[i][j];
551 let original_acc = accuracy_matrix[j][j];
552 total_forgetting += (original_acc - current_acc).max(0.0);
553 num_comparisons += 1;
554 }
555 }
556
557 let avg_forgetting = if num_comparisons > 0 {
558 total_forgetting / f64::from(num_comparisons)
559 } else {
560 0.0
561 };
562
563 println!(" - Average forgetting: {avg_forgetting:.3}");
564
565 // Compute final average accuracy
566 if let Some(final_row) = accuracy_matrix.last() {
567 let final_avg = final_row.iter().sum::<f64>() / final_row.len() as f64;
568 println!(" - Final average accuracy: {final_avg:.3}");
569 println!(
570 " - Continual learning effectiveness: {:.1}%",
571 (1.0 - avg_forgetting) * 100.0
572 );
573 }
574
575 Ok(())
576}
Additional examples can be found in the repository.
Trait Implementations
Auto Trait Implementations
impl Freeze for Adam
impl RefUnwindSafe for Adam
impl Send for Adam
impl Sync for Adam
impl Unpin for Adam
impl UnwindSafe for Adam
Blanket Implementations
impl<T> BorrowMut<T> for T
where
    T: ?Sized,
fn borrow_mut(&mut self) -> &mut T
Mutably borrows from an owned value.
impl<T> IntoEither for T
fn into_either(self, into_left: bool) -> Either<Self, Self>
Converts self into a Left variant of Either<Self, Self> if into_left is true, and into a Right variant otherwise.
fn into_either_with<F>(self, into_left: F) -> Either<Self, Self>
Converts self into a Left variant of Either<Self, Self> if into_left(&self) returns true, and into a Right variant otherwise.
impl<T> Pointable for T
impl<SS, SP> SupersetOf<SS> for SP
where
    SS: SubsetOf<SP>,
fn to_subset(&self) -> Option<SS>
The inverse inclusion map: attempts to construct self from the equivalent element of its superset.
fn is_in_subset(&self) -> bool
Checks if self is actually part of its subset T (and can be converted to it).
fn to_subset_unchecked(&self) -> SS
Use with care! Same as self.to_subset but without any property checks. Always succeeds.
fn from_subset(element: &SS) -> SP
The inclusion map: converts self to the equivalent element of its superset.