pub struct Adam { /* private fields */ }
Adam optimizer: maintains per-parameter adaptive learning rates from exponentially decayed estimates of the first and second moments of the gradients.
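For reference, the standard Adam update (Kingma & Ba, 2015) that optimizers of this kind perform is sketched below. This is an illustrative sketch only: the struct's fields are private, and the hyperparameter values shown (beta1 = 0.9, beta2 = 0.999, epsilon = 1e-8) are the conventional defaults, not values read from this crate.

// Illustrative sketch of one Adam step over a flat parameter slice.
// Not this crate's internal implementation; hyperparameters are the
// conventional defaults from the Adam paper.
fn adam_step(
    params: &mut [f64],
    grads: &[f64],
    m: &mut [f64], // first-moment (mean) estimates
    v: &mut [f64], // second-moment (uncentered variance) estimates
    t: i32,        // 1-based step counter
    lr: f64,
) {
    let (beta1, beta2, eps) = (0.9, 0.999, 1e-8);
    for i in 0..params.len() {
        m[i] = beta1 * m[i] + (1.0 - beta1) * grads[i];
        v[i] = beta2 * v[i] + (1.0 - beta2) * grads[i] * grads[i];
        let m_hat = m[i] / (1.0 - beta1.powi(t)); // bias correction
        let v_hat = v[i] / (1.0 - beta2.powi(t));
        params[i] -= lr * m_hat / (v_hat.sqrt() + eps);
    }
}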
Implementations

impl Adam

pub fn new(learning_rate: f64) -> Self
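Creates a new Adam optimizer with the given learning rate. The repository examples below construct it with rates between 0.0005 and 0.01 and pass it to training routines by mutable reference; a minimal sketch (the `train` call is shorthand for the methods shown in the examples, not a verified signature):

// Minimal usage sketch based on the repository examples below.
let mut optimizer = Adam::new(0.001);
// Training routines take the optimizer by mutable reference, e.g.:
// let losses = model.train(&data, &mut optimizer, epochs, batch_size)?;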
Examples found in repository
examples/few_shot_learning.rs (line 107)
86 fn test_prototypical_networks(
87 data: &Array2<f64>,
88 labels: &Array1<usize>,
89 qnn: QuantumNeuralNetwork,
90 ) -> Result<()> {
91 let mut learner = FewShotLearner::new(FewShotMethod::PrototypicalNetworks, qnn);
92
93 // Generate episodes for training
94 let num_episodes = 10;
95 let mut episodes = Vec::new();
96
97 for _ in 0..num_episodes {
98 let episode = FewShotLearner::generate_episode(
99 data, labels, 5, // 5-way
100 3, // 3-shot
101 5, // 5 query examples per class
102 )?;
103 episodes.push(episode);
104 }
105
106 // Train
107 let mut optimizer = Adam::new(0.01);
108 let accuracies = learner.train(&episodes, &mut optimizer, 20)?;
109
110 // Print results
111 println!(" Training completed:");
112 println!(" - Initial accuracy: {:.2}%", accuracies[0] * 100.0);
113 println!(
114 " - Final accuracy: {:.2}%",
115 accuracies.last().unwrap() * 100.0
116 );
117 println!(
118 " - Improvement: {:.2}%",
119 (accuracies.last().unwrap() - accuracies[0]) * 100.0
120 );
121
122 Ok(())
123 }
124
125 /// Test MAML
126 fn test_maml(data: &Array2<f64>, labels: &Array1<usize>, qnn: QuantumNeuralNetwork) -> Result<()> {
127 let mut learner = FewShotLearner::new(
128 FewShotMethod::MAML {
129 inner_steps: 5,
130 inner_lr: 0.01,
131 },
132 qnn,
133 );
134
135 // Generate meta-training tasks
136 let num_tasks = 20;
137 let mut tasks = Vec::new();
138
139 for _ in 0..num_tasks {
140 let task = FewShotLearner::generate_episode(
141 data, labels, 3, // 3-way (fewer classes for MAML)
142 5, // 5-shot
143 5, // 5 query examples
144 )?;
145 tasks.push(task);
146 }
147
148 // Meta-train
149 let mut meta_optimizer = Adam::new(0.001);
150 let losses = learner.train(&tasks, &mut meta_optimizer, 10)?;
151
152 println!(" Meta-training completed:");
153 println!(" - Initial loss: {:.4}", losses[0]);
154 println!(" - Final loss: {:.4}", losses.last().unwrap());
155 println!(
156 " - Convergence rate: {:.2}%",
157 (1.0 - losses.last().unwrap() / losses[0]) * 100.0
158 );
159
160 Ok(())
161 }
162
163 /// Compare performance across different K-shot values
164 fn compare_shot_performance(
165 data: &Array2<f64>,
166 labels: &Array1<usize>,
167 qnn: QuantumNeuralNetwork,
168 ) -> Result<()> {
169 let k_values = vec![1, 3, 5, 10];
170
171 for k in k_values {
172 println!("\n Testing {k}-shot learning:");
173
174 let mut learner = FewShotLearner::new(FewShotMethod::PrototypicalNetworks, qnn.clone());
175
176 // Generate episodes
177 let mut episodes = Vec::new();
178 for _ in 0..5 {
179 let episode = FewShotLearner::generate_episode(
180 data, labels, 3, // 3-way
181 k, // k-shot
182 5, // 5 query
183 )?;
184 episodes.push(episode);
185 }
186
187 // Quick training
188 let mut optimizer = Adam::new(0.01);
189 let accuracies = learner.train(&episodes, &mut optimizer, 10)?;
190
191 println!(
192 " Final accuracy: {:.2}%",
193 accuracies.last().unwrap() * 100.0
194 );
195 }
196
197 Ok(())
198 }

More examples

examples/quantum_diffusion.rs (line 109)
87 fn train_diffusion_model() -> Result<()> {
88 // Generate synthetic 2D data (two moons)
89 let num_samples = 200;
90 let data = generate_two_moons(num_samples);
91
92 println!(" Generated {num_samples} samples of 2D two-moons data");
93
94 // Create diffusion model
95 let mut model = QuantumDiffusionModel::new(
96 2, // data dimension
97 4, // num qubits
98 50, // timesteps
99 NoiseSchedule::Cosine { s: 0.008 },
100 )?;
101
102 println!(" Created quantum diffusion model:");
103 println!(" - Data dimension: 2");
104 println!(" - Qubits: 4");
105 println!(" - Timesteps: 50");
106 println!(" - Schedule: Cosine");
107
108 // Train model
109 let mut optimizer = Adam::new(0.001);
110 let epochs = 100;
111 let batch_size = 32;
112
113 println!("\n Training for {epochs} epochs...");
114 let losses = model.train(&data, &mut optimizer, epochs, batch_size)?;
115
116 // Print training statistics
117 println!("\n Training Statistics:");
118 println!(" - Initial loss: {:.4}", losses[0]);
119 println!(" - Final loss: {:.4}", losses.last().unwrap());
120 println!(
121 " - Improvement: {:.2}%",
122 (1.0 - losses.last().unwrap() / losses[0]) * 100.0
123 );
124
125 Ok(())
126 }

examples/continuous_rl.rs (line 90)
70 fn train_qddpg_pendulum() -> Result<()> {
71 let state_dim = 3;
72 let action_dim = 1;
73 let action_bounds = vec![(-2.0, 2.0)];
74 let num_qubits = 4;
75 let buffer_capacity = 10000;
76
77 // Create QDDPG agent
78 let mut agent = QuantumDDPG::new(
79 state_dim,
80 action_dim,
81 action_bounds,
82 num_qubits,
83 buffer_capacity,
84 )?;
85
86 // Create environment
87 let mut env = PendulumEnvironment::new();
88
89 // Create optimizers
90 let mut actor_optimizer = Adam::new(0.001);
91 let mut critic_optimizer = Adam::new(0.001);
92
93 // Train for a few episodes (reduced for demo)
94 let episodes = 50;
95 println!(" Training QDDPG for {episodes} episodes...");
96
97 let rewards = agent.train(
98 &mut env,
99 episodes,
100 &mut actor_optimizer,
101 &mut critic_optimizer,
102 )?;
103
104 // Print training statistics
105 let avg_initial = rewards[..10].iter().sum::<f64>() / 10.0;
106 let avg_final = rewards[rewards.len() - 10..].iter().sum::<f64>() / 10.0;
107
108 println!("\n Training Statistics:");
109 println!(" - Average initial reward: {avg_initial:.2}");
110 println!(" - Average final reward: {avg_final:.2}");
111 println!(" - Improvement: {:.2}", avg_final - avg_initial);
112
113 // Test trained agent
114 println!("\n Testing trained agent...");
115 test_trained_agent(&agent, &mut env)?;
116
117 Ok(())
118 }

examples/quantum_meta_learning.rs (line 87)
49 fn maml_demo() -> Result<()> {
50 // Create quantum model
51 let layers = vec![
52 QNNLayerType::EncodingLayer { num_features: 4 },
53 QNNLayerType::VariationalLayer { num_params: 12 },
54 QNNLayerType::EntanglementLayer {
55 connectivity: "circular".to_string(),
56 },
57 QNNLayerType::VariationalLayer { num_params: 12 },
58 QNNLayerType::MeasurementLayer {
59 measurement_basis: "computational".to_string(),
60 },
61 ];
62
63 let qnn = QuantumNeuralNetwork::new(layers, 4, 4, 3)?;
64
65 // Create MAML learner
66 let algorithm = MetaLearningAlgorithm::MAML {
67 inner_steps: 5,
68 inner_lr: 0.01,
69 first_order: true, // Use first-order approximation for efficiency
70 };
71
72 let mut meta_learner = QuantumMetaLearner::new(algorithm, qnn);
73
74 println!(" Created MAML meta-learner:");
75 println!(" - Inner steps: 5");
76 println!(" - Inner learning rate: 0.01");
77 println!(" - Using first-order approximation");
78
79 // Generate tasks
80 let generator = TaskGenerator::new(4, 3);
81 let tasks: Vec<MetaTask> = (0..20)
82 .map(|_| generator.generate_rotation_task(30))
83 .collect();
84
85 // Meta-train
86 println!("\n Meta-training on 20 rotation tasks...");
87 let mut optimizer = Adam::new(0.001);
88 meta_learner.meta_train(&tasks, &mut optimizer, 50, 5)?;
89
90 // Test adaptation
91 let test_task = generator.generate_rotation_task(20);
92 println!("\n Testing adaptation to new task...");
93
94 let adapted_params = meta_learner.adapt_to_task(&test_task)?;
95 println!(" Successfully adapted to new task");
96 println!(
97 " Parameter adaptation magnitude: {:.4}",
98 (&adapted_params - meta_learner.meta_params())
99 .mapv(f64::abs)
100 .mean()
101 .unwrap()
102 );
103
104 Ok(())
105 }
106
107 /// Reptile algorithm demonstration
108 fn reptile_demo() -> Result<()> {
109 let layers = vec![
110 QNNLayerType::EncodingLayer { num_features: 2 },
111 QNNLayerType::VariationalLayer { num_params: 8 },
112 QNNLayerType::MeasurementLayer {
113 measurement_basis: "Pauli-Z".to_string(),
114 },
115 ];
116
117 let qnn = QuantumNeuralNetwork::new(layers, 4, 2, 2)?;
118
119 let algorithm = MetaLearningAlgorithm::Reptile {
120 inner_steps: 10,
121 inner_lr: 0.1,
122 };
123
124 let mut meta_learner = QuantumMetaLearner::new(algorithm, qnn);
125
126 println!(" Created Reptile meta-learner:");
127 println!(" - Inner steps: 10");
128 println!(" - Inner learning rate: 0.1");
129
130 // Generate sinusoid tasks
131 let generator = TaskGenerator::new(2, 2);
132 let tasks: Vec<MetaTask> = (0..15)
133 .map(|_| generator.generate_sinusoid_task(40))
134 .collect();
135
136 println!("\n Meta-training on 15 sinusoid tasks...");
137 let mut optimizer = Adam::new(0.001);
138 meta_learner.meta_train(&tasks, &mut optimizer, 30, 3)?;
139
140 println!(" Reptile training complete");
141
142 // Analyze task similarities
143 println!("\n Task parameter statistics:");
144 for (i, task) in tasks.iter().take(3).enumerate() {
145 if let Some(amplitude) = task.metadata.get("amplitude") {
146 if let Some(phase) = task.metadata.get("phase") {
147 println!(" Task {i}: amplitude={amplitude:.2}, phase={phase:.2}");
148 }
149 }
150 }
151
152 Ok(())
153 }
154
155 /// `ProtoMAML` demonstration
156 fn protomaml_demo() -> Result<()> {
157 let layers = vec![
158 QNNLayerType::EncodingLayer { num_features: 8 },
159 QNNLayerType::VariationalLayer { num_params: 16 },
160 QNNLayerType::EntanglementLayer {
161 connectivity: "full".to_string(),
162 },
163 QNNLayerType::MeasurementLayer {
164 measurement_basis: "computational".to_string(),
165 },
166 ];
167
168 let qnn = QuantumNeuralNetwork::new(layers, 4, 8, 16)?;
169
170 let algorithm = MetaLearningAlgorithm::ProtoMAML {
171 inner_steps: 5,
172 inner_lr: 0.01,
173 proto_weight: 0.5, // Weight for prototype regularization
174 };
175
176 let mut meta_learner = QuantumMetaLearner::new(algorithm, qnn);
177
178 println!(" Created ProtoMAML meta-learner:");
179 println!(" - Combines MAML with prototypical networks");
180 println!(" - Prototype weight: 0.5");
181
182 // Generate classification tasks
183 let generator = TaskGenerator::new(8, 4);
184 let tasks: Vec<MetaTask> = (0..10)
185 .map(|_| generator.generate_rotation_task(50))
186 .collect();
187
188 println!("\n Meta-training on 4-way classification tasks...");
189 let mut optimizer = Adam::new(0.001);
190 meta_learner.meta_train(&tasks, &mut optimizer, 40, 2)?;
191
192 println!(" ProtoMAML leverages both gradient-based and metric-based learning");
193
194 Ok(())
195 }
196
197 /// Meta-SGD demonstration
198 fn metasgd_demo() -> Result<()> {
199 let layers = vec![
200 QNNLayerType::EncodingLayer { num_features: 4 },
201 QNNLayerType::VariationalLayer { num_params: 12 },
202 QNNLayerType::MeasurementLayer {
203 measurement_basis: "Pauli-XYZ".to_string(),
204 },
205 ];
206
207 let qnn = QuantumNeuralNetwork::new(layers, 4, 4, 3)?;
208
209 let algorithm = MetaLearningAlgorithm::MetaSGD { inner_steps: 3 };
210
211 let mut meta_learner = QuantumMetaLearner::new(algorithm, qnn);
212
213 println!(" Created Meta-SGD learner:");
214 println!(" - Learns per-parameter learning rates");
215 println!(" - Inner steps: 3");
216
217 // Generate diverse tasks
218 let generator = TaskGenerator::new(4, 3);
219 let mut tasks = Vec::new();
220
221 // Mix different task types
222 for i in 0..12 {
223 if i % 2 == 0 {
224 tasks.push(generator.generate_rotation_task(30));
225 } else {
226 tasks.push(generator.generate_sinusoid_task(30));
227 }
228 }
229
230 println!("\n Meta-training on mixed task distribution...");
231 let mut optimizer = Adam::new(0.0005);
232 meta_learner.meta_train(&tasks, &mut optimizer, 50, 4)?;
233
234 if let Some(lr) = meta_learner.per_param_lr() {
235 println!("\n Learned per-parameter learning rates:");
236 println!(
237 " - Min LR: {:.4}",
238 lr.iter().copied().fold(f64::INFINITY, f64::min)
239 );
240 println!(
241 " - Max LR: {:.4}",
242 lr.iter().copied().fold(f64::NEG_INFINITY, f64::max)
243 );
244 println!(" - Mean LR: {:.4}", lr.mean().unwrap());
245 }
246
247 Ok(())
248 }
249
250 /// ANIL demonstration
251 fn anil_demo() -> Result<()> {
252 let layers = vec![
253 QNNLayerType::EncodingLayer { num_features: 6 },
254 QNNLayerType::VariationalLayer { num_params: 12 },
255 QNNLayerType::EntanglementLayer {
256 connectivity: "circular".to_string(),
257 },
258 QNNLayerType::VariationalLayer { num_params: 12 },
259 QNNLayerType::VariationalLayer { num_params: 6 }, // Final layer (adapted)
260 QNNLayerType::MeasurementLayer {
261 measurement_basis: "computational".to_string(),
262 },
263 ];
264
265 let qnn = QuantumNeuralNetwork::new(layers, 4, 6, 2)?;
266
267 let algorithm = MetaLearningAlgorithm::ANIL {
268 inner_steps: 10,
269 inner_lr: 0.1,
270 };
271
272 let mut meta_learner = QuantumMetaLearner::new(algorithm, qnn);
273
274 println!(" Created ANIL (Almost No Inner Loop) learner:");
275 println!(" - Only adapts final layer during inner loop");
276 println!(" - More parameter efficient than MAML");
277 println!(" - Inner steps: 10");
278
279 // Generate binary classification tasks
280 let generator = TaskGenerator::new(6, 2);
281 let tasks: Vec<MetaTask> = (0..15)
282 .map(|_| generator.generate_rotation_task(40))
283 .collect();
284
285 println!("\n Meta-training on binary classification tasks...");
286 let mut optimizer = Adam::new(0.001);
287 meta_learner.meta_train(&tasks, &mut optimizer, 40, 5)?;
288
289 println!(" ANIL reduces computational cost while maintaining performance");
290
291 Ok(())
292 }

examples/quantum_adversarial.rs (line 273)
226 fn adversarial_training_demo() -> Result<()> {
227 // Create model and trainer
228 let layers = vec![
229 QNNLayerType::EncodingLayer { num_features: 4 },
230 QNNLayerType::VariationalLayer { num_params: 12 },
231 QNNLayerType::EntanglementLayer {
232 connectivity: "circular".to_string(),
233 },
234 QNNLayerType::MeasurementLayer {
235 measurement_basis: "computational".to_string(),
236 },
237 ];
238
239 let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;
240
241 let defense = QuantumDefenseStrategy::AdversarialTraining {
242 attack_types: vec![
243 QuantumAttackType::FGSM { epsilon: 0.08 },
244 QuantumAttackType::PGD {
245 epsilon: 0.08,
246 alpha: 0.01,
247 num_steps: 7,
248 },
249 ],
250 adversarial_ratio: 0.4,
251 };
252
253 let mut config = create_default_adversarial_config();
254 config.epochs = 20; // Reduced for demo
255 config.eval_interval = 5;
256
257 let mut trainer = QuantumAdversarialTrainer::new(model, defense, config);
258
259 println!(" Adversarial training configuration:");
260 println!(" - Attack types: FGSM + PGD");
261 println!(" - Adversarial ratio: 40%");
262 println!(" - Training epochs: 20");
263
264 // Generate synthetic training data
265 let train_data = generate_quantum_dataset(200, 4);
266 let train_labels = Array1::from_shape_fn(200, |i| i % 2);
267
268 let val_data = generate_quantum_dataset(50, 4);
269 let val_labels = Array1::from_shape_fn(50, |i| i % 2);
270
271 // Train with adversarial examples
272 println!("\n Starting adversarial training...");
273 let mut optimizer = Adam::new(0.001);
274 let losses = trainer.train(
275 &train_data,
276 &train_labels,
277 &val_data,
278 &val_labels,
279 &mut optimizer,
280 )?;
281
282 println!(" Training completed!");
283 println!(" Final loss: {:.4}", losses.last().unwrap_or(&0.0));
284
285 // Show final robustness metrics
286 let metrics = trainer.get_robustness_metrics();
287 println!("\n Final robustness metrics:");
288 println!(" - Clean accuracy: {:.3}", metrics.clean_accuracy);
289 println!(" - Robust accuracy: {:.3}", metrics.robust_accuracy);
290 println!(
291 " - Attack success rate: {:.3}",
292 metrics.attack_success_rate
293 );
294
295 Ok(())
296 }

examples/quantum_continual_learning.rs (line 83)
50 fn ewc_demo() -> Result<()> {
51 // Create quantum model
52 let layers = vec![
53 QNNLayerType::EncodingLayer { num_features: 4 },
54 QNNLayerType::VariationalLayer { num_params: 12 },
55 QNNLayerType::EntanglementLayer {
56 connectivity: "circular".to_string(),
57 },
58 QNNLayerType::VariationalLayer { num_params: 8 },
59 QNNLayerType::MeasurementLayer {
60 measurement_basis: "computational".to_string(),
61 },
62 ];
63
64 let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;
65
66 // Create EWC strategy
67 let strategy = ContinualLearningStrategy::ElasticWeightConsolidation {
68 importance_weight: 1000.0,
69 fisher_samples: 200,
70 };
71
72 let mut learner = QuantumContinualLearner::new(model, strategy);
73
74 println!(" Created EWC continual learner:");
75 println!(" - Importance weight: 1000.0");
76 println!(" - Fisher samples: 200");
77
78 // Generate task sequence
79 let tasks = generate_task_sequence(3, 100, 4);
80
81 println!("\n Learning sequence of {} tasks...", tasks.len());
82
83 let mut optimizer = Adam::new(0.001);
84 let mut task_accuracies = Vec::new();
85
86 for (i, task) in tasks.iter().enumerate() {
87 println!(" \n Training on {}...", task.task_id);
88
89 let metrics = learner.learn_task(task.clone(), &mut optimizer, 30)?;
90 task_accuracies.push(metrics.current_accuracy);
91
92 println!(" - Current accuracy: {:.3}", metrics.current_accuracy);
93
94 // Evaluate forgetting on previous tasks
95 if i > 0 {
96 let all_accuracies = learner.evaluate_all_tasks()?;
97 let avg_prev_accuracy = all_accuracies
98 .iter()
99 .take(i)
100 .map(|(_, &acc)| acc)
101 .sum::<f64>()
102 / i as f64;
103
104 println!(" - Average accuracy on previous tasks: {avg_prev_accuracy:.3}");
105 }
106 }
107
108 // Final evaluation
109 let forgetting_metrics = learner.get_forgetting_metrics();
110 println!("\n EWC Results:");
111 println!(
112 " - Average accuracy: {:.3}",
113 forgetting_metrics.average_accuracy
114 );
115 println!(
116 " - Forgetting measure: {:.3}",
117 forgetting_metrics.forgetting_measure
118 );
119 println!(
120 " - Continual learning score: {:.3}",
121 forgetting_metrics.continual_learning_score
122 );
123
124 Ok(())
125 }
126
127 /// Demonstrate Experience Replay
128 fn experience_replay_demo() -> Result<()> {
129 let layers = vec![
130 QNNLayerType::EncodingLayer { num_features: 4 },
131 QNNLayerType::VariationalLayer { num_params: 8 },
132 QNNLayerType::MeasurementLayer {
133 measurement_basis: "computational".to_string(),
134 },
135 ];
136
137 let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;
138
139 let strategy = ContinualLearningStrategy::ExperienceReplay {
140 buffer_size: 500,
141 replay_ratio: 0.3,
142 memory_selection: MemorySelectionStrategy::Random,
143 };
144
145 let mut learner = QuantumContinualLearner::new(model, strategy);
146
147 println!(" Created Experience Replay learner:");
148 println!(" - Buffer size: 500");
149 println!(" - Replay ratio: 30%");
150 println!(" - Selection: Random");
151
152 // Generate diverse tasks
153 let tasks = generate_diverse_tasks(4, 80, 4);
154
155 println!("\n Learning {} diverse tasks...", tasks.len());
156
157 let mut optimizer = Adam::new(0.002);
158
159 for (i, task) in tasks.iter().enumerate() {
160 println!(" \n Learning {}...", task.task_id);
161
162 let metrics = learner.learn_task(task.clone(), &mut optimizer, 25)?;
163
164 println!(" - Task accuracy: {:.3}", metrics.current_accuracy);
165
166 // Show memory buffer status
167 println!(" - Memory buffer usage: replay experiences stored");
168
169 if i > 0 {
170 let all_accuracies = learner.evaluate_all_tasks()?;
171 let retention_rate = all_accuracies.values().sum::<f64>() / all_accuracies.len() as f64;
172 println!(" - Average retention: {retention_rate:.3}");
173 }
174 }
175
176 let final_metrics = learner.get_forgetting_metrics();
177 println!("\n Experience Replay Results:");
178 println!(
179 " - Final average accuracy: {:.3}",
180 final_metrics.average_accuracy
181 );
182 println!(
183 " - Forgetting reduction: {:.3}",
184 1.0 - final_metrics.forgetting_measure
185 );
186
187 Ok(())
188 }
189
190 /// Demonstrate Progressive Networks
191 fn progressive_networks_demo() -> Result<()> {
192 let layers = vec![
193 QNNLayerType::EncodingLayer { num_features: 4 },
194 QNNLayerType::VariationalLayer { num_params: 6 },
195 QNNLayerType::MeasurementLayer {
196 measurement_basis: "computational".to_string(),
197 },
198 ];
199
200 let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;
201
202 let strategy = ContinualLearningStrategy::ProgressiveNetworks {
203 lateral_connections: true,
204 adaptation_layers: 2,
205 };
206
207 let mut learner = QuantumContinualLearner::new(model, strategy);
208
209 println!(" Created Progressive Networks learner:");
210 println!(" - Lateral connections: enabled");
211 println!(" - Adaptation layers: 2");
212
213 // Generate related tasks for transfer learning
214 let tasks = generate_related_tasks(3, 60, 4);
215
216 println!("\n Learning {} related tasks...", tasks.len());
217
218 let mut optimizer = Adam::new(0.001);
219 let mut learning_speeds = Vec::new();
220
221 for (i, task) in tasks.iter().enumerate() {
222 println!(" \n Adding column for {}...", task.task_id);
223
224 let start_time = std::time::Instant::now();
225 let metrics = learner.learn_task(task.clone(), &mut optimizer, 20)?;
226 let learning_time = start_time.elapsed();
227
228 learning_speeds.push(learning_time);
229
230 println!(" - Task accuracy: {:.3}", metrics.current_accuracy);
231 println!(" - Learning time: {learning_time:.2?}");
232
233 if i > 0 {
234 let speedup = learning_speeds[0].as_secs_f64() / learning_time.as_secs_f64();
235 println!(" - Learning speedup: {speedup:.2}x");
236 }
237 }
238
239 println!("\n Progressive Networks Results:");
240 println!(" - No catastrophic forgetting (by design)");
241 println!(" - Lateral connections enable knowledge transfer");
242 println!(" - Model capacity grows with new tasks");
243
244 Ok(())
245 }
246
247 /// Demonstrate Learning without Forgetting
248 fn lwf_demo() -> Result<()> {
249 let layers = vec![
250 QNNLayerType::EncodingLayer { num_features: 4 },
251 QNNLayerType::VariationalLayer { num_params: 10 },
252 QNNLayerType::EntanglementLayer {
253 connectivity: "circular".to_string(),
254 },
255 QNNLayerType::MeasurementLayer {
256 measurement_basis: "computational".to_string(),
257 },
258 ];
259
260 let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;
261
262 let strategy = ContinualLearningStrategy::LearningWithoutForgetting {
263 distillation_weight: 0.5,
264 temperature: 3.0,
265 };
266
267 let mut learner = QuantumContinualLearner::new(model, strategy);
268
269 println!(" Created Learning without Forgetting learner:");
270 println!(" - Distillation weight: 0.5");
271 println!(" - Temperature: 3.0");
272
273 // Generate task sequence
274 let tasks = generate_task_sequence(4, 70, 4);
275
276 println!("\n Learning with knowledge distillation...");
277
278 let mut optimizer = Adam::new(0.001);
279 let mut distillation_losses = Vec::new();
280
281 for (i, task) in tasks.iter().enumerate() {
282 println!(" \n Learning {}...", task.task_id);
283
284 let metrics = learner.learn_task(task.clone(), &mut optimizer, 25)?;
285
286 println!(" - Task accuracy: {:.3}", metrics.current_accuracy);
287
288 if i > 0 {
289 // Simulate distillation loss tracking
290 let distillation_loss = 0.3f64.mul_add(fastrand::f64(), 0.1);
291 distillation_losses.push(distillation_loss);
292 println!(" - Distillation loss: {distillation_loss:.3}");
293
294 let all_accuracies = learner.evaluate_all_tasks()?;
295 let stability = all_accuracies
296 .values()
297 .map(|&acc| if acc > 0.6 { 1.0 } else { 0.0 })
298 .sum::<f64>()
299 / all_accuracies.len() as f64;
300
301 println!(" - Knowledge retention: {:.1}%", stability * 100.0);
302 }
303 }
304
305 println!("\n LwF Results:");
306 println!(" - Knowledge distillation preserves previous task performance");
307 println!(" - Temperature scaling provides soft targets");
308 println!(" - Balances plasticity and stability");
309
310 Ok(())
311 }
312
313 /// Demonstrate Parameter Isolation
314 fn parameter_isolation_demo() -> Result<()> {
315 let layers = vec![
316 QNNLayerType::EncodingLayer { num_features: 4 },
317 QNNLayerType::VariationalLayer { num_params: 16 },
318 QNNLayerType::EntanglementLayer {
319 connectivity: "full".to_string(),
320 },
321 QNNLayerType::MeasurementLayer {
322 measurement_basis: "computational".to_string(),
323 },
324 ];
325
326 let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;
327
328 let strategy = ContinualLearningStrategy::ParameterIsolation {
329 allocation_strategy: ParameterAllocationStrategy::Masking,
330 growth_threshold: 0.8,
331 };
332
333 let mut learner = QuantumContinualLearner::new(model, strategy);
334
335 println!(" Created Parameter Isolation learner:");
336 println!(" - Allocation strategy: Masking");
337 println!(" - Growth threshold: 0.8");
338
339 // Generate tasks with different requirements
340 let tasks = generate_varying_complexity_tasks(3, 90, 4);
341
342 println!("\n Learning with parameter isolation...");
343
344 let mut optimizer = Adam::new(0.001);
345 let mut parameter_usage = Vec::new();
346
347 for (i, task) in tasks.iter().enumerate() {
348 println!(" \n Allocating parameters for {}...", task.task_id);
349
350 let metrics = learner.learn_task(task.clone(), &mut optimizer, 30)?;
351
352 // Simulate parameter usage tracking
353 let used_params = 16 * (i + 1) / tasks.len(); // Gradually use more parameters
354 parameter_usage.push(used_params);
355
356 println!(" - Task accuracy: {:.3}", metrics.current_accuracy);
357 println!(" - Parameters allocated: {}/{}", used_params, 16);
358 println!(
359 " - Parameter efficiency: {:.1}%",
360 used_params as f64 / 16.0 * 100.0
361 );
362
363 if i > 0 {
364 let all_accuracies = learner.evaluate_all_tasks()?;
365 let interference = 1.0
366 - all_accuracies
367 .values()
368 .take(i)
369 .map(|&acc| if acc > 0.7 { 1.0 } else { 0.0 })
370 .sum::<f64>()
371 / i as f64;
372
373 println!(" - Task interference: {:.1}%", interference * 100.0);
374 }
375 }
376
377 println!("\n Parameter Isolation Results:");
378 println!(" - Dedicated parameters prevent interference");
379 println!(" - Scalable to many tasks");
380 println!(" - Maintains task-specific knowledge");
381
382 Ok(())
383 }
384
385 /// Demonstrate comprehensive task sequence evaluation
386 fn task_sequence_demo() -> Result<()> {
387 println!(" Comprehensive continual learning evaluation...");
388
389 // Compare different strategies
390 let strategies = vec![
391 (
392 "EWC",
393 ContinualLearningStrategy::ElasticWeightConsolidation {
394 importance_weight: 500.0,
395 fisher_samples: 100,
396 },
397 ),
398 (
399 "Experience Replay",
400 ContinualLearningStrategy::ExperienceReplay {
401 buffer_size: 300,
402 replay_ratio: 0.2,
403 memory_selection: MemorySelectionStrategy::Random,
404 },
405 ),
406 (
407 "Quantum Regularization",
408 ContinualLearningStrategy::QuantumRegularization {
409 entanglement_preservation: 0.1,
410 parameter_drift_penalty: 0.5,
411 },
412 ),
413 ];
414
415 // Generate challenging task sequence
416 let tasks = generate_challenging_sequence(5, 60, 4);
417
418 println!(
419 "\n Comparing strategies on {} challenging tasks:",
420 tasks.len()
421 );
422
423 for (strategy_name, strategy) in strategies {
424 println!("\n --- {strategy_name} ---");
425
426 let layers = vec![
427 QNNLayerType::EncodingLayer { num_features: 4 },
428 QNNLayerType::VariationalLayer { num_params: 8 },
429 QNNLayerType::MeasurementLayer {
430 measurement_basis: "computational".to_string(),
431 },
432 ];
433
434 let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;
435 let mut learner = QuantumContinualLearner::new(model, strategy);
436 let mut optimizer = Adam::new(0.001);
437
438 for task in &tasks {
439 learner.learn_task(task.clone(), &mut optimizer, 20)?;
440 }
441
442 let final_metrics = learner.get_forgetting_metrics();
443 println!(
444 " - Average accuracy: {:.3}",
445 final_metrics.average_accuracy
446 );
447 println!(
448 " - Forgetting measure: {:.3}",
449 final_metrics.forgetting_measure
450 );
451 println!(
452 " - CL score: {:.3}",
453 final_metrics.continual_learning_score
454 );
455 }
456
457 Ok(())
458 }
459
460 /// Demonstrate forgetting analysis
461 fn forgetting_analysis_demo() -> Result<()> {
462 println!(" Detailed forgetting analysis...");
463
464 let layers = vec![
465 QNNLayerType::EncodingLayer { num_features: 4 },
466 QNNLayerType::VariationalLayer { num_params: 12 },
467 QNNLayerType::MeasurementLayer {
468 measurement_basis: "computational".to_string(),
469 },
470 ];
471
472 let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;
473
474 let strategy = ContinualLearningStrategy::ElasticWeightConsolidation {
475 importance_weight: 1000.0,
476 fisher_samples: 150,
477 };
478
479 let mut learner = QuantumContinualLearner::new(model, strategy);
480
481 // Create tasks with increasing difficulty
482 let tasks = generate_increasing_difficulty_tasks(4, 80, 4);
483
484 println!("\n Learning tasks with increasing difficulty...");
485
486 let mut optimizer = Adam::new(0.001);
487 let mut accuracy_matrix = Vec::new();
488
489 for (i, task) in tasks.iter().enumerate() {
490 println!(
491 " \n Learning {} (difficulty level {})...",
492 task.task_id,
493 i + 1
494 );
495
496 learner.learn_task(task.clone(), &mut optimizer, 25)?;
497
498 // Evaluate on all tasks learned so far
499 let all_accuracies = learner.evaluate_all_tasks()?;
500 let mut current_row = Vec::new();
501
502 for j in 0..=i {
503 let task_id = &tasks[j].task_id;
504 let accuracy = all_accuracies.get(task_id).unwrap_or(&0.0);
505 current_row.push(*accuracy);
506 }
507
508 accuracy_matrix.push(current_row.clone());
509
510 // Print current performance
511 for (j, &acc) in current_row.iter().enumerate() {
512 println!(" - Task {}: {:.3}", j + 1, acc);
513 }
514 }
515
516 println!("\n Forgetting Analysis Results:");
517
518 // Compute backward transfer
519 for i in 1..accuracy_matrix.len() {
520 for j in 0..i {
521 let current_acc = accuracy_matrix[i][j];
522 let original_acc = accuracy_matrix[j][j];
523 let forgetting = (original_acc - current_acc).max(0.0);
524
525 if forgetting > 0.1 {
526 println!(" - Significant forgetting detected for Task {} after learning Task {}: {:.3}",
527 j + 1, i + 1, forgetting);
528 }
529 }
530 }
531
532 // Compute average forgetting
533 let mut total_forgetting = 0.0;
534 let mut num_comparisons = 0;
535
536 for i in 1..accuracy_matrix.len() {
537 for j in 0..i {
538 let current_acc = accuracy_matrix[i][j];
539 let original_acc = accuracy_matrix[j][j];
540 total_forgetting += (original_acc - current_acc).max(0.0);
541 num_comparisons += 1;
542 }
543 }
544
545 let avg_forgetting = if num_comparisons > 0 {
546 total_forgetting / f64::from(num_comparisons)
547 } else {
548 0.0
549 };
550
551 println!(" - Average forgetting: {avg_forgetting:.3}");
552
553 // Compute final average accuracy
554 if let Some(final_row) = accuracy_matrix.last() {
555 let final_avg = final_row.iter().sum::<f64>() / final_row.len() as f64;
556 println!(" - Final average accuracy: {final_avg:.3}");
557 println!(
558 " - Continual learning effectiveness: {:.1}%",
559 (1.0 - avg_forgetting) * 100.0
560 );
561 }
562
563 Ok(())
564 }

Additional examples can be found in:
Trait Implementations

Auto Trait Implementations
impl Freeze for Adam
impl RefUnwindSafe for Adam
impl Send for Adam
impl Sync for Adam
impl Unpin for Adam
impl UnwindSafe for Adam
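A compile-time sketch confirming the thread-safety auto traits above (a standard assertion idiom, not crate API):

// Compiles only because Adam is Send + Sync, per the auto trait impls above.
fn assert_send_sync<T: Send + Sync>() {}

fn _check() {
    assert_send_sync::<Adam>();
}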
Blanket Implementations
impl<T> BorrowMut<T> for T
where
    T: ?Sized,
fn borrow_mut(&mut self) -> &mut T
Mutably borrows from an owned value.
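A one-line illustration with this type; the blanket impl comes from std, so it applies to Adam as to any other type:

use std::borrow::BorrowMut;

let mut opt = Adam::new(0.01);
let opt_ref: &mut Adam = opt.borrow_mut(); // identity borrow via the blanket impl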
impl<T> IntoEither for T

fn into_either(self, into_left: bool) -> Either<Self, Self>
Converts self into a Left variant of Either<Self, Self> if into_left is true; otherwise converts self into a Right variant of Either<Self, Self>.

fn into_either_with<F>(self, into_left: F) -> Either<Self, Self>

Converts self into a Left variant of Either<Self, Self> if into_left(&self) returns true; otherwise converts self into a Right variant of Either<Self, Self>.
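A short sketch of the blanket impl from the either crate (assumes either is available as a direct dependency):

use either::{Either, IntoEither};

// Pick a branch at runtime while keeping one concrete wrapper type.
let wrapped: Either<Adam, Adam> = Adam::new(0.01).into_either(true);
assert!(wrapped.is_left());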
impl<T> Pointable for T
impl<SS, SP> SupersetOf<SS> for SP
where
    SS: SubsetOf<SP>,
fn to_subset(&self) -> Option<SS>
The inverse inclusion map: attempts to construct self from the equivalent element of its superset.

fn is_in_subset(&self) -> bool

Checks if self is actually part of its subset SS (and can be converted to it).

fn to_subset_unchecked(&self) -> SS

Use with care! Same as self.to_subset but without any property checks. Always succeeds.

fn from_subset(element: &SS) -> SP

The inclusion map: converts self to the equivalent element of its superset.
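These methods come from simba's subset/superset machinery and reach this page through the blanket impl above. A primitive-scalar sketch, assuming the simba crate (the trait's home) is available:

use simba::scalar::SupersetOf;

let x: f64 = 1.5;
// Inverse inclusion map: succeeds because 1.5 is exactly representable as f32.
let down: Option<f32> = x.to_subset();
assert_eq!(down, Some(1.5f32));

// Inclusion map: always succeeds.
let up: f64 = f64::from_subset(&2.0f32);
assert_eq!(up, 2.0);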