pub struct Adam { /* private fields */ }
Adam optimizer (adaptive moment estimation): a first-order, gradient-based optimizer that keeps per-parameter learning rates derived from exponentially decaying averages of past gradients and their squares.
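For background (not stated in this crate's docs), the standard Adam update from Kingma & Ba (2015) is sketched below in LaTeX. Only the learning rate α is exposed through new; the decay rates and epsilon are private fields, so the customary defaults β₁ = 0.9, β₂ = 0.999, ε = 1e-8 are an assumption here, not something this API confirms.

m_t = \beta_1 m_{t-1} + (1 - \beta_1) g_t
v_t = \beta_2 v_{t-1} + (1 - \beta_2) g_t^2
\hat{m}_t = m_t / (1 - \beta_1^t), \qquad \hat{v}_t = v_t / (1 - \beta_2^t)
\theta_t = \theta_{t-1} - \alpha \, \hat{m}_t / (\sqrt{\hat{v}_t} + \epsilon)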
Implementations
impl Adam

pub fn new(learning_rate: f64) -> Self

Creates a new Adam optimizer with the given learning rate.
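A minimal construction sketch (not from this crate's docs): the import path below is a guess, and the commented train call only mirrors the shape of the repository examples that follow.

use quantrs2_ml::prelude::*; // assumed re-export path; the examples below show real call sites

fn sketch() {
    // The repository examples construct Adam with learning rates between
    // 0.0005 and 0.01 and pass it mutably into training loops.
    let mut optimizer = Adam::new(0.001);

    // e.g. let losses = model.train(&data, &mut optimizer, epochs, batch_size)?;
    let _ = &mut optimizer; // silence the unused-variable lint in this fragment
}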
Examples found in repository
examples/few_shot_learning.rs (line 104)
83 fn test_prototypical_networks(
84 data: &Array2<f64>,
85 labels: &Array1<usize>,
86 qnn: QuantumNeuralNetwork,
87) -> Result<()> {
88 let mut learner = FewShotLearner::new(FewShotMethod::PrototypicalNetworks, qnn);
89
90 // Generate episodes for training
91 let num_episodes = 10;
92 let mut episodes = Vec::new();
93
94 for _ in 0..num_episodes {
95 let episode = FewShotLearner::generate_episode(
96 data, labels, 5, // 5-way
97 3, // 3-shot
98 5, // 5 query examples per class
99 )?;
100 episodes.push(episode);
101 }
102
103 // Train
104 let mut optimizer = Adam::new(0.01);
105 let accuracies = learner.train(&episodes, &mut optimizer, 20)?;
106
107 // Print results
108 println!(" Training completed:");
109 println!(" - Initial accuracy: {:.2}%", accuracies[0] * 100.0);
110 println!(
111 " - Final accuracy: {:.2}%",
112 accuracies.last().unwrap() * 100.0
113 );
114 println!(
115 " - Improvement: {:.2}%",
116 (accuracies.last().unwrap() - accuracies[0]) * 100.0
117 );
118
119 Ok(())
120 }
121
122 /// Test MAML
123 fn test_maml(data: &Array2<f64>, labels: &Array1<usize>, qnn: QuantumNeuralNetwork) -> Result<()> {
124 let mut learner = FewShotLearner::new(
125 FewShotMethod::MAML {
126 inner_steps: 5,
127 inner_lr: 0.01,
128 },
129 qnn,
130 );
131
132 // Generate meta-training tasks
133 let num_tasks = 20;
134 let mut tasks = Vec::new();
135
136 for _ in 0..num_tasks {
137 let task = FewShotLearner::generate_episode(
138 data, labels, 3, // 3-way (fewer classes for MAML)
139 5, // 5-shot
140 5, // 5 query examples
141 )?;
142 tasks.push(task);
143 }
144
145 // Meta-train
146 let mut meta_optimizer = Adam::new(0.001);
147 let losses = learner.train(&tasks, &mut meta_optimizer, 10)?;
148
149 println!(" Meta-training completed:");
150 println!(" - Initial loss: {:.4}", losses[0]);
151 println!(" - Final loss: {:.4}", losses.last().unwrap());
152 println!(
153 " - Convergence rate: {:.2}%",
154 (1.0 - losses.last().unwrap() / losses[0]) * 100.0
155 );
156
157 Ok(())
158 }
159
160 /// Compare performance across different K-shot values
161 fn compare_shot_performance(
162 data: &Array2<f64>,
163 labels: &Array1<usize>,
164 qnn: QuantumNeuralNetwork,
165) -> Result<()> {
166 let k_values = vec![1, 3, 5, 10];
167
168 for k in k_values {
169 println!("\n Testing {}-shot learning:", k);
170
171 let mut learner = FewShotLearner::new(FewShotMethod::PrototypicalNetworks, qnn.clone());
172
173 // Generate episodes
174 let mut episodes = Vec::new();
175 for _ in 0..5 {
176 let episode = FewShotLearner::generate_episode(
177 data, labels, 3, // 3-way
178 k, // k-shot
179 5, // 5 query
180 )?;
181 episodes.push(episode);
182 }
183
184 // Quick training
185 let mut optimizer = Adam::new(0.01);
186 let accuracies = learner.train(&episodes, &mut optimizer, 10)?;
187
188 println!(
189 " Final accuracy: {:.2}%",
190 accuracies.last().unwrap() * 100.0
191 );
192 }
193
194 Ok(())
195 }
More examples
examples/quantum_diffusion.rs (line 108)
86 fn train_diffusion_model() -> Result<()> {
87 // Generate synthetic 2D data (two moons)
88 let num_samples = 200;
89 let data = generate_two_moons(num_samples);
90
91 println!(" Generated {} samples of 2D two-moons data", num_samples);
92
93 // Create diffusion model
94 let mut model = QuantumDiffusionModel::new(
95 2, // data dimension
96 4, // num qubits
97 50, // timesteps
98 NoiseSchedule::Cosine { s: 0.008 },
99 )?;
100
101 println!(" Created quantum diffusion model:");
102 println!(" - Data dimension: 2");
103 println!(" - Qubits: 4");
104 println!(" - Timesteps: 50");
105 println!(" - Schedule: Cosine");
106
107 // Train model
108 let mut optimizer = Adam::new(0.001);
109 let epochs = 100;
110 let batch_size = 32;
111
112 println!("\n Training for {} epochs...", epochs);
113 let losses = model.train(&data, &mut optimizer, epochs, batch_size)?;
114
115 // Print training statistics
116 println!("\n Training Statistics:");
117 println!(" - Initial loss: {:.4}", losses[0]);
118 println!(" - Final loss: {:.4}", losses.last().unwrap());
119 println!(
120 " - Improvement: {:.2}%",
121 (1.0 - losses.last().unwrap() / losses[0]) * 100.0
122 );
123
124 Ok(())
125 }
examples/continuous_rl.rs (line 89)
69 fn train_qddpg_pendulum() -> Result<()> {
70 let state_dim = 3;
71 let action_dim = 1;
72 let action_bounds = vec![(-2.0, 2.0)];
73 let num_qubits = 4;
74 let buffer_capacity = 10000;
75
76 // Create QDDPG agent
77 let mut agent = QuantumDDPG::new(
78 state_dim,
79 action_dim,
80 action_bounds,
81 num_qubits,
82 buffer_capacity,
83 )?;
84
85 // Create environment
86 let mut env = PendulumEnvironment::new();
87
88 // Create optimizers
89 let mut actor_optimizer = Adam::new(0.001);
90 let mut critic_optimizer = Adam::new(0.001);
91
92 // Train for a few episodes (reduced for demo)
93 let episodes = 50;
94 println!(" Training QDDPG for {} episodes...", episodes);
95
96 let rewards = agent.train(
97 &mut env,
98 episodes,
99 &mut actor_optimizer,
100 &mut critic_optimizer,
101 )?;
102
103 // Print training statistics
104 let avg_initial = rewards[..10].iter().sum::<f64>() / 10.0;
105 let avg_final = rewards[rewards.len() - 10..].iter().sum::<f64>() / 10.0;
106
107 println!("\n Training Statistics:");
108 println!(" - Average initial reward: {:.2}", avg_initial);
109 println!(" - Average final reward: {:.2}", avg_final);
110 println!(" - Improvement: {:.2}", avg_final - avg_initial);
111
112 // Test trained agent
113 println!("\n Testing trained agent...");
114 test_trained_agent(&agent, &mut env)?;
115
116 Ok(())
117 }
examples/quantum_meta_learning.rs (line 86)
48 fn maml_demo() -> Result<()> {
49 // Create quantum model
50 let layers = vec![
51 QNNLayerType::EncodingLayer { num_features: 4 },
52 QNNLayerType::VariationalLayer { num_params: 12 },
53 QNNLayerType::EntanglementLayer {
54 connectivity: "circular".to_string(),
55 },
56 QNNLayerType::VariationalLayer { num_params: 12 },
57 QNNLayerType::MeasurementLayer {
58 measurement_basis: "computational".to_string(),
59 },
60 ];
61
62 let qnn = QuantumNeuralNetwork::new(layers, 4, 4, 3)?;
63
64 // Create MAML learner
65 let algorithm = MetaLearningAlgorithm::MAML {
66 inner_steps: 5,
67 inner_lr: 0.01,
68 first_order: true, // Use first-order approximation for efficiency
69 };
70
71 let mut meta_learner = QuantumMetaLearner::new(algorithm, qnn);
72
73 println!(" Created MAML meta-learner:");
74 println!(" - Inner steps: 5");
75 println!(" - Inner learning rate: 0.01");
76 println!(" - Using first-order approximation");
77
78 // Generate tasks
79 let generator = TaskGenerator::new(4, 3);
80 let tasks: Vec<MetaTask> = (0..20)
81 .map(|_| generator.generate_rotation_task(30))
82 .collect();
83
84 // Meta-train
85 println!("\n Meta-training on 20 rotation tasks...");
86 let mut optimizer = Adam::new(0.001);
87 meta_learner.meta_train(&tasks, &mut optimizer, 50, 5)?;
88
89 // Test adaptation
90 let test_task = generator.generate_rotation_task(20);
91 println!("\n Testing adaptation to new task...");
92
93 let adapted_params = meta_learner.adapt_to_task(&test_task)?;
94 println!(" Successfully adapted to new task");
95 println!(
96 " Parameter adaptation magnitude: {:.4}",
97 (&adapted_params - meta_learner.meta_params())
98 .mapv(|x| x.abs())
99 .mean()
100 .unwrap()
101 );
102
103 Ok(())
104 }
105
106 /// Reptile algorithm demonstration
107 fn reptile_demo() -> Result<()> {
108 let layers = vec![
109 QNNLayerType::EncodingLayer { num_features: 2 },
110 QNNLayerType::VariationalLayer { num_params: 8 },
111 QNNLayerType::MeasurementLayer {
112 measurement_basis: "Pauli-Z".to_string(),
113 },
114 ];
115
116 let qnn = QuantumNeuralNetwork::new(layers, 4, 2, 2)?;
117
118 let algorithm = MetaLearningAlgorithm::Reptile {
119 inner_steps: 10,
120 inner_lr: 0.1,
121 };
122
123 let mut meta_learner = QuantumMetaLearner::new(algorithm, qnn);
124
125 println!(" Created Reptile meta-learner:");
126 println!(" - Inner steps: 10");
127 println!(" - Inner learning rate: 0.1");
128
129 // Generate sinusoid tasks
130 let generator = TaskGenerator::new(2, 2);
131 let tasks: Vec<MetaTask> = (0..15)
132 .map(|_| generator.generate_sinusoid_task(40))
133 .collect();
134
135 println!("\n Meta-training on 15 sinusoid tasks...");
136 let mut optimizer = Adam::new(0.001);
137 meta_learner.meta_train(&tasks, &mut optimizer, 30, 3)?;
138
139 println!(" Reptile training complete");
140
141 // Analyze task similarities
142 println!("\n Task parameter statistics:");
143 for (i, task) in tasks.iter().take(3).enumerate() {
144 if let Some(amplitude) = task.metadata.get("amplitude") {
145 if let Some(phase) = task.metadata.get("phase") {
146 println!(
147 " Task {}: amplitude={:.2}, phase={:.2}",
148 i, amplitude, phase
149 );
150 }
151 }
152 }
153
154 Ok(())
155 }
156
157 /// ProtoMAML demonstration
158 fn protomaml_demo() -> Result<()> {
159 let layers = vec![
160 QNNLayerType::EncodingLayer { num_features: 8 },
161 QNNLayerType::VariationalLayer { num_params: 16 },
162 QNNLayerType::EntanglementLayer {
163 connectivity: "full".to_string(),
164 },
165 QNNLayerType::MeasurementLayer {
166 measurement_basis: "computational".to_string(),
167 },
168 ];
169
170 let qnn = QuantumNeuralNetwork::new(layers, 4, 8, 16)?;
171
172 let algorithm = MetaLearningAlgorithm::ProtoMAML {
173 inner_steps: 5,
174 inner_lr: 0.01,
175 proto_weight: 0.5, // Weight for prototype regularization
176 };
177
178 let mut meta_learner = QuantumMetaLearner::new(algorithm, qnn);
179
180 println!(" Created ProtoMAML meta-learner:");
181 println!(" - Combines MAML with prototypical networks");
182 println!(" - Prototype weight: 0.5");
183
184 // Generate classification tasks
185 let generator = TaskGenerator::new(8, 4);
186 let tasks: Vec<MetaTask> = (0..10)
187 .map(|_| generator.generate_rotation_task(50))
188 .collect();
189
190 println!("\n Meta-training on 4-way classification tasks...");
191 let mut optimizer = Adam::new(0.001);
192 meta_learner.meta_train(&tasks, &mut optimizer, 40, 2)?;
193
194 println!(" ProtoMAML leverages both gradient-based and metric-based learning");
195
196 Ok(())
197 }
198
199 /// Meta-SGD demonstration
200 fn metasgd_demo() -> Result<()> {
201 let layers = vec![
202 QNNLayerType::EncodingLayer { num_features: 4 },
203 QNNLayerType::VariationalLayer { num_params: 12 },
204 QNNLayerType::MeasurementLayer {
205 measurement_basis: "Pauli-XYZ".to_string(),
206 },
207 ];
208
209 let qnn = QuantumNeuralNetwork::new(layers, 4, 4, 3)?;
210
211 let algorithm = MetaLearningAlgorithm::MetaSGD { inner_steps: 3 };
212
213 let mut meta_learner = QuantumMetaLearner::new(algorithm, qnn);
214
215 println!(" Created Meta-SGD learner:");
216 println!(" - Learns per-parameter learning rates");
217 println!(" - Inner steps: 3");
218
219 // Generate diverse tasks
220 let generator = TaskGenerator::new(4, 3);
221 let mut tasks = Vec::new();
222
223 // Mix different task types
224 for i in 0..12 {
225 if i % 2 == 0 {
226 tasks.push(generator.generate_rotation_task(30));
227 } else {
228 tasks.push(generator.generate_sinusoid_task(30));
229 }
230 }
231
232 println!("\n Meta-training on mixed task distribution...");
233 let mut optimizer = Adam::new(0.0005);
234 meta_learner.meta_train(&tasks, &mut optimizer, 50, 4)?;
235
236 if let Some(lr) = meta_learner.per_param_lr() {
237 println!("\n Learned per-parameter learning rates:");
238 println!(
239 " - Min LR: {:.4}",
240 lr.iter().cloned().fold(f64::INFINITY, f64::min)
241 );
242 println!(
243 " - Max LR: {:.4}",
244 lr.iter().cloned().fold(f64::NEG_INFINITY, f64::max)
245 );
246 println!(" - Mean LR: {:.4}", lr.mean().unwrap());
247 }
248
249 Ok(())
250 }
251
252 /// ANIL demonstration
253 fn anil_demo() -> Result<()> {
254 let layers = vec![
255 QNNLayerType::EncodingLayer { num_features: 6 },
256 QNNLayerType::VariationalLayer { num_params: 12 },
257 QNNLayerType::EntanglementLayer {
258 connectivity: "circular".to_string(),
259 },
260 QNNLayerType::VariationalLayer { num_params: 12 },
261 QNNLayerType::VariationalLayer { num_params: 6 }, // Final layer (adapted)
262 QNNLayerType::MeasurementLayer {
263 measurement_basis: "computational".to_string(),
264 },
265 ];
266
267 let qnn = QuantumNeuralNetwork::new(layers, 4, 6, 2)?;
268
269 let algorithm = MetaLearningAlgorithm::ANIL {
270 inner_steps: 10,
271 inner_lr: 0.1,
272 };
273
274 let mut meta_learner = QuantumMetaLearner::new(algorithm, qnn);
275
276 println!(" Created ANIL (Almost No Inner Loop) learner:");
277 println!(" - Only adapts final layer during inner loop");
278 println!(" - More parameter efficient than MAML");
279 println!(" - Inner steps: 10");
280
281 // Generate binary classification tasks
282 let generator = TaskGenerator::new(6, 2);
283 let tasks: Vec<MetaTask> = (0..15)
284 .map(|_| generator.generate_rotation_task(40))
285 .collect();
286
287 println!("\n Meta-training on binary classification tasks...");
288 let mut optimizer = Adam::new(0.001);
289 meta_learner.meta_train(&tasks, &mut optimizer, 40, 5)?;
290
291 println!(" ANIL reduces computational cost while maintaining performance");
292
293 Ok(())
294 }
examples/quantum_adversarial.rs (line 272)
225 fn adversarial_training_demo() -> Result<()> {
226 // Create model and trainer
227 let layers = vec![
228 QNNLayerType::EncodingLayer { num_features: 4 },
229 QNNLayerType::VariationalLayer { num_params: 12 },
230 QNNLayerType::EntanglementLayer {
231 connectivity: "circular".to_string(),
232 },
233 QNNLayerType::MeasurementLayer {
234 measurement_basis: "computational".to_string(),
235 },
236 ];
237
238 let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;
239
240 let defense = QuantumDefenseStrategy::AdversarialTraining {
241 attack_types: vec![
242 QuantumAttackType::FGSM { epsilon: 0.08 },
243 QuantumAttackType::PGD {
244 epsilon: 0.08,
245 alpha: 0.01,
246 num_steps: 7,
247 },
248 ],
249 adversarial_ratio: 0.4,
250 };
251
252 let mut config = create_default_adversarial_config();
253 config.epochs = 20; // Reduced for demo
254 config.eval_interval = 5;
255
256 let mut trainer = QuantumAdversarialTrainer::new(model, defense, config);
257
258 println!(" Adversarial training configuration:");
259 println!(" - Attack types: FGSM + PGD");
260 println!(" - Adversarial ratio: 40%");
261 println!(" - Training epochs: 20");
262
263 // Generate synthetic training data
264 let train_data = generate_quantum_dataset(200, 4);
265 let train_labels = Array1::from_shape_fn(200, |i| i % 2);
266
267 let val_data = generate_quantum_dataset(50, 4);
268 let val_labels = Array1::from_shape_fn(50, |i| i % 2);
269
270 // Train with adversarial examples
271 println!("\n Starting adversarial training...");
272 let mut optimizer = Adam::new(0.001);
273 let losses = trainer.train(
274 &train_data,
275 &train_labels,
276 &val_data,
277 &val_labels,
278 &mut optimizer,
279 )?;
280
281 println!(" Training completed!");
282 println!(" Final loss: {:.4}", losses.last().unwrap_or(&0.0));
283
284 // Show final robustness metrics
285 let metrics = trainer.get_robustness_metrics();
286 println!("\n Final robustness metrics:");
287 println!(" - Clean accuracy: {:.3}", metrics.clean_accuracy);
288 println!(" - Robust accuracy: {:.3}", metrics.robust_accuracy);
289 println!(
290 " - Attack success rate: {:.3}",
291 metrics.attack_success_rate
292 );
293
294 Ok(())
295 }
examples/quantum_continual_learning.rs (line 82)
49 fn ewc_demo() -> Result<()> {
50 // Create quantum model
51 let layers = vec![
52 QNNLayerType::EncodingLayer { num_features: 4 },
53 QNNLayerType::VariationalLayer { num_params: 12 },
54 QNNLayerType::EntanglementLayer {
55 connectivity: "circular".to_string(),
56 },
57 QNNLayerType::VariationalLayer { num_params: 8 },
58 QNNLayerType::MeasurementLayer {
59 measurement_basis: "computational".to_string(),
60 },
61 ];
62
63 let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;
64
65 // Create EWC strategy
66 let strategy = ContinualLearningStrategy::ElasticWeightConsolidation {
67 importance_weight: 1000.0,
68 fisher_samples: 200,
69 };
70
71 let mut learner = QuantumContinualLearner::new(model, strategy);
72
73 println!(" Created EWC continual learner:");
74 println!(" - Importance weight: 1000.0");
75 println!(" - Fisher samples: 200");
76
77 // Generate task sequence
78 let tasks = generate_task_sequence(3, 100, 4);
79
80 println!("\n Learning sequence of {} tasks...", tasks.len());
81
82 let mut optimizer = Adam::new(0.001);
83 let mut task_accuracies = Vec::new();
84
85 for (i, task) in tasks.iter().enumerate() {
86 println!(" \n Training on {}...", task.task_id);
87
88 let metrics = learner.learn_task(task.clone(), &mut optimizer, 30)?;
89 task_accuracies.push(metrics.current_accuracy);
90
91 println!(" - Current accuracy: {:.3}", metrics.current_accuracy);
92
93 // Evaluate forgetting on previous tasks
94 if i > 0 {
95 let all_accuracies = learner.evaluate_all_tasks()?;
96 let avg_prev_accuracy = all_accuracies
97 .iter()
98 .take(i)
99 .map(|(_, &acc)| acc)
100 .sum::<f64>()
101 / i as f64;
102
103 println!(
104 " - Average accuracy on previous tasks: {:.3}",
105 avg_prev_accuracy
106 );
107 }
108 }
109
110 // Final evaluation
111 let forgetting_metrics = learner.get_forgetting_metrics();
112 println!("\n EWC Results:");
113 println!(
114 " - Average accuracy: {:.3}",
115 forgetting_metrics.average_accuracy
116 );
117 println!(
118 " - Forgetting measure: {:.3}",
119 forgetting_metrics.forgetting_measure
120 );
121 println!(
122 " - Continual learning score: {:.3}",
123 forgetting_metrics.continual_learning_score
124 );
125
126 Ok(())
127 }
128
129 /// Demonstrate Experience Replay
130 fn experience_replay_demo() -> Result<()> {
131 let layers = vec![
132 QNNLayerType::EncodingLayer { num_features: 4 },
133 QNNLayerType::VariationalLayer { num_params: 8 },
134 QNNLayerType::MeasurementLayer {
135 measurement_basis: "computational".to_string(),
136 },
137 ];
138
139 let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;
140
141 let strategy = ContinualLearningStrategy::ExperienceReplay {
142 buffer_size: 500,
143 replay_ratio: 0.3,
144 memory_selection: MemorySelectionStrategy::Random,
145 };
146
147 let mut learner = QuantumContinualLearner::new(model, strategy);
148
149 println!(" Created Experience Replay learner:");
150 println!(" - Buffer size: 500");
151 println!(" - Replay ratio: 30%");
152 println!(" - Selection: Random");
153
154 // Generate diverse tasks
155 let tasks = generate_diverse_tasks(4, 80, 4);
156
157 println!("\n Learning {} diverse tasks...", tasks.len());
158
159 let mut optimizer = Adam::new(0.002);
160
161 for (i, task) in tasks.iter().enumerate() {
162 println!(" \n Learning {}...", task.task_id);
163
164 let metrics = learner.learn_task(task.clone(), &mut optimizer, 25)?;
165
166 println!(" - Task accuracy: {:.3}", metrics.current_accuracy);
167
168 // Show memory buffer status
169 println!(" - Memory buffer usage: replay experiences stored");
170
171 if i > 0 {
172 let all_accuracies = learner.evaluate_all_tasks()?;
173 let retention_rate = all_accuracies.values().sum::<f64>() / all_accuracies.len() as f64;
174 println!(" - Average retention: {:.3}", retention_rate);
175 }
176 }
177
178 let final_metrics = learner.get_forgetting_metrics();
179 println!("\n Experience Replay Results:");
180 println!(
181 " - Final average accuracy: {:.3}",
182 final_metrics.average_accuracy
183 );
184 println!(
185 " - Forgetting reduction: {:.3}",
186 1.0 - final_metrics.forgetting_measure
187 );
188
189 Ok(())
190 }
191
192 /// Demonstrate Progressive Networks
193 fn progressive_networks_demo() -> Result<()> {
194 let layers = vec![
195 QNNLayerType::EncodingLayer { num_features: 4 },
196 QNNLayerType::VariationalLayer { num_params: 6 },
197 QNNLayerType::MeasurementLayer {
198 measurement_basis: "computational".to_string(),
199 },
200 ];
201
202 let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;
203
204 let strategy = ContinualLearningStrategy::ProgressiveNetworks {
205 lateral_connections: true,
206 adaptation_layers: 2,
207 };
208
209 let mut learner = QuantumContinualLearner::new(model, strategy);
210
211 println!(" Created Progressive Networks learner:");
212 println!(" - Lateral connections: enabled");
213 println!(" - Adaptation layers: 2");
214
215 // Generate related tasks for transfer learning
216 let tasks = generate_related_tasks(3, 60, 4);
217
218 println!("\n Learning {} related tasks...", tasks.len());
219
220 let mut optimizer = Adam::new(0.001);
221 let mut learning_speeds = Vec::new();
222
223 for (i, task) in tasks.iter().enumerate() {
224 println!(" \n Adding column for {}...", task.task_id);
225
226 let start_time = std::time::Instant::now();
227 let metrics = learner.learn_task(task.clone(), &mut optimizer, 20)?;
228 let learning_time = start_time.elapsed();
229
230 learning_speeds.push(learning_time);
231
232 println!(" - Task accuracy: {:.3}", metrics.current_accuracy);
233 println!(" - Learning time: {:.2?}", learning_time);
234
235 if i > 0 {
236 let speedup = learning_speeds[0].as_secs_f64() / learning_time.as_secs_f64();
237 println!(" - Learning speedup: {:.2}x", speedup);
238 }
239 }
240
241 println!("\n Progressive Networks Results:");
242 println!(" - No catastrophic forgetting (by design)");
243 println!(" - Lateral connections enable knowledge transfer");
244 println!(" - Model capacity grows with new tasks");
245
246 Ok(())
247 }
248
249 /// Demonstrate Learning without Forgetting
250 fn lwf_demo() -> Result<()> {
251 let layers = vec![
252 QNNLayerType::EncodingLayer { num_features: 4 },
253 QNNLayerType::VariationalLayer { num_params: 10 },
254 QNNLayerType::EntanglementLayer {
255 connectivity: "circular".to_string(),
256 },
257 QNNLayerType::MeasurementLayer {
258 measurement_basis: "computational".to_string(),
259 },
260 ];
261
262 let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;
263
264 let strategy = ContinualLearningStrategy::LearningWithoutForgetting {
265 distillation_weight: 0.5,
266 temperature: 3.0,
267 };
268
269 let mut learner = QuantumContinualLearner::new(model, strategy);
270
271 println!(" Created Learning without Forgetting learner:");
272 println!(" - Distillation weight: 0.5");
273 println!(" - Temperature: 3.0");
274
275 // Generate task sequence
276 let tasks = generate_task_sequence(4, 70, 4);
277
278 println!("\n Learning with knowledge distillation...");
279
280 let mut optimizer = Adam::new(0.001);
281 let mut distillation_losses = Vec::new();
282
283 for (i, task) in tasks.iter().enumerate() {
284 println!(" \n Learning {}...", task.task_id);
285
286 let metrics = learner.learn_task(task.clone(), &mut optimizer, 25)?;
287
288 println!(" - Task accuracy: {:.3}", metrics.current_accuracy);
289
290 if i > 0 {
291 // Simulate distillation loss tracking
292 let distillation_loss = 0.1 + 0.3 * fastrand::f64();
293 distillation_losses.push(distillation_loss);
294 println!(" - Distillation loss: {:.3}", distillation_loss);
295
296 let all_accuracies = learner.evaluate_all_tasks()?;
297 let stability = all_accuracies
298 .values()
299 .map(|&acc| if acc > 0.6 { 1.0 } else { 0.0 })
300 .sum::<f64>()
301 / all_accuracies.len() as f64;
302
303 println!(" - Knowledge retention: {:.1}%", stability * 100.0);
304 }
305 }
306
307 println!("\n LwF Results:");
308 println!(" - Knowledge distillation preserves previous task performance");
309 println!(" - Temperature scaling provides soft targets");
310 println!(" - Balances plasticity and stability");
311
312 Ok(())
313 }
314
315 /// Demonstrate Parameter Isolation
316 fn parameter_isolation_demo() -> Result<()> {
317 let layers = vec![
318 QNNLayerType::EncodingLayer { num_features: 4 },
319 QNNLayerType::VariationalLayer { num_params: 16 },
320 QNNLayerType::EntanglementLayer {
321 connectivity: "full".to_string(),
322 },
323 QNNLayerType::MeasurementLayer {
324 measurement_basis: "computational".to_string(),
325 },
326 ];
327
328 let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;
329
330 let strategy = ContinualLearningStrategy::ParameterIsolation {
331 allocation_strategy: ParameterAllocationStrategy::Masking,
332 growth_threshold: 0.8,
333 };
334
335 let mut learner = QuantumContinualLearner::new(model, strategy);
336
337 println!(" Created Parameter Isolation learner:");
338 println!(" - Allocation strategy: Masking");
339 println!(" - Growth threshold: 0.8");
340
341 // Generate tasks with different requirements
342 let tasks = generate_varying_complexity_tasks(3, 90, 4);
343
344 println!("\n Learning with parameter isolation...");
345
346 let mut optimizer = Adam::new(0.001);
347 let mut parameter_usage = Vec::new();
348
349 for (i, task) in tasks.iter().enumerate() {
350 println!(" \n Allocating parameters for {}...", task.task_id);
351
352 let metrics = learner.learn_task(task.clone(), &mut optimizer, 30)?;
353
354 // Simulate parameter usage tracking
355 let used_params = 16 * (i + 1) / tasks.len(); // Gradually use more parameters
356 parameter_usage.push(used_params);
357
358 println!(" - Task accuracy: {:.3}", metrics.current_accuracy);
359 println!(" - Parameters allocated: {}/{}", used_params, 16);
360 println!(
361 " - Parameter efficiency: {:.1}%",
362 used_params as f64 / 16.0 * 100.0
363 );
364
365 if i > 0 {
366 let all_accuracies = learner.evaluate_all_tasks()?;
367 let interference = 1.0
368 - all_accuracies
369 .values()
370 .take(i)
371 .map(|&acc| if acc > 0.7 { 1.0 } else { 0.0 })
372 .sum::<f64>()
373 / i as f64;
374
375 println!(" - Task interference: {:.1}%", interference * 100.0);
376 }
377 }
378
379 println!("\n Parameter Isolation Results:");
380 println!(" - Dedicated parameters prevent interference");
381 println!(" - Scalable to many tasks");
382 println!(" - Maintains task-specific knowledge");
383
384 Ok(())
385 }
386
387 /// Demonstrate comprehensive task sequence evaluation
388 fn task_sequence_demo() -> Result<()> {
389 println!(" Comprehensive continual learning evaluation...");
390
391 // Compare different strategies
392 let strategies = vec![
393 (
394 "EWC",
395 ContinualLearningStrategy::ElasticWeightConsolidation {
396 importance_weight: 500.0,
397 fisher_samples: 100,
398 },
399 ),
400 (
401 "Experience Replay",
402 ContinualLearningStrategy::ExperienceReplay {
403 buffer_size: 300,
404 replay_ratio: 0.2,
405 memory_selection: MemorySelectionStrategy::Random,
406 },
407 ),
408 (
409 "Quantum Regularization",
410 ContinualLearningStrategy::QuantumRegularization {
411 entanglement_preservation: 0.1,
412 parameter_drift_penalty: 0.5,
413 },
414 ),
415 ];
416
417 // Generate challenging task sequence
418 let tasks = generate_challenging_sequence(5, 60, 4);
419
420 println!(
421 "\n Comparing strategies on {} challenging tasks:",
422 tasks.len()
423 );
424
425 for (strategy_name, strategy) in strategies {
426 println!("\n --- {} ---", strategy_name);
427
428 let layers = vec![
429 QNNLayerType::EncodingLayer { num_features: 4 },
430 QNNLayerType::VariationalLayer { num_params: 8 },
431 QNNLayerType::MeasurementLayer {
432 measurement_basis: "computational".to_string(),
433 },
434 ];
435
436 let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;
437 let mut learner = QuantumContinualLearner::new(model, strategy);
438 let mut optimizer = Adam::new(0.001);
439
440 for task in &tasks {
441 learner.learn_task(task.clone(), &mut optimizer, 20)?;
442 }
443
444 let final_metrics = learner.get_forgetting_metrics();
445 println!(
446 " - Average accuracy: {:.3}",
447 final_metrics.average_accuracy
448 );
449 println!(
450 " - Forgetting measure: {:.3}",
451 final_metrics.forgetting_measure
452 );
453 println!(
454 " - CL score: {:.3}",
455 final_metrics.continual_learning_score
456 );
457 }
458
459 Ok(())
460 }
461
462 /// Demonstrate forgetting analysis
463 fn forgetting_analysis_demo() -> Result<()> {
464 println!(" Detailed forgetting analysis...");
465
466 let layers = vec![
467 QNNLayerType::EncodingLayer { num_features: 4 },
468 QNNLayerType::VariationalLayer { num_params: 12 },
469 QNNLayerType::MeasurementLayer {
470 measurement_basis: "computational".to_string(),
471 },
472 ];
473
474 let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;
475
476 let strategy = ContinualLearningStrategy::ElasticWeightConsolidation {
477 importance_weight: 1000.0,
478 fisher_samples: 150,
479 };
480
481 let mut learner = QuantumContinualLearner::new(model, strategy);
482
483 // Create tasks with increasing difficulty
484 let tasks = generate_increasing_difficulty_tasks(4, 80, 4);
485
486 println!("\n Learning tasks with increasing difficulty...");
487
488 let mut optimizer = Adam::new(0.001);
489 let mut accuracy_matrix = Vec::new();
490
491 for (i, task) in tasks.iter().enumerate() {
492 println!(
493 " \n Learning {} (difficulty level {})...",
494 task.task_id,
495 i + 1
496 );
497
498 learner.learn_task(task.clone(), &mut optimizer, 25)?;
499
500 // Evaluate on all tasks learned so far
501 let all_accuracies = learner.evaluate_all_tasks()?;
502 let mut current_row = Vec::new();
503
504 for j in 0..=i {
505 let task_id = &tasks[j].task_id;
506 let accuracy = all_accuracies.get(task_id).unwrap_or(&0.0);
507 current_row.push(*accuracy);
508 }
509
510 accuracy_matrix.push(current_row.clone());
511
512 // Print current performance
513 for (j, &acc) in current_row.iter().enumerate() {
514 println!(" - Task {}: {:.3}", j + 1, acc);
515 }
516 }
517
518 println!("\n Forgetting Analysis Results:");
519
520 // Compute backward transfer
521 for i in 1..accuracy_matrix.len() {
522 for j in 0..i {
523 let current_acc = accuracy_matrix[i][j];
524 let original_acc = accuracy_matrix[j][j];
525 let forgetting = (original_acc - current_acc).max(0.0);
526
527 if forgetting > 0.1 {
528 println!(" - Significant forgetting detected for Task {} after learning Task {}: {:.3}",
529 j + 1, i + 1, forgetting);
530 }
531 }
532 }
533
534 // Compute average forgetting
535 let mut total_forgetting = 0.0;
536 let mut num_comparisons = 0;
537
538 for i in 1..accuracy_matrix.len() {
539 for j in 0..i {
540 let current_acc = accuracy_matrix[i][j];
541 let original_acc = accuracy_matrix[j][j];
542 total_forgetting += (original_acc - current_acc).max(0.0);
543 num_comparisons += 1;
544 }
545 }
546
547 let avg_forgetting = if num_comparisons > 0 {
548 total_forgetting / num_comparisons as f64
549 } else {
550 0.0
551 };
552
553 println!(" - Average forgetting: {:.3}", avg_forgetting);
554
555 // Compute final average accuracy
556 if let Some(final_row) = accuracy_matrix.last() {
557 let final_avg = final_row.iter().sum::<f64>() / final_row.len() as f64;
558 println!(" - Final average accuracy: {:.3}", final_avg);
559 println!(
560 " - Continual learning effectiveness: {:.1}%",
561 (1.0 - avg_forgetting) * 100.0
562 );
563 }
564
565 Ok(())
566 }
Trait Implementations

Auto Trait Implementations
impl Freeze for Adam
impl RefUnwindSafe for Adam
impl Send for Adam
impl Sync for Adam
impl Unpin for Adam
impl UnwindSafe for Adam
Blanket Implementations
impl<T> BorrowMut<T> for T
where
    T: ?Sized,

fn borrow_mut(&mut self) -> &mut T

Mutably borrows from an owned value.
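This is the standard library's reflexive blanket impl; a tiny illustration (the type annotation selects it over Vec's BorrowMut<[T]> impl):

use std::borrow::BorrowMut;

fn sketch() {
    let mut v = vec![1, 2, 3];
    // Any T can be mutably borrowed as itself via the blanket impl.
    let same: &mut Vec<i32> = v.borrow_mut();
    same.push(4);
    assert_eq!(v, [1, 2, 3, 4]);
}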
impl<T> IntoEither for T

fn into_either(self, into_left: bool) -> Either<Self, Self>
Converts self into a Left variant of Either<Self, Self> if into_left is true. Converts self into a Right variant of Either<Self, Self> otherwise.

fn into_either_with<F>(self, into_left: F) -> Either<Self, Self>
Converts self into a Left variant of Either<Self, Self> if into_left(&self) returns true. Converts self into a Right variant of Either<Self, Self> otherwise.
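These blanket impls come from the either crate; a short sketch of both methods, assuming either is available as a dependency:

use either::{Either, IntoEither};

fn sketch() {
    // Left when the flag is true, Right otherwise.
    let flagged: Either<i32, i32> = 42.into_either(true);
    assert!(flagged.is_left());

    // into_either_with decides by inspecting the value itself.
    let parity = 7.into_either_with(|v| v % 2 == 0);
    assert!(parity.is_right());
}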
impl<T> Pointable for T
impl<SS, SP> SupersetOf<SS> for SP
where
    SS: SubsetOf<SP>,
fn to_subset(&self) -> Option<SS>

The inverse inclusion map: attempts to construct self from the equivalent element of its superset.

fn is_in_subset(&self) -> bool

Checks if self is actually part of its subset T (and can be converted to it).

fn to_subset_unchecked(&self) -> SS

Use with care! Same as self.to_subset but without any property checks. Always succeeds.

fn from_subset(element: &SS) -> SP

The inclusion map: converts self to the equivalent element of its superset.