pub struct QuantumContinualLearner { /* private fields */ }

Expand description
Quantum continual learner
Implementations§
Source
impl QuantumContinualLearner
impl QuantumContinualLearner
Source
pub fn new(
model: QuantumNeuralNetwork,
strategy: ContinualLearningStrategy,
) -> Self
pub fn new( model: QuantumNeuralNetwork, strategy: ContinualLearningStrategy, ) -> Self
Create a new quantum continual learner
Examples found in repository?
examples/quantum_continual_learning.rs (line 72)
50fn ewc_demo() -> Result<()> {
51 // Create quantum model
52 let layers = vec![
53 QNNLayerType::EncodingLayer { num_features: 4 },
54 QNNLayerType::VariationalLayer { num_params: 12 },
55 QNNLayerType::EntanglementLayer {
56 connectivity: "circular".to_string(),
57 },
58 QNNLayerType::VariationalLayer { num_params: 8 },
59 QNNLayerType::MeasurementLayer {
60 measurement_basis: "computational".to_string(),
61 },
62 ];
63
64 let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;
65
66 // Create EWC strategy
67 let strategy = ContinualLearningStrategy::ElasticWeightConsolidation {
68 importance_weight: 1000.0,
69 fisher_samples: 200,
70 };
71
72 let mut learner = QuantumContinualLearner::new(model, strategy);
73
74 println!(" Created EWC continual learner:");
75 println!(" - Importance weight: 1000.0");
76 println!(" - Fisher samples: 200");
77
78 // Generate task sequence
79 let tasks = generate_task_sequence(3, 100, 4);
80
81 println!("\n Learning sequence of {} tasks...", tasks.len());
82
83 let mut optimizer = Adam::new(0.001);
84 let mut task_accuracies = Vec::new();
85
86 for (i, task) in tasks.iter().enumerate() {
87 println!(" \n Training on {}...", task.task_id);
88
89 let metrics = learner.learn_task(task.clone(), &mut optimizer, 30)?;
90 task_accuracies.push(metrics.current_accuracy);
91
92 println!(" - Current accuracy: {:.3}", metrics.current_accuracy);
93
94 // Evaluate forgetting on previous tasks
95 if i > 0 {
96 let all_accuracies = learner.evaluate_all_tasks()?;
97 let avg_prev_accuracy = all_accuracies
98 .iter()
99 .take(i)
100 .map(|(_, &acc)| acc)
101 .sum::<f64>()
102 / i as f64;
103
104 println!(" - Average accuracy on previous tasks: {avg_prev_accuracy:.3}");
105 }
106 }
107
108 // Final evaluation
109 let forgetting_metrics = learner.get_forgetting_metrics();
110 println!("\n EWC Results:");
111 println!(
112 " - Average accuracy: {:.3}",
113 forgetting_metrics.average_accuracy
114 );
115 println!(
116 " - Forgetting measure: {:.3}",
117 forgetting_metrics.forgetting_measure
118 );
119 println!(
120 " - Continual learning score: {:.3}",
121 forgetting_metrics.continual_learning_score
122 );
123
124 Ok(())
125}
126
127/// Demonstrate Experience Replay
128fn experience_replay_demo() -> Result<()> {
129 let layers = vec![
130 QNNLayerType::EncodingLayer { num_features: 4 },
131 QNNLayerType::VariationalLayer { num_params: 8 },
132 QNNLayerType::MeasurementLayer {
133 measurement_basis: "computational".to_string(),
134 },
135 ];
136
137 let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;
138
139 let strategy = ContinualLearningStrategy::ExperienceReplay {
140 buffer_size: 500,
141 replay_ratio: 0.3,
142 memory_selection: MemorySelectionStrategy::Random,
143 };
144
145 let mut learner = QuantumContinualLearner::new(model, strategy);
146
147 println!(" Created Experience Replay learner:");
148 println!(" - Buffer size: 500");
149 println!(" - Replay ratio: 30%");
150 println!(" - Selection: Random");
151
152 // Generate diverse tasks
153 let tasks = generate_diverse_tasks(4, 80, 4);
154
155 println!("\n Learning {} diverse tasks...", tasks.len());
156
157 let mut optimizer = Adam::new(0.002);
158
159 for (i, task) in tasks.iter().enumerate() {
160 println!(" \n Learning {}...", task.task_id);
161
162 let metrics = learner.learn_task(task.clone(), &mut optimizer, 25)?;
163
164 println!(" - Task accuracy: {:.3}", metrics.current_accuracy);
165
166 // Show memory buffer status
167 println!(" - Memory buffer usage: replay experiences stored");
168
169 if i > 0 {
170 let all_accuracies = learner.evaluate_all_tasks()?;
171 let retention_rate = all_accuracies.values().sum::<f64>() / all_accuracies.len() as f64;
172 println!(" - Average retention: {retention_rate:.3}");
173 }
174 }
175
176 let final_metrics = learner.get_forgetting_metrics();
177 println!("\n Experience Replay Results:");
178 println!(
179 " - Final average accuracy: {:.3}",
180 final_metrics.average_accuracy
181 );
182 println!(
183 " - Forgetting reduction: {:.3}",
184 1.0 - final_metrics.forgetting_measure
185 );
186
187 Ok(())
188}
189
190/// Demonstrate Progressive Networks
191fn progressive_networks_demo() -> Result<()> {
192 let layers = vec![
193 QNNLayerType::EncodingLayer { num_features: 4 },
194 QNNLayerType::VariationalLayer { num_params: 6 },
195 QNNLayerType::MeasurementLayer {
196 measurement_basis: "computational".to_string(),
197 },
198 ];
199
200 let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;
201
202 let strategy = ContinualLearningStrategy::ProgressiveNetworks {
203 lateral_connections: true,
204 adaptation_layers: 2,
205 };
206
207 let mut learner = QuantumContinualLearner::new(model, strategy);
208
209 println!(" Created Progressive Networks learner:");
210 println!(" - Lateral connections: enabled");
211 println!(" - Adaptation layers: 2");
212
213 // Generate related tasks for transfer learning
214 let tasks = generate_related_tasks(3, 60, 4);
215
216 println!("\n Learning {} related tasks...", tasks.len());
217
218 let mut optimizer = Adam::new(0.001);
219 let mut learning_speeds = Vec::new();
220
221 for (i, task) in tasks.iter().enumerate() {
222 println!(" \n Adding column for {}...", task.task_id);
223
224 let start_time = std::time::Instant::now();
225 let metrics = learner.learn_task(task.clone(), &mut optimizer, 20)?;
226 let learning_time = start_time.elapsed();
227
228 learning_speeds.push(learning_time);
229
230 println!(" - Task accuracy: {:.3}", metrics.current_accuracy);
231 println!(" - Learning time: {learning_time:.2?}");
232
233 if i > 0 {
234 let speedup = learning_speeds[0].as_secs_f64() / learning_time.as_secs_f64();
235 println!(" - Learning speedup: {speedup:.2}x");
236 }
237 }
238
239 println!("\n Progressive Networks Results:");
240 println!(" - No catastrophic forgetting (by design)");
241 println!(" - Lateral connections enable knowledge transfer");
242 println!(" - Model capacity grows with new tasks");
243
244 Ok(())
245}
246
247/// Demonstrate Learning without Forgetting
248fn lwf_demo() -> Result<()> {
249 let layers = vec![
250 QNNLayerType::EncodingLayer { num_features: 4 },
251 QNNLayerType::VariationalLayer { num_params: 10 },
252 QNNLayerType::EntanglementLayer {
253 connectivity: "circular".to_string(),
254 },
255 QNNLayerType::MeasurementLayer {
256 measurement_basis: "computational".to_string(),
257 },
258 ];
259
260 let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;
261
262 let strategy = ContinualLearningStrategy::LearningWithoutForgetting {
263 distillation_weight: 0.5,
264 temperature: 3.0,
265 };
266
267 let mut learner = QuantumContinualLearner::new(model, strategy);
268
269 println!(" Created Learning without Forgetting learner:");
270 println!(" - Distillation weight: 0.5");
271 println!(" - Temperature: 3.0");
272
273 // Generate task sequence
274 let tasks = generate_task_sequence(4, 70, 4);
275
276 println!("\n Learning with knowledge distillation...");
277
278 let mut optimizer = Adam::new(0.001);
279 let mut distillation_losses = Vec::new();
280
281 for (i, task) in tasks.iter().enumerate() {
282 println!(" \n Learning {}...", task.task_id);
283
284 let metrics = learner.learn_task(task.clone(), &mut optimizer, 25)?;
285
286 println!(" - Task accuracy: {:.3}", metrics.current_accuracy);
287
288 if i > 0 {
289 // Simulate distillation loss tracking
290 let distillation_loss = 0.3f64.mul_add(fastrand::f64(), 0.1);
291 distillation_losses.push(distillation_loss);
292 println!(" - Distillation loss: {distillation_loss:.3}");
293
294 let all_accuracies = learner.evaluate_all_tasks()?;
295 let stability = all_accuracies
296 .values()
297 .map(|&acc| if acc > 0.6 { 1.0 } else { 0.0 })
298 .sum::<f64>()
299 / all_accuracies.len() as f64;
300
301 println!(" - Knowledge retention: {:.1}%", stability * 100.0);
302 }
303 }
304
305 println!("\n LwF Results:");
306 println!(" - Knowledge distillation preserves previous task performance");
307 println!(" - Temperature scaling provides soft targets");
308 println!(" - Balances plasticity and stability");
309
310 Ok(())
311}
312
313/// Demonstrate Parameter Isolation
314fn parameter_isolation_demo() -> Result<()> {
315 let layers = vec![
316 QNNLayerType::EncodingLayer { num_features: 4 },
317 QNNLayerType::VariationalLayer { num_params: 16 },
318 QNNLayerType::EntanglementLayer {
319 connectivity: "full".to_string(),
320 },
321 QNNLayerType::MeasurementLayer {
322 measurement_basis: "computational".to_string(),
323 },
324 ];
325
326 let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;
327
328 let strategy = ContinualLearningStrategy::ParameterIsolation {
329 allocation_strategy: ParameterAllocationStrategy::Masking,
330 growth_threshold: 0.8,
331 };
332
333 let mut learner = QuantumContinualLearner::new(model, strategy);
334
335 println!(" Created Parameter Isolation learner:");
336 println!(" - Allocation strategy: Masking");
337 println!(" - Growth threshold: 0.8");
338
339 // Generate tasks with different requirements
340 let tasks = generate_varying_complexity_tasks(3, 90, 4);
341
342 println!("\n Learning with parameter isolation...");
343
344 let mut optimizer = Adam::new(0.001);
345 let mut parameter_usage = Vec::new();
346
347 for (i, task) in tasks.iter().enumerate() {
348 println!(" \n Allocating parameters for {}...", task.task_id);
349
350 let metrics = learner.learn_task(task.clone(), &mut optimizer, 30)?;
351
352 // Simulate parameter usage tracking
353 let used_params = 16 * (i + 1) / tasks.len(); // Gradually use more parameters
354 parameter_usage.push(used_params);
355
356 println!(" - Task accuracy: {:.3}", metrics.current_accuracy);
357 println!(" - Parameters allocated: {}/{}", used_params, 16);
358 println!(
359 " - Parameter efficiency: {:.1}%",
360 used_params as f64 / 16.0 * 100.0
361 );
362
363 if i > 0 {
364 let all_accuracies = learner.evaluate_all_tasks()?;
365 let interference = 1.0
366 - all_accuracies
367 .values()
368 .take(i)
369 .map(|&acc| if acc > 0.7 { 1.0 } else { 0.0 })
370 .sum::<f64>()
371 / i as f64;
372
373 println!(" - Task interference: {:.1}%", interference * 100.0);
374 }
375 }
376
377 println!("\n Parameter Isolation Results:");
378 println!(" - Dedicated parameters prevent interference");
379 println!(" - Scalable to many tasks");
380 println!(" - Maintains task-specific knowledge");
381
382 Ok(())
383}
384
385/// Demonstrate comprehensive task sequence evaluation
386fn task_sequence_demo() -> Result<()> {
387 println!(" Comprehensive continual learning evaluation...");
388
389 // Compare different strategies
390 let strategies = vec![
391 (
392 "EWC",
393 ContinualLearningStrategy::ElasticWeightConsolidation {
394 importance_weight: 500.0,
395 fisher_samples: 100,
396 },
397 ),
398 (
399 "Experience Replay",
400 ContinualLearningStrategy::ExperienceReplay {
401 buffer_size: 300,
402 replay_ratio: 0.2,
403 memory_selection: MemorySelectionStrategy::Random,
404 },
405 ),
406 (
407 "Quantum Regularization",
408 ContinualLearningStrategy::QuantumRegularization {
409 entanglement_preservation: 0.1,
410 parameter_drift_penalty: 0.5,
411 },
412 ),
413 ];
414
415 // Generate challenging task sequence
416 let tasks = generate_challenging_sequence(5, 60, 4);
417
418 println!(
419 "\n Comparing strategies on {} challenging tasks:",
420 tasks.len()
421 );
422
423 for (strategy_name, strategy) in strategies {
424 println!("\n --- {strategy_name} ---");
425
426 let layers = vec![
427 QNNLayerType::EncodingLayer { num_features: 4 },
428 QNNLayerType::VariationalLayer { num_params: 8 },
429 QNNLayerType::MeasurementLayer {
430 measurement_basis: "computational".to_string(),
431 },
432 ];
433
434 let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;
435 let mut learner = QuantumContinualLearner::new(model, strategy);
436 let mut optimizer = Adam::new(0.001);
437
438 for task in &tasks {
439 learner.learn_task(task.clone(), &mut optimizer, 20)?;
440 }
441
442 let final_metrics = learner.get_forgetting_metrics();
443 println!(
444 " - Average accuracy: {:.3}",
445 final_metrics.average_accuracy
446 );
447 println!(
448 " - Forgetting measure: {:.3}",
449 final_metrics.forgetting_measure
450 );
451 println!(
452 " - CL score: {:.3}",
453 final_metrics.continual_learning_score
454 );
455 }
456
457 Ok(())
458}
459
460/// Demonstrate forgetting analysis
461fn forgetting_analysis_demo() -> Result<()> {
462 println!(" Detailed forgetting analysis...");
463
464 let layers = vec![
465 QNNLayerType::EncodingLayer { num_features: 4 },
466 QNNLayerType::VariationalLayer { num_params: 12 },
467 QNNLayerType::MeasurementLayer {
468 measurement_basis: "computational".to_string(),
469 },
470 ];
471
472 let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;
473
474 let strategy = ContinualLearningStrategy::ElasticWeightConsolidation {
475 importance_weight: 1000.0,
476 fisher_samples: 150,
477 };
478
479 let mut learner = QuantumContinualLearner::new(model, strategy);
480
481 // Create tasks with increasing difficulty
482 let tasks = generate_increasing_difficulty_tasks(4, 80, 4);
483
484 println!("\n Learning tasks with increasing difficulty...");
485
486 let mut optimizer = Adam::new(0.001);
487 let mut accuracy_matrix = Vec::new();
488
489 for (i, task) in tasks.iter().enumerate() {
490 println!(
491 " \n Learning {} (difficulty level {})...",
492 task.task_id,
493 i + 1
494 );
495
496 learner.learn_task(task.clone(), &mut optimizer, 25)?;
497
498 // Evaluate on all tasks learned so far
499 let all_accuracies = learner.evaluate_all_tasks()?;
500 let mut current_row = Vec::new();
501
502 for j in 0..=i {
503 let task_id = &tasks[j].task_id;
504 let accuracy = all_accuracies.get(task_id).unwrap_or(&0.0);
505 current_row.push(*accuracy);
506 }
507
508 accuracy_matrix.push(current_row.clone());
509
510 // Print current performance
511 for (j, &acc) in current_row.iter().enumerate() {
512 println!(" - Task {}: {:.3}", j + 1, acc);
513 }
514 }
515
516 println!("\n Forgetting Analysis Results:");
517
518 // Compute backward transfer
519 for i in 1..accuracy_matrix.len() {
520 for j in 0..i {
521 let current_acc = accuracy_matrix[i][j];
522 let original_acc = accuracy_matrix[j][j];
523 let forgetting = (original_acc - current_acc).max(0.0);
524
525 if forgetting > 0.1 {
526 println!(" - Significant forgetting detected for Task {} after learning Task {}: {:.3}",
527 j + 1, i + 1, forgetting);
528 }
529 }
530 }
531
532 // Compute average forgetting
533 let mut total_forgetting = 0.0;
534 let mut num_comparisons = 0;
535
536 for i in 1..accuracy_matrix.len() {
537 for j in 0..i {
538 let current_acc = accuracy_matrix[i][j];
539 let original_acc = accuracy_matrix[j][j];
540 total_forgetting += (original_acc - current_acc).max(0.0);
541 num_comparisons += 1;
542 }
543 }
544
545 let avg_forgetting = if num_comparisons > 0 {
546 total_forgetting / f64::from(num_comparisons)
547 } else {
548 0.0
549 };
550
551 println!(" - Average forgetting: {avg_forgetting:.3}");
552
553 // Compute final average accuracy
554 if let Some(final_row) = accuracy_matrix.last() {
555 let final_avg = final_row.iter().sum::<f64>() / final_row.len() as f64;
556 println!(" - Final average accuracy: {final_avg:.3}");
557 println!(
558 " - Continual learning effectiveness: {:.1}%",
559 (1.0 - avg_forgetting) * 100.0
560 );
561 }
562
563 Ok(())
564}

Source
pub fn learn_task(
&mut self,
task: ContinualTask,
optimizer: &mut dyn Optimizer,
epochs: usize,
) -> Result<TaskMetrics>
pub fn learn_task( &mut self, task: ContinualTask, optimizer: &mut dyn Optimizer, epochs: usize, ) -> Result<TaskMetrics>
Learn a new task
Examples found in repository?
examples/quantum_continual_learning.rs (line 89)
50fn ewc_demo() -> Result<()> {
51 // Create quantum model
52 let layers = vec![
53 QNNLayerType::EncodingLayer { num_features: 4 },
54 QNNLayerType::VariationalLayer { num_params: 12 },
55 QNNLayerType::EntanglementLayer {
56 connectivity: "circular".to_string(),
57 },
58 QNNLayerType::VariationalLayer { num_params: 8 },
59 QNNLayerType::MeasurementLayer {
60 measurement_basis: "computational".to_string(),
61 },
62 ];
63
64 let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;
65
66 // Create EWC strategy
67 let strategy = ContinualLearningStrategy::ElasticWeightConsolidation {
68 importance_weight: 1000.0,
69 fisher_samples: 200,
70 };
71
72 let mut learner = QuantumContinualLearner::new(model, strategy);
73
74 println!(" Created EWC continual learner:");
75 println!(" - Importance weight: 1000.0");
76 println!(" - Fisher samples: 200");
77
78 // Generate task sequence
79 let tasks = generate_task_sequence(3, 100, 4);
80
81 println!("\n Learning sequence of {} tasks...", tasks.len());
82
83 let mut optimizer = Adam::new(0.001);
84 let mut task_accuracies = Vec::new();
85
86 for (i, task) in tasks.iter().enumerate() {
87 println!(" \n Training on {}...", task.task_id);
88
89 let metrics = learner.learn_task(task.clone(), &mut optimizer, 30)?;
90 task_accuracies.push(metrics.current_accuracy);
91
92 println!(" - Current accuracy: {:.3}", metrics.current_accuracy);
93
94 // Evaluate forgetting on previous tasks
95 if i > 0 {
96 let all_accuracies = learner.evaluate_all_tasks()?;
97 let avg_prev_accuracy = all_accuracies
98 .iter()
99 .take(i)
100 .map(|(_, &acc)| acc)
101 .sum::<f64>()
102 / i as f64;
103
104 println!(" - Average accuracy on previous tasks: {avg_prev_accuracy:.3}");
105 }
106 }
107
108 // Final evaluation
109 let forgetting_metrics = learner.get_forgetting_metrics();
110 println!("\n EWC Results:");
111 println!(
112 " - Average accuracy: {:.3}",
113 forgetting_metrics.average_accuracy
114 );
115 println!(
116 " - Forgetting measure: {:.3}",
117 forgetting_metrics.forgetting_measure
118 );
119 println!(
120 " - Continual learning score: {:.3}",
121 forgetting_metrics.continual_learning_score
122 );
123
124 Ok(())
125}
126
127/// Demonstrate Experience Replay
128fn experience_replay_demo() -> Result<()> {
129 let layers = vec![
130 QNNLayerType::EncodingLayer { num_features: 4 },
131 QNNLayerType::VariationalLayer { num_params: 8 },
132 QNNLayerType::MeasurementLayer {
133 measurement_basis: "computational".to_string(),
134 },
135 ];
136
137 let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;
138
139 let strategy = ContinualLearningStrategy::ExperienceReplay {
140 buffer_size: 500,
141 replay_ratio: 0.3,
142 memory_selection: MemorySelectionStrategy::Random,
143 };
144
145 let mut learner = QuantumContinualLearner::new(model, strategy);
146
147 println!(" Created Experience Replay learner:");
148 println!(" - Buffer size: 500");
149 println!(" - Replay ratio: 30%");
150 println!(" - Selection: Random");
151
152 // Generate diverse tasks
153 let tasks = generate_diverse_tasks(4, 80, 4);
154
155 println!("\n Learning {} diverse tasks...", tasks.len());
156
157 let mut optimizer = Adam::new(0.002);
158
159 for (i, task) in tasks.iter().enumerate() {
160 println!(" \n Learning {}...", task.task_id);
161
162 let metrics = learner.learn_task(task.clone(), &mut optimizer, 25)?;
163
164 println!(" - Task accuracy: {:.3}", metrics.current_accuracy);
165
166 // Show memory buffer status
167 println!(" - Memory buffer usage: replay experiences stored");
168
169 if i > 0 {
170 let all_accuracies = learner.evaluate_all_tasks()?;
171 let retention_rate = all_accuracies.values().sum::<f64>() / all_accuracies.len() as f64;
172 println!(" - Average retention: {retention_rate:.3}");
173 }
174 }
175
176 let final_metrics = learner.get_forgetting_metrics();
177 println!("\n Experience Replay Results:");
178 println!(
179 " - Final average accuracy: {:.3}",
180 final_metrics.average_accuracy
181 );
182 println!(
183 " - Forgetting reduction: {:.3}",
184 1.0 - final_metrics.forgetting_measure
185 );
186
187 Ok(())
188}
189
190/// Demonstrate Progressive Networks
191fn progressive_networks_demo() -> Result<()> {
192 let layers = vec![
193 QNNLayerType::EncodingLayer { num_features: 4 },
194 QNNLayerType::VariationalLayer { num_params: 6 },
195 QNNLayerType::MeasurementLayer {
196 measurement_basis: "computational".to_string(),
197 },
198 ];
199
200 let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;
201
202 let strategy = ContinualLearningStrategy::ProgressiveNetworks {
203 lateral_connections: true,
204 adaptation_layers: 2,
205 };
206
207 let mut learner = QuantumContinualLearner::new(model, strategy);
208
209 println!(" Created Progressive Networks learner:");
210 println!(" - Lateral connections: enabled");
211 println!(" - Adaptation layers: 2");
212
213 // Generate related tasks for transfer learning
214 let tasks = generate_related_tasks(3, 60, 4);
215
216 println!("\n Learning {} related tasks...", tasks.len());
217
218 let mut optimizer = Adam::new(0.001);
219 let mut learning_speeds = Vec::new();
220
221 for (i, task) in tasks.iter().enumerate() {
222 println!(" \n Adding column for {}...", task.task_id);
223
224 let start_time = std::time::Instant::now();
225 let metrics = learner.learn_task(task.clone(), &mut optimizer, 20)?;
226 let learning_time = start_time.elapsed();
227
228 learning_speeds.push(learning_time);
229
230 println!(" - Task accuracy: {:.3}", metrics.current_accuracy);
231 println!(" - Learning time: {learning_time:.2?}");
232
233 if i > 0 {
234 let speedup = learning_speeds[0].as_secs_f64() / learning_time.as_secs_f64();
235 println!(" - Learning speedup: {speedup:.2}x");
236 }
237 }
238
239 println!("\n Progressive Networks Results:");
240 println!(" - No catastrophic forgetting (by design)");
241 println!(" - Lateral connections enable knowledge transfer");
242 println!(" - Model capacity grows with new tasks");
243
244 Ok(())
245}
246
247/// Demonstrate Learning without Forgetting
248fn lwf_demo() -> Result<()> {
249 let layers = vec![
250 QNNLayerType::EncodingLayer { num_features: 4 },
251 QNNLayerType::VariationalLayer { num_params: 10 },
252 QNNLayerType::EntanglementLayer {
253 connectivity: "circular".to_string(),
254 },
255 QNNLayerType::MeasurementLayer {
256 measurement_basis: "computational".to_string(),
257 },
258 ];
259
260 let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;
261
262 let strategy = ContinualLearningStrategy::LearningWithoutForgetting {
263 distillation_weight: 0.5,
264 temperature: 3.0,
265 };
266
267 let mut learner = QuantumContinualLearner::new(model, strategy);
268
269 println!(" Created Learning without Forgetting learner:");
270 println!(" - Distillation weight: 0.5");
271 println!(" - Temperature: 3.0");
272
273 // Generate task sequence
274 let tasks = generate_task_sequence(4, 70, 4);
275
276 println!("\n Learning with knowledge distillation...");
277
278 let mut optimizer = Adam::new(0.001);
279 let mut distillation_losses = Vec::new();
280
281 for (i, task) in tasks.iter().enumerate() {
282 println!(" \n Learning {}...", task.task_id);
283
284 let metrics = learner.learn_task(task.clone(), &mut optimizer, 25)?;
285
286 println!(" - Task accuracy: {:.3}", metrics.current_accuracy);
287
288 if i > 0 {
289 // Simulate distillation loss tracking
290 let distillation_loss = 0.3f64.mul_add(fastrand::f64(), 0.1);
291 distillation_losses.push(distillation_loss);
292 println!(" - Distillation loss: {distillation_loss:.3}");
293
294 let all_accuracies = learner.evaluate_all_tasks()?;
295 let stability = all_accuracies
296 .values()
297 .map(|&acc| if acc > 0.6 { 1.0 } else { 0.0 })
298 .sum::<f64>()
299 / all_accuracies.len() as f64;
300
301 println!(" - Knowledge retention: {:.1}%", stability * 100.0);
302 }
303 }
304
305 println!("\n LwF Results:");
306 println!(" - Knowledge distillation preserves previous task performance");
307 println!(" - Temperature scaling provides soft targets");
308 println!(" - Balances plasticity and stability");
309
310 Ok(())
311}
312
313/// Demonstrate Parameter Isolation
314fn parameter_isolation_demo() -> Result<()> {
315 let layers = vec![
316 QNNLayerType::EncodingLayer { num_features: 4 },
317 QNNLayerType::VariationalLayer { num_params: 16 },
318 QNNLayerType::EntanglementLayer {
319 connectivity: "full".to_string(),
320 },
321 QNNLayerType::MeasurementLayer {
322 measurement_basis: "computational".to_string(),
323 },
324 ];
325
326 let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;
327
328 let strategy = ContinualLearningStrategy::ParameterIsolation {
329 allocation_strategy: ParameterAllocationStrategy::Masking,
330 growth_threshold: 0.8,
331 };
332
333 let mut learner = QuantumContinualLearner::new(model, strategy);
334
335 println!(" Created Parameter Isolation learner:");
336 println!(" - Allocation strategy: Masking");
337 println!(" - Growth threshold: 0.8");
338
339 // Generate tasks with different requirements
340 let tasks = generate_varying_complexity_tasks(3, 90, 4);
341
342 println!("\n Learning with parameter isolation...");
343
344 let mut optimizer = Adam::new(0.001);
345 let mut parameter_usage = Vec::new();
346
347 for (i, task) in tasks.iter().enumerate() {
348 println!(" \n Allocating parameters for {}...", task.task_id);
349
350 let metrics = learner.learn_task(task.clone(), &mut optimizer, 30)?;
351
352 // Simulate parameter usage tracking
353 let used_params = 16 * (i + 1) / tasks.len(); // Gradually use more parameters
354 parameter_usage.push(used_params);
355
356 println!(" - Task accuracy: {:.3}", metrics.current_accuracy);
357 println!(" - Parameters allocated: {}/{}", used_params, 16);
358 println!(
359 " - Parameter efficiency: {:.1}%",
360 used_params as f64 / 16.0 * 100.0
361 );
362
363 if i > 0 {
364 let all_accuracies = learner.evaluate_all_tasks()?;
365 let interference = 1.0
366 - all_accuracies
367 .values()
368 .take(i)
369 .map(|&acc| if acc > 0.7 { 1.0 } else { 0.0 })
370 .sum::<f64>()
371 / i as f64;
372
373 println!(" - Task interference: {:.1}%", interference * 100.0);
374 }
375 }
376
377 println!("\n Parameter Isolation Results:");
378 println!(" - Dedicated parameters prevent interference");
379 println!(" - Scalable to many tasks");
380 println!(" - Maintains task-specific knowledge");
381
382 Ok(())
383}
384
385/// Demonstrate comprehensive task sequence evaluation
386fn task_sequence_demo() -> Result<()> {
387 println!(" Comprehensive continual learning evaluation...");
388
389 // Compare different strategies
390 let strategies = vec![
391 (
392 "EWC",
393 ContinualLearningStrategy::ElasticWeightConsolidation {
394 importance_weight: 500.0,
395 fisher_samples: 100,
396 },
397 ),
398 (
399 "Experience Replay",
400 ContinualLearningStrategy::ExperienceReplay {
401 buffer_size: 300,
402 replay_ratio: 0.2,
403 memory_selection: MemorySelectionStrategy::Random,
404 },
405 ),
406 (
407 "Quantum Regularization",
408 ContinualLearningStrategy::QuantumRegularization {
409 entanglement_preservation: 0.1,
410 parameter_drift_penalty: 0.5,
411 },
412 ),
413 ];
414
415 // Generate challenging task sequence
416 let tasks = generate_challenging_sequence(5, 60, 4);
417
418 println!(
419 "\n Comparing strategies on {} challenging tasks:",
420 tasks.len()
421 );
422
423 for (strategy_name, strategy) in strategies {
424 println!("\n --- {strategy_name} ---");
425
426 let layers = vec![
427 QNNLayerType::EncodingLayer { num_features: 4 },
428 QNNLayerType::VariationalLayer { num_params: 8 },
429 QNNLayerType::MeasurementLayer {
430 measurement_basis: "computational".to_string(),
431 },
432 ];
433
434 let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;
435 let mut learner = QuantumContinualLearner::new(model, strategy);
436 let mut optimizer = Adam::new(0.001);
437
438 for task in &tasks {
439 learner.learn_task(task.clone(), &mut optimizer, 20)?;
440 }
441
442 let final_metrics = learner.get_forgetting_metrics();
443 println!(
444 " - Average accuracy: {:.3}",
445 final_metrics.average_accuracy
446 );
447 println!(
448 " - Forgetting measure: {:.3}",
449 final_metrics.forgetting_measure
450 );
451 println!(
452 " - CL score: {:.3}",
453 final_metrics.continual_learning_score
454 );
455 }
456
457 Ok(())
458}
459
460/// Demonstrate forgetting analysis
461fn forgetting_analysis_demo() -> Result<()> {
462 println!(" Detailed forgetting analysis...");
463
464 let layers = vec![
465 QNNLayerType::EncodingLayer { num_features: 4 },
466 QNNLayerType::VariationalLayer { num_params: 12 },
467 QNNLayerType::MeasurementLayer {
468 measurement_basis: "computational".to_string(),
469 },
470 ];
471
472 let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;
473
474 let strategy = ContinualLearningStrategy::ElasticWeightConsolidation {
475 importance_weight: 1000.0,
476 fisher_samples: 150,
477 };
478
479 let mut learner = QuantumContinualLearner::new(model, strategy);
480
481 // Create tasks with increasing difficulty
482 let tasks = generate_increasing_difficulty_tasks(4, 80, 4);
483
484 println!("\n Learning tasks with increasing difficulty...");
485
486 let mut optimizer = Adam::new(0.001);
487 let mut accuracy_matrix = Vec::new();
488
489 for (i, task) in tasks.iter().enumerate() {
490 println!(
491 " \n Learning {} (difficulty level {})...",
492 task.task_id,
493 i + 1
494 );
495
496 learner.learn_task(task.clone(), &mut optimizer, 25)?;
497
498 // Evaluate on all tasks learned so far
499 let all_accuracies = learner.evaluate_all_tasks()?;
500 let mut current_row = Vec::new();
501
502 for j in 0..=i {
503 let task_id = &tasks[j].task_id;
504 let accuracy = all_accuracies.get(task_id).unwrap_or(&0.0);
505 current_row.push(*accuracy);
506 }
507
508 accuracy_matrix.push(current_row.clone());
509
510 // Print current performance
511 for (j, &acc) in current_row.iter().enumerate() {
512 println!(" - Task {}: {:.3}", j + 1, acc);
513 }
514 }
515
516 println!("\n Forgetting Analysis Results:");
517
518 // Compute backward transfer
519 for i in 1..accuracy_matrix.len() {
520 for j in 0..i {
521 let current_acc = accuracy_matrix[i][j];
522 let original_acc = accuracy_matrix[j][j];
523 let forgetting = (original_acc - current_acc).max(0.0);
524
525 if forgetting > 0.1 {
526 println!(" - Significant forgetting detected for Task {} after learning Task {}: {:.3}",
527 j + 1, i + 1, forgetting);
528 }
529 }
530 }
531
532 // Compute average forgetting
533 let mut total_forgetting = 0.0;
534 let mut num_comparisons = 0;
535
536 for i in 1..accuracy_matrix.len() {
537 for j in 0..i {
538 let current_acc = accuracy_matrix[i][j];
539 let original_acc = accuracy_matrix[j][j];
540 total_forgetting += (original_acc - current_acc).max(0.0);
541 num_comparisons += 1;
542 }
543 }
544
545 let avg_forgetting = if num_comparisons > 0 {
546 total_forgetting / f64::from(num_comparisons)
547 } else {
548 0.0
549 };
550
551 println!(" - Average forgetting: {avg_forgetting:.3}");
552
553 // Compute final average accuracy
554 if let Some(final_row) = accuracy_matrix.last() {
555 let final_avg = final_row.iter().sum::<f64>() / final_row.len() as f64;
556 println!(" - Final average accuracy: {final_avg:.3}");
557 println!(
558 " - Continual learning effectiveness: {:.1}%",
559 (1.0 - avg_forgetting) * 100.0
560 );
561 }
562
563 Ok(())
564}

Source
pub fn evaluate_all_tasks(&mut self) -> Result<HashMap<String, f64>>
pub fn evaluate_all_tasks(&mut self) -> Result<HashMap<String, f64>>
Evaluate all previous tasks to measure forgetting
Examples found in repository?
examples/quantum_continual_learning.rs (line 96)
50fn ewc_demo() -> Result<()> {
51 // Create quantum model
52 let layers = vec![
53 QNNLayerType::EncodingLayer { num_features: 4 },
54 QNNLayerType::VariationalLayer { num_params: 12 },
55 QNNLayerType::EntanglementLayer {
56 connectivity: "circular".to_string(),
57 },
58 QNNLayerType::VariationalLayer { num_params: 8 },
59 QNNLayerType::MeasurementLayer {
60 measurement_basis: "computational".to_string(),
61 },
62 ];
63
64 let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;
65
66 // Create EWC strategy
67 let strategy = ContinualLearningStrategy::ElasticWeightConsolidation {
68 importance_weight: 1000.0,
69 fisher_samples: 200,
70 };
71
72 let mut learner = QuantumContinualLearner::new(model, strategy);
73
74 println!(" Created EWC continual learner:");
75 println!(" - Importance weight: 1000.0");
76 println!(" - Fisher samples: 200");
77
78 // Generate task sequence
79 let tasks = generate_task_sequence(3, 100, 4);
80
81 println!("\n Learning sequence of {} tasks...", tasks.len());
82
83 let mut optimizer = Adam::new(0.001);
84 let mut task_accuracies = Vec::new();
85
86 for (i, task) in tasks.iter().enumerate() {
87 println!(" \n Training on {}...", task.task_id);
88
89 let metrics = learner.learn_task(task.clone(), &mut optimizer, 30)?;
90 task_accuracies.push(metrics.current_accuracy);
91
92 println!(" - Current accuracy: {:.3}", metrics.current_accuracy);
93
94 // Evaluate forgetting on previous tasks
95 if i > 0 {
96 let all_accuracies = learner.evaluate_all_tasks()?;
97 let avg_prev_accuracy = all_accuracies
98 .iter()
99 .take(i)
100 .map(|(_, &acc)| acc)
101 .sum::<f64>()
102 / i as f64;
103
104 println!(" - Average accuracy on previous tasks: {avg_prev_accuracy:.3}");
105 }
106 }
107
108 // Final evaluation
109 let forgetting_metrics = learner.get_forgetting_metrics();
110 println!("\n EWC Results:");
111 println!(
112 " - Average accuracy: {:.3}",
113 forgetting_metrics.average_accuracy
114 );
115 println!(
116 " - Forgetting measure: {:.3}",
117 forgetting_metrics.forgetting_measure
118 );
119 println!(
120 " - Continual learning score: {:.3}",
121 forgetting_metrics.continual_learning_score
122 );
123
124 Ok(())
125}
126
127/// Demonstrate Experience Replay
128fn experience_replay_demo() -> Result<()> {
129 let layers = vec![
130 QNNLayerType::EncodingLayer { num_features: 4 },
131 QNNLayerType::VariationalLayer { num_params: 8 },
132 QNNLayerType::MeasurementLayer {
133 measurement_basis: "computational".to_string(),
134 },
135 ];
136
137 let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;
138
139 let strategy = ContinualLearningStrategy::ExperienceReplay {
140 buffer_size: 500,
141 replay_ratio: 0.3,
142 memory_selection: MemorySelectionStrategy::Random,
143 };
144
145 let mut learner = QuantumContinualLearner::new(model, strategy);
146
147 println!(" Created Experience Replay learner:");
148 println!(" - Buffer size: 500");
149 println!(" - Replay ratio: 30%");
150 println!(" - Selection: Random");
151
152 // Generate diverse tasks
153 let tasks = generate_diverse_tasks(4, 80, 4);
154
155 println!("\n Learning {} diverse tasks...", tasks.len());
156
157 let mut optimizer = Adam::new(0.002);
158
159 for (i, task) in tasks.iter().enumerate() {
160 println!(" \n Learning {}...", task.task_id);
161
162 let metrics = learner.learn_task(task.clone(), &mut optimizer, 25)?;
163
164 println!(" - Task accuracy: {:.3}", metrics.current_accuracy);
165
166 // Show memory buffer status
167 println!(" - Memory buffer usage: replay experiences stored");
168
169 if i > 0 {
170 let all_accuracies = learner.evaluate_all_tasks()?;
171 let retention_rate = all_accuracies.values().sum::<f64>() / all_accuracies.len() as f64;
172 println!(" - Average retention: {retention_rate:.3}");
173 }
174 }
175
176 let final_metrics = learner.get_forgetting_metrics();
177 println!("\n Experience Replay Results:");
178 println!(
179 " - Final average accuracy: {:.3}",
180 final_metrics.average_accuracy
181 );
182 println!(
183 " - Forgetting reduction: {:.3}",
184 1.0 - final_metrics.forgetting_measure
185 );
186
187 Ok(())
188}
189
190/// Demonstrate Progressive Networks
191fn progressive_networks_demo() -> Result<()> {
192 let layers = vec![
193 QNNLayerType::EncodingLayer { num_features: 4 },
194 QNNLayerType::VariationalLayer { num_params: 6 },
195 QNNLayerType::MeasurementLayer {
196 measurement_basis: "computational".to_string(),
197 },
198 ];
199
200 let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;
201
202 let strategy = ContinualLearningStrategy::ProgressiveNetworks {
203 lateral_connections: true,
204 adaptation_layers: 2,
205 };
206
207 let mut learner = QuantumContinualLearner::new(model, strategy);
208
209 println!(" Created Progressive Networks learner:");
210 println!(" - Lateral connections: enabled");
211 println!(" - Adaptation layers: 2");
212
213 // Generate related tasks for transfer learning
214 let tasks = generate_related_tasks(3, 60, 4);
215
216 println!("\n Learning {} related tasks...", tasks.len());
217
218 let mut optimizer = Adam::new(0.001);
219 let mut learning_speeds = Vec::new();
220
221 for (i, task) in tasks.iter().enumerate() {
222 println!(" \n Adding column for {}...", task.task_id);
223
224 let start_time = std::time::Instant::now();
225 let metrics = learner.learn_task(task.clone(), &mut optimizer, 20)?;
226 let learning_time = start_time.elapsed();
227
228 learning_speeds.push(learning_time);
229
230 println!(" - Task accuracy: {:.3}", metrics.current_accuracy);
231 println!(" - Learning time: {learning_time:.2?}");
232
233 if i > 0 {
234 let speedup = learning_speeds[0].as_secs_f64() / learning_time.as_secs_f64();
235 println!(" - Learning speedup: {speedup:.2}x");
236 }
237 }
238
239 println!("\n Progressive Networks Results:");
240 println!(" - No catastrophic forgetting (by design)");
241 println!(" - Lateral connections enable knowledge transfer");
242 println!(" - Model capacity grows with new tasks");
243
244 Ok(())
245}
246
247/// Demonstrate Learning without Forgetting
248fn lwf_demo() -> Result<()> {
249 let layers = vec![
250 QNNLayerType::EncodingLayer { num_features: 4 },
251 QNNLayerType::VariationalLayer { num_params: 10 },
252 QNNLayerType::EntanglementLayer {
253 connectivity: "circular".to_string(),
254 },
255 QNNLayerType::MeasurementLayer {
256 measurement_basis: "computational".to_string(),
257 },
258 ];
259
260 let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;
261
262 let strategy = ContinualLearningStrategy::LearningWithoutForgetting {
263 distillation_weight: 0.5,
264 temperature: 3.0,
265 };
266
267 let mut learner = QuantumContinualLearner::new(model, strategy);
268
269 println!(" Created Learning without Forgetting learner:");
270 println!(" - Distillation weight: 0.5");
271 println!(" - Temperature: 3.0");
272
273 // Generate task sequence
274 let tasks = generate_task_sequence(4, 70, 4);
275
276 println!("\n Learning with knowledge distillation...");
277
278 let mut optimizer = Adam::new(0.001);
279 let mut distillation_losses = Vec::new();
280
281 for (i, task) in tasks.iter().enumerate() {
282 println!(" \n Learning {}...", task.task_id);
283
284 let metrics = learner.learn_task(task.clone(), &mut optimizer, 25)?;
285
286 println!(" - Task accuracy: {:.3}", metrics.current_accuracy);
287
288 if i > 0 {
289 // Simulate distillation loss tracking
290 let distillation_loss = 0.3f64.mul_add(fastrand::f64(), 0.1);
291 distillation_losses.push(distillation_loss);
292 println!(" - Distillation loss: {distillation_loss:.3}");
293
294 let all_accuracies = learner.evaluate_all_tasks()?;
295 let stability = all_accuracies
296 .values()
297 .map(|&acc| if acc > 0.6 { 1.0 } else { 0.0 })
298 .sum::<f64>()
299 / all_accuracies.len() as f64;
300
301 println!(" - Knowledge retention: {:.1}%", stability * 100.0);
302 }
303 }
304
305 println!("\n LwF Results:");
306 println!(" - Knowledge distillation preserves previous task performance");
307 println!(" - Temperature scaling provides soft targets");
308 println!(" - Balances plasticity and stability");
309
310 Ok(())
311}
312
313/// Demonstrate Parameter Isolation
314fn parameter_isolation_demo() -> Result<()> {
315 let layers = vec![
316 QNNLayerType::EncodingLayer { num_features: 4 },
317 QNNLayerType::VariationalLayer { num_params: 16 },
318 QNNLayerType::EntanglementLayer {
319 connectivity: "full".to_string(),
320 },
321 QNNLayerType::MeasurementLayer {
322 measurement_basis: "computational".to_string(),
323 },
324 ];
325
326 let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;
327
328 let strategy = ContinualLearningStrategy::ParameterIsolation {
329 allocation_strategy: ParameterAllocationStrategy::Masking,
330 growth_threshold: 0.8,
331 };
332
333 let mut learner = QuantumContinualLearner::new(model, strategy);
334
335 println!(" Created Parameter Isolation learner:");
336 println!(" - Allocation strategy: Masking");
337 println!(" - Growth threshold: 0.8");
338
339 // Generate tasks with different requirements
340 let tasks = generate_varying_complexity_tasks(3, 90, 4);
341
342 println!("\n Learning with parameter isolation...");
343
344 let mut optimizer = Adam::new(0.001);
345 let mut parameter_usage = Vec::new();
346
347 for (i, task) in tasks.iter().enumerate() {
348 println!(" \n Allocating parameters for {}...", task.task_id);
349
350 let metrics = learner.learn_task(task.clone(), &mut optimizer, 30)?;
351
352 // Simulate parameter usage tracking
353 let used_params = 16 * (i + 1) / tasks.len(); // Gradually use more parameters
354 parameter_usage.push(used_params);
355
356 println!(" - Task accuracy: {:.3}", metrics.current_accuracy);
357 println!(" - Parameters allocated: {}/{}", used_params, 16);
358 println!(
359 " - Parameter efficiency: {:.1}%",
360 used_params as f64 / 16.0 * 100.0
361 );
362
363 if i > 0 {
364 let all_accuracies = learner.evaluate_all_tasks()?;
365 let interference = 1.0
366 - all_accuracies
367 .values()
368 .take(i)
369 .map(|&acc| if acc > 0.7 { 1.0 } else { 0.0 })
370 .sum::<f64>()
371 / i as f64;
372
373 println!(" - Task interference: {:.1}%", interference * 100.0);
374 }
375 }
376
377 println!("\n Parameter Isolation Results:");
378 println!(" - Dedicated parameters prevent interference");
379 println!(" - Scalable to many tasks");
380 println!(" - Maintains task-specific knowledge");
381
382 Ok(())
383}
384
385/// Demonstrate comprehensive task sequence evaluation
386fn task_sequence_demo() -> Result<()> {
387 println!(" Comprehensive continual learning evaluation...");
388
389 // Compare different strategies
390 let strategies = vec![
391 (
392 "EWC",
393 ContinualLearningStrategy::ElasticWeightConsolidation {
394 importance_weight: 500.0,
395 fisher_samples: 100,
396 },
397 ),
398 (
399 "Experience Replay",
400 ContinualLearningStrategy::ExperienceReplay {
401 buffer_size: 300,
402 replay_ratio: 0.2,
403 memory_selection: MemorySelectionStrategy::Random,
404 },
405 ),
406 (
407 "Quantum Regularization",
408 ContinualLearningStrategy::QuantumRegularization {
409 entanglement_preservation: 0.1,
410 parameter_drift_penalty: 0.5,
411 },
412 ),
413 ];
414
415 // Generate challenging task sequence
416 let tasks = generate_challenging_sequence(5, 60, 4);
417
418 println!(
419 "\n Comparing strategies on {} challenging tasks:",
420 tasks.len()
421 );
422
423 for (strategy_name, strategy) in strategies {
424 println!("\n --- {strategy_name} ---");
425
426 let layers = vec![
427 QNNLayerType::EncodingLayer { num_features: 4 },
428 QNNLayerType::VariationalLayer { num_params: 8 },
429 QNNLayerType::MeasurementLayer {
430 measurement_basis: "computational".to_string(),
431 },
432 ];
433
434 let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;
435 let mut learner = QuantumContinualLearner::new(model, strategy);
436 let mut optimizer = Adam::new(0.001);
437
438 for task in &tasks {
439 learner.learn_task(task.clone(), &mut optimizer, 20)?;
440 }
441
442 let final_metrics = learner.get_forgetting_metrics();
443 println!(
444 " - Average accuracy: {:.3}",
445 final_metrics.average_accuracy
446 );
447 println!(
448 " - Forgetting measure: {:.3}",
449 final_metrics.forgetting_measure
450 );
451 println!(
452 " - CL score: {:.3}",
453 final_metrics.continual_learning_score
454 );
455 }
456
457 Ok(())
458}
459
460/// Demonstrate forgetting analysis
461fn forgetting_analysis_demo() -> Result<()> {
462 println!(" Detailed forgetting analysis...");
463
464 let layers = vec![
465 QNNLayerType::EncodingLayer { num_features: 4 },
466 QNNLayerType::VariationalLayer { num_params: 12 },
467 QNNLayerType::MeasurementLayer {
468 measurement_basis: "computational".to_string(),
469 },
470 ];
471
472 let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;
473
474 let strategy = ContinualLearningStrategy::ElasticWeightConsolidation {
475 importance_weight: 1000.0,
476 fisher_samples: 150,
477 };
478
479 let mut learner = QuantumContinualLearner::new(model, strategy);
480
481 // Create tasks with increasing difficulty
482 let tasks = generate_increasing_difficulty_tasks(4, 80, 4);
483
484 println!("\n Learning tasks with increasing difficulty...");
485
486 let mut optimizer = Adam::new(0.001);
487 let mut accuracy_matrix = Vec::new();
488
489 for (i, task) in tasks.iter().enumerate() {
490 println!(
491 " \n Learning {} (difficulty level {})...",
492 task.task_id,
493 i + 1
494 );
495
496 learner.learn_task(task.clone(), &mut optimizer, 25)?;
497
498 // Evaluate on all tasks learned so far
499 let all_accuracies = learner.evaluate_all_tasks()?;
500 let mut current_row = Vec::new();
501
502 for j in 0..=i {
503 let task_id = &tasks[j].task_id;
504 let accuracy = all_accuracies.get(task_id).unwrap_or(&0.0);
505 current_row.push(*accuracy);
506 }
507
508 accuracy_matrix.push(current_row.clone());
509
510 // Print current performance
511 for (j, &acc) in current_row.iter().enumerate() {
512 println!(" - Task {}: {:.3}", j + 1, acc);
513 }
514 }
515
516 println!("\n Forgetting Analysis Results:");
517
518 // Compute backward transfer
519 for i in 1..accuracy_matrix.len() {
520 for j in 0..i {
521 let current_acc = accuracy_matrix[i][j];
522 let original_acc = accuracy_matrix[j][j];
523 let forgetting = (original_acc - current_acc).max(0.0);
524
525 if forgetting > 0.1 {
526 println!(" - Significant forgetting detected for Task {} after learning Task {}: {:.3}",
527 j + 1, i + 1, forgetting);
528 }
529 }
530 }
531
532 // Compute average forgetting
533 let mut total_forgetting = 0.0;
534 let mut num_comparisons = 0;
535
536 for i in 1..accuracy_matrix.len() {
537 for j in 0..i {
538 let current_acc = accuracy_matrix[i][j];
539 let original_acc = accuracy_matrix[j][j];
540 total_forgetting += (original_acc - current_acc).max(0.0);
541 num_comparisons += 1;
542 }
543 }
544
545 let avg_forgetting = if num_comparisons > 0 {
546 total_forgetting / f64::from(num_comparisons)
547 } else {
548 0.0
549 };
550
551 println!(" - Average forgetting: {avg_forgetting:.3}");
552
553 // Compute final average accuracy
554 if let Some(final_row) = accuracy_matrix.last() {
555 let final_avg = final_row.iter().sum::<f64>() / final_row.len() as f64;
556 println!(" - Final average accuracy: {final_avg:.3}");
557 println!(
558 " - Continual learning effectiveness: {:.1}%",
559 (1.0 - avg_forgetting) * 100.0
560 );
561 }
562
563 Ok(())
564}
Source§
pub fn get_forgetting_metrics(&self) -> &ForgettingMetrics
pub fn get_forgetting_metrics(&self) -> &ForgettingMetrics
Get forgetting metrics
Examples found in repository?
examples/quantum_continual_learning.rs (line 109)
50fn ewc_demo() -> Result<()> {
51 // Create quantum model
52 let layers = vec![
53 QNNLayerType::EncodingLayer { num_features: 4 },
54 QNNLayerType::VariationalLayer { num_params: 12 },
55 QNNLayerType::EntanglementLayer {
56 connectivity: "circular".to_string(),
57 },
58 QNNLayerType::VariationalLayer { num_params: 8 },
59 QNNLayerType::MeasurementLayer {
60 measurement_basis: "computational".to_string(),
61 },
62 ];
63
64 let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;
65
66 // Create EWC strategy
67 let strategy = ContinualLearningStrategy::ElasticWeightConsolidation {
68 importance_weight: 1000.0,
69 fisher_samples: 200,
70 };
71
72 let mut learner = QuantumContinualLearner::new(model, strategy);
73
74 println!(" Created EWC continual learner:");
75 println!(" - Importance weight: 1000.0");
76 println!(" - Fisher samples: 200");
77
78 // Generate task sequence
79 let tasks = generate_task_sequence(3, 100, 4);
80
81 println!("\n Learning sequence of {} tasks...", tasks.len());
82
83 let mut optimizer = Adam::new(0.001);
84 let mut task_accuracies = Vec::new();
85
86 for (i, task) in tasks.iter().enumerate() {
87 println!(" \n Training on {}...", task.task_id);
88
89 let metrics = learner.learn_task(task.clone(), &mut optimizer, 30)?;
90 task_accuracies.push(metrics.current_accuracy);
91
92 println!(" - Current accuracy: {:.3}", metrics.current_accuracy);
93
94 // Evaluate forgetting on previous tasks
95 if i > 0 {
96 let all_accuracies = learner.evaluate_all_tasks()?;
97 let avg_prev_accuracy = all_accuracies
98 .iter()
99 .take(i)
100 .map(|(_, &acc)| acc)
101 .sum::<f64>()
102 / i as f64;
103
104 println!(" - Average accuracy on previous tasks: {avg_prev_accuracy:.3}");
105 }
106 }
107
108 // Final evaluation
109 let forgetting_metrics = learner.get_forgetting_metrics();
110 println!("\n EWC Results:");
111 println!(
112 " - Average accuracy: {:.3}",
113 forgetting_metrics.average_accuracy
114 );
115 println!(
116 " - Forgetting measure: {:.3}",
117 forgetting_metrics.forgetting_measure
118 );
119 println!(
120 " - Continual learning score: {:.3}",
121 forgetting_metrics.continual_learning_score
122 );
123
124 Ok(())
125}
126
127/// Demonstrate Experience Replay
128fn experience_replay_demo() -> Result<()> {
129 let layers = vec![
130 QNNLayerType::EncodingLayer { num_features: 4 },
131 QNNLayerType::VariationalLayer { num_params: 8 },
132 QNNLayerType::MeasurementLayer {
133 measurement_basis: "computational".to_string(),
134 },
135 ];
136
137 let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;
138
139 let strategy = ContinualLearningStrategy::ExperienceReplay {
140 buffer_size: 500,
141 replay_ratio: 0.3,
142 memory_selection: MemorySelectionStrategy::Random,
143 };
144
145 let mut learner = QuantumContinualLearner::new(model, strategy);
146
147 println!(" Created Experience Replay learner:");
148 println!(" - Buffer size: 500");
149 println!(" - Replay ratio: 30%");
150 println!(" - Selection: Random");
151
152 // Generate diverse tasks
153 let tasks = generate_diverse_tasks(4, 80, 4);
154
155 println!("\n Learning {} diverse tasks...", tasks.len());
156
157 let mut optimizer = Adam::new(0.002);
158
159 for (i, task) in tasks.iter().enumerate() {
160 println!(" \n Learning {}...", task.task_id);
161
162 let metrics = learner.learn_task(task.clone(), &mut optimizer, 25)?;
163
164 println!(" - Task accuracy: {:.3}", metrics.current_accuracy);
165
166 // Show memory buffer status
167 println!(" - Memory buffer usage: replay experiences stored");
168
169 if i > 0 {
170 let all_accuracies = learner.evaluate_all_tasks()?;
171 let retention_rate = all_accuracies.values().sum::<f64>() / all_accuracies.len() as f64;
172 println!(" - Average retention: {retention_rate:.3}");
173 }
174 }
175
176 let final_metrics = learner.get_forgetting_metrics();
177 println!("\n Experience Replay Results:");
178 println!(
179 " - Final average accuracy: {:.3}",
180 final_metrics.average_accuracy
181 );
182 println!(
183 " - Forgetting reduction: {:.3}",
184 1.0 - final_metrics.forgetting_measure
185 );
186
187 Ok(())
188}
189
190/// Demonstrate Progressive Networks
191fn progressive_networks_demo() -> Result<()> {
192 let layers = vec![
193 QNNLayerType::EncodingLayer { num_features: 4 },
194 QNNLayerType::VariationalLayer { num_params: 6 },
195 QNNLayerType::MeasurementLayer {
196 measurement_basis: "computational".to_string(),
197 },
198 ];
199
200 let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;
201
202 let strategy = ContinualLearningStrategy::ProgressiveNetworks {
203 lateral_connections: true,
204 adaptation_layers: 2,
205 };
206
207 let mut learner = QuantumContinualLearner::new(model, strategy);
208
209 println!(" Created Progressive Networks learner:");
210 println!(" - Lateral connections: enabled");
211 println!(" - Adaptation layers: 2");
212
213 // Generate related tasks for transfer learning
214 let tasks = generate_related_tasks(3, 60, 4);
215
216 println!("\n Learning {} related tasks...", tasks.len());
217
218 let mut optimizer = Adam::new(0.001);
219 let mut learning_speeds = Vec::new();
220
221 for (i, task) in tasks.iter().enumerate() {
222 println!(" \n Adding column for {}...", task.task_id);
223
224 let start_time = std::time::Instant::now();
225 let metrics = learner.learn_task(task.clone(), &mut optimizer, 20)?;
226 let learning_time = start_time.elapsed();
227
228 learning_speeds.push(learning_time);
229
230 println!(" - Task accuracy: {:.3}", metrics.current_accuracy);
231 println!(" - Learning time: {learning_time:.2?}");
232
233 if i > 0 {
234 let speedup = learning_speeds[0].as_secs_f64() / learning_time.as_secs_f64();
235 println!(" - Learning speedup: {speedup:.2}x");
236 }
237 }
238
239 println!("\n Progressive Networks Results:");
240 println!(" - No catastrophic forgetting (by design)");
241 println!(" - Lateral connections enable knowledge transfer");
242 println!(" - Model capacity grows with new tasks");
243
244 Ok(())
245}
246
247/// Demonstrate Learning without Forgetting
248fn lwf_demo() -> Result<()> {
249 let layers = vec![
250 QNNLayerType::EncodingLayer { num_features: 4 },
251 QNNLayerType::VariationalLayer { num_params: 10 },
252 QNNLayerType::EntanglementLayer {
253 connectivity: "circular".to_string(),
254 },
255 QNNLayerType::MeasurementLayer {
256 measurement_basis: "computational".to_string(),
257 },
258 ];
259
260 let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;
261
262 let strategy = ContinualLearningStrategy::LearningWithoutForgetting {
263 distillation_weight: 0.5,
264 temperature: 3.0,
265 };
266
267 let mut learner = QuantumContinualLearner::new(model, strategy);
268
269 println!(" Created Learning without Forgetting learner:");
270 println!(" - Distillation weight: 0.5");
271 println!(" - Temperature: 3.0");
272
273 // Generate task sequence
274 let tasks = generate_task_sequence(4, 70, 4);
275
276 println!("\n Learning with knowledge distillation...");
277
278 let mut optimizer = Adam::new(0.001);
279 let mut distillation_losses = Vec::new();
280
281 for (i, task) in tasks.iter().enumerate() {
282 println!(" \n Learning {}...", task.task_id);
283
284 let metrics = learner.learn_task(task.clone(), &mut optimizer, 25)?;
285
286 println!(" - Task accuracy: {:.3}", metrics.current_accuracy);
287
288 if i > 0 {
289 // Simulate distillation loss tracking
290 let distillation_loss = 0.3f64.mul_add(fastrand::f64(), 0.1);
291 distillation_losses.push(distillation_loss);
292 println!(" - Distillation loss: {distillation_loss:.3}");
293
294 let all_accuracies = learner.evaluate_all_tasks()?;
295 let stability = all_accuracies
296 .values()
297 .map(|&acc| if acc > 0.6 { 1.0 } else { 0.0 })
298 .sum::<f64>()
299 / all_accuracies.len() as f64;
300
301 println!(" - Knowledge retention: {:.1}%", stability * 100.0);
302 }
303 }
304
305 println!("\n LwF Results:");
306 println!(" - Knowledge distillation preserves previous task performance");
307 println!(" - Temperature scaling provides soft targets");
308 println!(" - Balances plasticity and stability");
309
310 Ok(())
311}
312
313/// Demonstrate Parameter Isolation
314fn parameter_isolation_demo() -> Result<()> {
315 let layers = vec![
316 QNNLayerType::EncodingLayer { num_features: 4 },
317 QNNLayerType::VariationalLayer { num_params: 16 },
318 QNNLayerType::EntanglementLayer {
319 connectivity: "full".to_string(),
320 },
321 QNNLayerType::MeasurementLayer {
322 measurement_basis: "computational".to_string(),
323 },
324 ];
325
326 let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;
327
328 let strategy = ContinualLearningStrategy::ParameterIsolation {
329 allocation_strategy: ParameterAllocationStrategy::Masking,
330 growth_threshold: 0.8,
331 };
332
333 let mut learner = QuantumContinualLearner::new(model, strategy);
334
335 println!(" Created Parameter Isolation learner:");
336 println!(" - Allocation strategy: Masking");
337 println!(" - Growth threshold: 0.8");
338
339 // Generate tasks with different requirements
340 let tasks = generate_varying_complexity_tasks(3, 90, 4);
341
342 println!("\n Learning with parameter isolation...");
343
344 let mut optimizer = Adam::new(0.001);
345 let mut parameter_usage = Vec::new();
346
347 for (i, task) in tasks.iter().enumerate() {
348 println!(" \n Allocating parameters for {}...", task.task_id);
349
350 let metrics = learner.learn_task(task.clone(), &mut optimizer, 30)?;
351
352 // Simulate parameter usage tracking
353 let used_params = 16 * (i + 1) / tasks.len(); // Gradually use more parameters
354 parameter_usage.push(used_params);
355
356 println!(" - Task accuracy: {:.3}", metrics.current_accuracy);
357 println!(" - Parameters allocated: {}/{}", used_params, 16);
358 println!(
359 " - Parameter efficiency: {:.1}%",
360 used_params as f64 / 16.0 * 100.0
361 );
362
363 if i > 0 {
364 let all_accuracies = learner.evaluate_all_tasks()?;
365 let interference = 1.0
366 - all_accuracies
367 .values()
368 .take(i)
369 .map(|&acc| if acc > 0.7 { 1.0 } else { 0.0 })
370 .sum::<f64>()
371 / i as f64;
372
373 println!(" - Task interference: {:.1}%", interference * 100.0);
374 }
375 }
376
377 println!("\n Parameter Isolation Results:");
378 println!(" - Dedicated parameters prevent interference");
379 println!(" - Scalable to many tasks");
380 println!(" - Maintains task-specific knowledge");
381
382 Ok(())
383}
384
385/// Demonstrate comprehensive task sequence evaluation
386fn task_sequence_demo() -> Result<()> {
387 println!(" Comprehensive continual learning evaluation...");
388
389 // Compare different strategies
390 let strategies = vec![
391 (
392 "EWC",
393 ContinualLearningStrategy::ElasticWeightConsolidation {
394 importance_weight: 500.0,
395 fisher_samples: 100,
396 },
397 ),
398 (
399 "Experience Replay",
400 ContinualLearningStrategy::ExperienceReplay {
401 buffer_size: 300,
402 replay_ratio: 0.2,
403 memory_selection: MemorySelectionStrategy::Random,
404 },
405 ),
406 (
407 "Quantum Regularization",
408 ContinualLearningStrategy::QuantumRegularization {
409 entanglement_preservation: 0.1,
410 parameter_drift_penalty: 0.5,
411 },
412 ),
413 ];
414
415 // Generate challenging task sequence
416 let tasks = generate_challenging_sequence(5, 60, 4);
417
418 println!(
419 "\n Comparing strategies on {} challenging tasks:",
420 tasks.len()
421 );
422
423 for (strategy_name, strategy) in strategies {
424 println!("\n --- {strategy_name} ---");
425
426 let layers = vec![
427 QNNLayerType::EncodingLayer { num_features: 4 },
428 QNNLayerType::VariationalLayer { num_params: 8 },
429 QNNLayerType::MeasurementLayer {
430 measurement_basis: "computational".to_string(),
431 },
432 ];
433
434 let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;
435 let mut learner = QuantumContinualLearner::new(model, strategy);
436 let mut optimizer = Adam::new(0.001);
437
438 for task in &tasks {
439 learner.learn_task(task.clone(), &mut optimizer, 20)?;
440 }
441
442 let final_metrics = learner.get_forgetting_metrics();
443 println!(
444 " - Average accuracy: {:.3}",
445 final_metrics.average_accuracy
446 );
447 println!(
448 " - Forgetting measure: {:.3}",
449 final_metrics.forgetting_measure
450 );
451 println!(
452 " - CL score: {:.3}",
453 final_metrics.continual_learning_score
454 );
455 }
456
457 Ok(())
458}
Source§
pub fn get_task_metrics(&self) -> &HashMap<String, TaskMetrics>
pub fn get_task_metrics(&self) -> &HashMap<String, TaskMetrics>
Get task metrics
Source§
pub fn get_model(&self) -> &QuantumNeuralNetwork
pub fn get_model(&self) -> &QuantumNeuralNetwork
Get current model
Auto Trait Implementations§
impl Freeze for QuantumContinualLearner
impl RefUnwindSafe for QuantumContinualLearner
impl Send for QuantumContinualLearner
impl Sync for QuantumContinualLearner
impl Unpin for QuantumContinualLearner
impl UnwindSafe for QuantumContinualLearner
Blanket Implementations§
Source§
impl<T> BorrowMut<T> for T
where
    T: ?Sized,
impl<T> BorrowMut<T> for T
where
    T: ?Sized,
Source§fn borrow_mut(&mut self) -> &mut T
fn borrow_mut(&mut self) -> &mut T
Mutably borrows from an owned value. Read more
Source§impl<T> IntoEither for T
impl<T> IntoEither for T
Source§fn into_either(self, into_left: bool) -> Either<Self, Self>
fn into_either(self, into_left: bool) -> Either<Self, Self>
Converts
self into a Left variant of Either<Self, Self>
if into_left is true.
Converts self into a Right variant of Either<Self, Self>
otherwise. Read more
Source§
fn into_either_with<F>(self, into_left: F) -> Either<Self, Self>
fn into_either_with<F>(self, into_left: F) -> Either<Self, Self>
Converts
self into a Left variant of Either<Self, Self>
if into_left(&self) returns true.
Converts self into a Right variant of Either<Self, Self>
otherwise. Read more
Source§
impl<T> Pointable for T
impl<T> Pointable for T
Source§
impl<SS, SP> SupersetOf<SS> for SP
where
    SS: SubsetOf<SP>,
impl<SS, SP> SupersetOf<SS> for SP
where
    SS: SubsetOf<SP>,
Source§fn to_subset(&self) -> Option<SS>
fn to_subset(&self) -> Option<SS>
The inverse inclusion map: attempts to construct
self from the equivalent element of its
superset. Read more
Source§
fn is_in_subset(&self) -> bool
fn is_in_subset(&self) -> bool
Checks if
self is actually part of its subset T (and can be converted to it).
Source§
fn to_subset_unchecked(&self) -> SS
fn to_subset_unchecked(&self) -> SS
Use with care! Same as
self.to_subset but without any property checks. Always succeeds.
Source§
fn from_subset(element: &SS) -> SP
fn from_subset(element: &SS) -> SP
The inclusion map: converts
self to the equivalent element of its superset.