pub struct QuantumContinualLearner { /* private fields */ }

Expand description
Quantum continual learner
Implementations§
Source §
impl QuantumContinualLearner
impl QuantumContinualLearner
Source
pub fn new(
model: QuantumNeuralNetwork,
strategy: ContinualLearningStrategy,
) -> Self
pub fn new( model: QuantumNeuralNetwork, strategy: ContinualLearningStrategy, ) -> Self
Create a new quantum continual learner
Examples found in repository?
examples/quantum_continual_learning.rs (line 84)
62fn ewc_demo() -> Result<()> {
63 // Create quantum model
64 let layers = vec![
65 QNNLayerType::EncodingLayer { num_features: 4 },
66 QNNLayerType::VariationalLayer { num_params: 12 },
67 QNNLayerType::EntanglementLayer {
68 connectivity: "circular".to_string(),
69 },
70 QNNLayerType::VariationalLayer { num_params: 8 },
71 QNNLayerType::MeasurementLayer {
72 measurement_basis: "computational".to_string(),
73 },
74 ];
75
76 let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;
77
78 // Create EWC strategy
79 let strategy = ContinualLearningStrategy::ElasticWeightConsolidation {
80 importance_weight: 1000.0,
81 fisher_samples: 200,
82 };
83
84 let mut learner = QuantumContinualLearner::new(model, strategy);
85
86 println!(" Created EWC continual learner:");
87 println!(" - Importance weight: 1000.0");
88 println!(" - Fisher samples: 200");
89
90 // Generate task sequence
91 let tasks = generate_task_sequence(3, 100, 4);
92
93 println!("\n Learning sequence of {} tasks...", tasks.len());
94
95 let mut optimizer = Adam::new(0.001);
96 let mut task_accuracies = Vec::new();
97
98 for (i, task) in tasks.iter().enumerate() {
99 println!(" \n Training on {}...", task.task_id);
100
101 let metrics = learner.learn_task(task.clone(), &mut optimizer, 30)?;
102 task_accuracies.push(metrics.current_accuracy);
103
104 println!(" - Current accuracy: {:.3}", metrics.current_accuracy);
105
106 // Evaluate forgetting on previous tasks
107 if i > 0 {
108 let all_accuracies = learner.evaluate_all_tasks()?;
109 let avg_prev_accuracy = all_accuracies
110 .iter()
111 .take(i)
112 .map(|(_, &acc)| acc)
113 .sum::<f64>()
114 / i as f64;
115
116 println!(" - Average accuracy on previous tasks: {avg_prev_accuracy:.3}");
117 }
118 }
119
120 // Final evaluation
121 let forgetting_metrics = learner.get_forgetting_metrics();
122 println!("\n EWC Results:");
123 println!(
124 " - Average accuracy: {:.3}",
125 forgetting_metrics.average_accuracy
126 );
127 println!(
128 " - Forgetting measure: {:.3}",
129 forgetting_metrics.forgetting_measure
130 );
131 println!(
132 " - Continual learning score: {:.3}",
133 forgetting_metrics.continual_learning_score
134 );
135
136 Ok(())
137}
138
139/// Demonstrate Experience Replay
140fn experience_replay_demo() -> Result<()> {
141 let layers = vec![
142 QNNLayerType::EncodingLayer { num_features: 4 },
143 QNNLayerType::VariationalLayer { num_params: 8 },
144 QNNLayerType::MeasurementLayer {
145 measurement_basis: "computational".to_string(),
146 },
147 ];
148
149 let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;
150
151 let strategy = ContinualLearningStrategy::ExperienceReplay {
152 buffer_size: 500,
153 replay_ratio: 0.3,
154 memory_selection: MemorySelectionStrategy::Random,
155 };
156
157 let mut learner = QuantumContinualLearner::new(model, strategy);
158
159 println!(" Created Experience Replay learner:");
160 println!(" - Buffer size: 500");
161 println!(" - Replay ratio: 30%");
162 println!(" - Selection: Random");
163
164 // Generate diverse tasks
165 let tasks = generate_diverse_tasks(4, 80, 4);
166
167 println!("\n Learning {} diverse tasks...", tasks.len());
168
169 let mut optimizer = Adam::new(0.002);
170
171 for (i, task) in tasks.iter().enumerate() {
172 println!(" \n Learning {}...", task.task_id);
173
174 let metrics = learner.learn_task(task.clone(), &mut optimizer, 25)?;
175
176 println!(" - Task accuracy: {:.3}", metrics.current_accuracy);
177
178 // Show memory buffer status
179 println!(" - Memory buffer usage: replay experiences stored");
180
181 if i > 0 {
182 let all_accuracies = learner.evaluate_all_tasks()?;
183 let retention_rate = all_accuracies.values().sum::<f64>() / all_accuracies.len() as f64;
184 println!(" - Average retention: {retention_rate:.3}");
185 }
186 }
187
188 let final_metrics = learner.get_forgetting_metrics();
189 println!("\n Experience Replay Results:");
190 println!(
191 " - Final average accuracy: {:.3}",
192 final_metrics.average_accuracy
193 );
194 println!(
195 " - Forgetting reduction: {:.3}",
196 1.0 - final_metrics.forgetting_measure
197 );
198
199 Ok(())
200}
201
202/// Demonstrate Progressive Networks
203fn progressive_networks_demo() -> Result<()> {
204 let layers = vec![
205 QNNLayerType::EncodingLayer { num_features: 4 },
206 QNNLayerType::VariationalLayer { num_params: 6 },
207 QNNLayerType::MeasurementLayer {
208 measurement_basis: "computational".to_string(),
209 },
210 ];
211
212 let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;
213
214 let strategy = ContinualLearningStrategy::ProgressiveNetworks {
215 lateral_connections: true,
216 adaptation_layers: 2,
217 };
218
219 let mut learner = QuantumContinualLearner::new(model, strategy);
220
221 println!(" Created Progressive Networks learner:");
222 println!(" - Lateral connections: enabled");
223 println!(" - Adaptation layers: 2");
224
225 // Generate related tasks for transfer learning
226 let tasks = generate_related_tasks(3, 60, 4);
227
228 println!("\n Learning {} related tasks...", tasks.len());
229
230 let mut optimizer = Adam::new(0.001);
231 let mut learning_speeds = Vec::new();
232
233 for (i, task) in tasks.iter().enumerate() {
234 println!(" \n Adding column for {}...", task.task_id);
235
236 let start_time = std::time::Instant::now();
237 let metrics = learner.learn_task(task.clone(), &mut optimizer, 20)?;
238 let learning_time = start_time.elapsed();
239
240 learning_speeds.push(learning_time);
241
242 println!(" - Task accuracy: {:.3}", metrics.current_accuracy);
243 println!(" - Learning time: {learning_time:.2?}");
244
245 if i > 0 {
246 let speedup = learning_speeds[0].as_secs_f64() / learning_time.as_secs_f64();
247 println!(" - Learning speedup: {speedup:.2}x");
248 }
249 }
250
251 println!("\n Progressive Networks Results:");
252 println!(" - No catastrophic forgetting (by design)");
253 println!(" - Lateral connections enable knowledge transfer");
254 println!(" - Model capacity grows with new tasks");
255
256 Ok(())
257}
258
259/// Demonstrate Learning without Forgetting
260fn lwf_demo() -> Result<()> {
261 let layers = vec![
262 QNNLayerType::EncodingLayer { num_features: 4 },
263 QNNLayerType::VariationalLayer { num_params: 10 },
264 QNNLayerType::EntanglementLayer {
265 connectivity: "circular".to_string(),
266 },
267 QNNLayerType::MeasurementLayer {
268 measurement_basis: "computational".to_string(),
269 },
270 ];
271
272 let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;
273
274 let strategy = ContinualLearningStrategy::LearningWithoutForgetting {
275 distillation_weight: 0.5,
276 temperature: 3.0,
277 };
278
279 let mut learner = QuantumContinualLearner::new(model, strategy);
280
281 println!(" Created Learning without Forgetting learner:");
282 println!(" - Distillation weight: 0.5");
283 println!(" - Temperature: 3.0");
284
285 // Generate task sequence
286 let tasks = generate_task_sequence(4, 70, 4);
287
288 println!("\n Learning with knowledge distillation...");
289
290 let mut optimizer = Adam::new(0.001);
291 let mut distillation_losses = Vec::new();
292
293 for (i, task) in tasks.iter().enumerate() {
294 println!(" \n Learning {}...", task.task_id);
295
296 let metrics = learner.learn_task(task.clone(), &mut optimizer, 25)?;
297
298 println!(" - Task accuracy: {:.3}", metrics.current_accuracy);
299
300 if i > 0 {
301 // Simulate distillation loss tracking
302 let distillation_loss = 0.3f64.mul_add(fastrand::f64(), 0.1);
303 distillation_losses.push(distillation_loss);
304 println!(" - Distillation loss: {distillation_loss:.3}");
305
306 let all_accuracies = learner.evaluate_all_tasks()?;
307 let stability = all_accuracies
308 .values()
309 .map(|&acc| if acc > 0.6 { 1.0 } else { 0.0 })
310 .sum::<f64>()
311 / all_accuracies.len() as f64;
312
313 println!(" - Knowledge retention: {:.1}%", stability * 100.0);
314 }
315 }
316
317 println!("\n LwF Results:");
318 println!(" - Knowledge distillation preserves previous task performance");
319 println!(" - Temperature scaling provides soft targets");
320 println!(" - Balances plasticity and stability");
321
322 Ok(())
323}
324
325/// Demonstrate Parameter Isolation
326fn parameter_isolation_demo() -> Result<()> {
327 let layers = vec![
328 QNNLayerType::EncodingLayer { num_features: 4 },
329 QNNLayerType::VariationalLayer { num_params: 16 },
330 QNNLayerType::EntanglementLayer {
331 connectivity: "full".to_string(),
332 },
333 QNNLayerType::MeasurementLayer {
334 measurement_basis: "computational".to_string(),
335 },
336 ];
337
338 let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;
339
340 let strategy = ContinualLearningStrategy::ParameterIsolation {
341 allocation_strategy: ParameterAllocationStrategy::Masking,
342 growth_threshold: 0.8,
343 };
344
345 let mut learner = QuantumContinualLearner::new(model, strategy);
346
347 println!(" Created Parameter Isolation learner:");
348 println!(" - Allocation strategy: Masking");
349 println!(" - Growth threshold: 0.8");
350
351 // Generate tasks with different requirements
352 let tasks = generate_varying_complexity_tasks(3, 90, 4);
353
354 println!("\n Learning with parameter isolation...");
355
356 let mut optimizer = Adam::new(0.001);
357 let mut parameter_usage = Vec::new();
358
359 for (i, task) in tasks.iter().enumerate() {
360 println!(" \n Allocating parameters for {}...", task.task_id);
361
362 let metrics = learner.learn_task(task.clone(), &mut optimizer, 30)?;
363
364 // Simulate parameter usage tracking
365 let used_params = 16 * (i + 1) / tasks.len(); // Gradually use more parameters
366 parameter_usage.push(used_params);
367
368 println!(" - Task accuracy: {:.3}", metrics.current_accuracy);
369 println!(" - Parameters allocated: {}/{}", used_params, 16);
370 println!(
371 " - Parameter efficiency: {:.1}%",
372 used_params as f64 / 16.0 * 100.0
373 );
374
375 if i > 0 {
376 let all_accuracies = learner.evaluate_all_tasks()?;
377 let interference = 1.0
378 - all_accuracies
379 .values()
380 .take(i)
381 .map(|&acc| if acc > 0.7 { 1.0 } else { 0.0 })
382 .sum::<f64>()
383 / i as f64;
384
385 println!(" - Task interference: {:.1}%", interference * 100.0);
386 }
387 }
388
389 println!("\n Parameter Isolation Results:");
390 println!(" - Dedicated parameters prevent interference");
391 println!(" - Scalable to many tasks");
392 println!(" - Maintains task-specific knowledge");
393
394 Ok(())
395}
396
397/// Demonstrate comprehensive task sequence evaluation
398fn task_sequence_demo() -> Result<()> {
399 println!(" Comprehensive continual learning evaluation...");
400
401 // Compare different strategies
402 let strategies = vec![
403 (
404 "EWC",
405 ContinualLearningStrategy::ElasticWeightConsolidation {
406 importance_weight: 500.0,
407 fisher_samples: 100,
408 },
409 ),
410 (
411 "Experience Replay",
412 ContinualLearningStrategy::ExperienceReplay {
413 buffer_size: 300,
414 replay_ratio: 0.2,
415 memory_selection: MemorySelectionStrategy::Random,
416 },
417 ),
418 (
419 "Quantum Regularization",
420 ContinualLearningStrategy::QuantumRegularization {
421 entanglement_preservation: 0.1,
422 parameter_drift_penalty: 0.5,
423 },
424 ),
425 ];
426
427 // Generate challenging task sequence
428 let tasks = generate_challenging_sequence(5, 60, 4);
429
430 println!(
431 "\n Comparing strategies on {} challenging tasks:",
432 tasks.len()
433 );
434
435 for (strategy_name, strategy) in strategies {
436 println!("\n --- {strategy_name} ---");
437
438 let layers = vec![
439 QNNLayerType::EncodingLayer { num_features: 4 },
440 QNNLayerType::VariationalLayer { num_params: 8 },
441 QNNLayerType::MeasurementLayer {
442 measurement_basis: "computational".to_string(),
443 },
444 ];
445
446 let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;
447 let mut learner = QuantumContinualLearner::new(model, strategy);
448 let mut optimizer = Adam::new(0.001);
449
450 for task in &tasks {
451 learner.learn_task(task.clone(), &mut optimizer, 20)?;
452 }
453
454 let final_metrics = learner.get_forgetting_metrics();
455 println!(
456 " - Average accuracy: {:.3}",
457 final_metrics.average_accuracy
458 );
459 println!(
460 " - Forgetting measure: {:.3}",
461 final_metrics.forgetting_measure
462 );
463 println!(
464 " - CL score: {:.3}",
465 final_metrics.continual_learning_score
466 );
467 }
468
469 Ok(())
470}
471
472/// Demonstrate forgetting analysis
473fn forgetting_analysis_demo() -> Result<()> {
474 println!(" Detailed forgetting analysis...");
475
476 let layers = vec![
477 QNNLayerType::EncodingLayer { num_features: 4 },
478 QNNLayerType::VariationalLayer { num_params: 12 },
479 QNNLayerType::MeasurementLayer {
480 measurement_basis: "computational".to_string(),
481 },
482 ];
483
484 let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;
485
486 let strategy = ContinualLearningStrategy::ElasticWeightConsolidation {
487 importance_weight: 1000.0,
488 fisher_samples: 150,
489 };
490
491 let mut learner = QuantumContinualLearner::new(model, strategy);
492
493 // Create tasks with increasing difficulty
494 let tasks = generate_increasing_difficulty_tasks(4, 80, 4);
495
496 println!("\n Learning tasks with increasing difficulty...");
497
498 let mut optimizer = Adam::new(0.001);
499 let mut accuracy_matrix = Vec::new();
500
501 for (i, task) in tasks.iter().enumerate() {
502 println!(
503 " \n Learning {} (difficulty level {})...",
504 task.task_id,
505 i + 1
506 );
507
508 learner.learn_task(task.clone(), &mut optimizer, 25)?;
509
510 // Evaluate on all tasks learned so far
511 let all_accuracies = learner.evaluate_all_tasks()?;
512 let mut current_row = Vec::new();
513
514 for j in 0..=i {
515 let task_id = &tasks[j].task_id;
516 let accuracy = all_accuracies.get(task_id).unwrap_or(&0.0);
517 current_row.push(*accuracy);
518 }
519
520 accuracy_matrix.push(current_row.clone());
521
522 // Print current performance
523 for (j, &acc) in current_row.iter().enumerate() {
524 println!(" - Task {}: {:.3}", j + 1, acc);
525 }
526 }
527
528 println!("\n Forgetting Analysis Results:");
529
530 // Compute backward transfer
531 for i in 1..accuracy_matrix.len() {
532 for j in 0..i {
533 let current_acc = accuracy_matrix[i][j];
534 let original_acc = accuracy_matrix[j][j];
535 let forgetting = (original_acc - current_acc).max(0.0);
536
537 if forgetting > 0.1 {
538 println!(" - Significant forgetting detected for Task {} after learning Task {}: {:.3}",
539 j + 1, i + 1, forgetting);
540 }
541 }
542 }
543
544 // Compute average forgetting
545 let mut total_forgetting = 0.0;
546 let mut num_comparisons = 0;
547
548 for i in 1..accuracy_matrix.len() {
549 for j in 0..i {
550 let current_acc = accuracy_matrix[i][j];
551 let original_acc = accuracy_matrix[j][j];
552 total_forgetting += (original_acc - current_acc).max(0.0);
553 num_comparisons += 1;
554 }
555 }
556
557 let avg_forgetting = if num_comparisons > 0 {
558 total_forgetting / f64::from(num_comparisons)
559 } else {
560 0.0
561 };
562
563 println!(" - Average forgetting: {avg_forgetting:.3}");
564
565 // Compute final average accuracy
566 if let Some(final_row) = accuracy_matrix.last() {
567 let final_avg = final_row.iter().sum::<f64>() / final_row.len() as f64;
568 println!(" - Final average accuracy: {final_avg:.3}");
569 println!(
570 " - Continual learning effectiveness: {:.1}%",
571 (1.0 - avg_forgetting) * 100.0
572 );
573 }
574
575 Ok(())
576}

Source
pub fn learn_task(
&mut self,
task: ContinualTask,
optimizer: &mut dyn Optimizer,
epochs: usize,
) -> Result<TaskMetrics>
pub fn learn_task( &mut self, task: ContinualTask, optimizer: &mut dyn Optimizer, epochs: usize, ) -> Result<TaskMetrics>
Learn a new task
Examples found in repository?
examples/quantum_continual_learning.rs (line 101)
62fn ewc_demo() -> Result<()> {
63 // Create quantum model
64 let layers = vec![
65 QNNLayerType::EncodingLayer { num_features: 4 },
66 QNNLayerType::VariationalLayer { num_params: 12 },
67 QNNLayerType::EntanglementLayer {
68 connectivity: "circular".to_string(),
69 },
70 QNNLayerType::VariationalLayer { num_params: 8 },
71 QNNLayerType::MeasurementLayer {
72 measurement_basis: "computational".to_string(),
73 },
74 ];
75
76 let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;
77
78 // Create EWC strategy
79 let strategy = ContinualLearningStrategy::ElasticWeightConsolidation {
80 importance_weight: 1000.0,
81 fisher_samples: 200,
82 };
83
84 let mut learner = QuantumContinualLearner::new(model, strategy);
85
86 println!(" Created EWC continual learner:");
87 println!(" - Importance weight: 1000.0");
88 println!(" - Fisher samples: 200");
89
90 // Generate task sequence
91 let tasks = generate_task_sequence(3, 100, 4);
92
93 println!("\n Learning sequence of {} tasks...", tasks.len());
94
95 let mut optimizer = Adam::new(0.001);
96 let mut task_accuracies = Vec::new();
97
98 for (i, task) in tasks.iter().enumerate() {
99 println!(" \n Training on {}...", task.task_id);
100
101 let metrics = learner.learn_task(task.clone(), &mut optimizer, 30)?;
102 task_accuracies.push(metrics.current_accuracy);
103
104 println!(" - Current accuracy: {:.3}", metrics.current_accuracy);
105
106 // Evaluate forgetting on previous tasks
107 if i > 0 {
108 let all_accuracies = learner.evaluate_all_tasks()?;
109 let avg_prev_accuracy = all_accuracies
110 .iter()
111 .take(i)
112 .map(|(_, &acc)| acc)
113 .sum::<f64>()
114 / i as f64;
115
116 println!(" - Average accuracy on previous tasks: {avg_prev_accuracy:.3}");
117 }
118 }
119
120 // Final evaluation
121 let forgetting_metrics = learner.get_forgetting_metrics();
122 println!("\n EWC Results:");
123 println!(
124 " - Average accuracy: {:.3}",
125 forgetting_metrics.average_accuracy
126 );
127 println!(
128 " - Forgetting measure: {:.3}",
129 forgetting_metrics.forgetting_measure
130 );
131 println!(
132 " - Continual learning score: {:.3}",
133 forgetting_metrics.continual_learning_score
134 );
135
136 Ok(())
137}
138
139/// Demonstrate Experience Replay
140fn experience_replay_demo() -> Result<()> {
141 let layers = vec![
142 QNNLayerType::EncodingLayer { num_features: 4 },
143 QNNLayerType::VariationalLayer { num_params: 8 },
144 QNNLayerType::MeasurementLayer {
145 measurement_basis: "computational".to_string(),
146 },
147 ];
148
149 let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;
150
151 let strategy = ContinualLearningStrategy::ExperienceReplay {
152 buffer_size: 500,
153 replay_ratio: 0.3,
154 memory_selection: MemorySelectionStrategy::Random,
155 };
156
157 let mut learner = QuantumContinualLearner::new(model, strategy);
158
159 println!(" Created Experience Replay learner:");
160 println!(" - Buffer size: 500");
161 println!(" - Replay ratio: 30%");
162 println!(" - Selection: Random");
163
164 // Generate diverse tasks
165 let tasks = generate_diverse_tasks(4, 80, 4);
166
167 println!("\n Learning {} diverse tasks...", tasks.len());
168
169 let mut optimizer = Adam::new(0.002);
170
171 for (i, task) in tasks.iter().enumerate() {
172 println!(" \n Learning {}...", task.task_id);
173
174 let metrics = learner.learn_task(task.clone(), &mut optimizer, 25)?;
175
176 println!(" - Task accuracy: {:.3}", metrics.current_accuracy);
177
178 // Show memory buffer status
179 println!(" - Memory buffer usage: replay experiences stored");
180
181 if i > 0 {
182 let all_accuracies = learner.evaluate_all_tasks()?;
183 let retention_rate = all_accuracies.values().sum::<f64>() / all_accuracies.len() as f64;
184 println!(" - Average retention: {retention_rate:.3}");
185 }
186 }
187
188 let final_metrics = learner.get_forgetting_metrics();
189 println!("\n Experience Replay Results:");
190 println!(
191 " - Final average accuracy: {:.3}",
192 final_metrics.average_accuracy
193 );
194 println!(
195 " - Forgetting reduction: {:.3}",
196 1.0 - final_metrics.forgetting_measure
197 );
198
199 Ok(())
200}
201
202/// Demonstrate Progressive Networks
203fn progressive_networks_demo() -> Result<()> {
204 let layers = vec![
205 QNNLayerType::EncodingLayer { num_features: 4 },
206 QNNLayerType::VariationalLayer { num_params: 6 },
207 QNNLayerType::MeasurementLayer {
208 measurement_basis: "computational".to_string(),
209 },
210 ];
211
212 let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;
213
214 let strategy = ContinualLearningStrategy::ProgressiveNetworks {
215 lateral_connections: true,
216 adaptation_layers: 2,
217 };
218
219 let mut learner = QuantumContinualLearner::new(model, strategy);
220
221 println!(" Created Progressive Networks learner:");
222 println!(" - Lateral connections: enabled");
223 println!(" - Adaptation layers: 2");
224
225 // Generate related tasks for transfer learning
226 let tasks = generate_related_tasks(3, 60, 4);
227
228 println!("\n Learning {} related tasks...", tasks.len());
229
230 let mut optimizer = Adam::new(0.001);
231 let mut learning_speeds = Vec::new();
232
233 for (i, task) in tasks.iter().enumerate() {
234 println!(" \n Adding column for {}...", task.task_id);
235
236 let start_time = std::time::Instant::now();
237 let metrics = learner.learn_task(task.clone(), &mut optimizer, 20)?;
238 let learning_time = start_time.elapsed();
239
240 learning_speeds.push(learning_time);
241
242 println!(" - Task accuracy: {:.3}", metrics.current_accuracy);
243 println!(" - Learning time: {learning_time:.2?}");
244
245 if i > 0 {
246 let speedup = learning_speeds[0].as_secs_f64() / learning_time.as_secs_f64();
247 println!(" - Learning speedup: {speedup:.2}x");
248 }
249 }
250
251 println!("\n Progressive Networks Results:");
252 println!(" - No catastrophic forgetting (by design)");
253 println!(" - Lateral connections enable knowledge transfer");
254 println!(" - Model capacity grows with new tasks");
255
256 Ok(())
257}
258
259/// Demonstrate Learning without Forgetting
260fn lwf_demo() -> Result<()> {
261 let layers = vec![
262 QNNLayerType::EncodingLayer { num_features: 4 },
263 QNNLayerType::VariationalLayer { num_params: 10 },
264 QNNLayerType::EntanglementLayer {
265 connectivity: "circular".to_string(),
266 },
267 QNNLayerType::MeasurementLayer {
268 measurement_basis: "computational".to_string(),
269 },
270 ];
271
272 let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;
273
274 let strategy = ContinualLearningStrategy::LearningWithoutForgetting {
275 distillation_weight: 0.5,
276 temperature: 3.0,
277 };
278
279 let mut learner = QuantumContinualLearner::new(model, strategy);
280
281 println!(" Created Learning without Forgetting learner:");
282 println!(" - Distillation weight: 0.5");
283 println!(" - Temperature: 3.0");
284
285 // Generate task sequence
286 let tasks = generate_task_sequence(4, 70, 4);
287
288 println!("\n Learning with knowledge distillation...");
289
290 let mut optimizer = Adam::new(0.001);
291 let mut distillation_losses = Vec::new();
292
293 for (i, task) in tasks.iter().enumerate() {
294 println!(" \n Learning {}...", task.task_id);
295
296 let metrics = learner.learn_task(task.clone(), &mut optimizer, 25)?;
297
298 println!(" - Task accuracy: {:.3}", metrics.current_accuracy);
299
300 if i > 0 {
301 // Simulate distillation loss tracking
302 let distillation_loss = 0.3f64.mul_add(fastrand::f64(), 0.1);
303 distillation_losses.push(distillation_loss);
304 println!(" - Distillation loss: {distillation_loss:.3}");
305
306 let all_accuracies = learner.evaluate_all_tasks()?;
307 let stability = all_accuracies
308 .values()
309 .map(|&acc| if acc > 0.6 { 1.0 } else { 0.0 })
310 .sum::<f64>()
311 / all_accuracies.len() as f64;
312
313 println!(" - Knowledge retention: {:.1}%", stability * 100.0);
314 }
315 }
316
317 println!("\n LwF Results:");
318 println!(" - Knowledge distillation preserves previous task performance");
319 println!(" - Temperature scaling provides soft targets");
320 println!(" - Balances plasticity and stability");
321
322 Ok(())
323}
324
325/// Demonstrate Parameter Isolation
326fn parameter_isolation_demo() -> Result<()> {
327 let layers = vec![
328 QNNLayerType::EncodingLayer { num_features: 4 },
329 QNNLayerType::VariationalLayer { num_params: 16 },
330 QNNLayerType::EntanglementLayer {
331 connectivity: "full".to_string(),
332 },
333 QNNLayerType::MeasurementLayer {
334 measurement_basis: "computational".to_string(),
335 },
336 ];
337
338 let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;
339
340 let strategy = ContinualLearningStrategy::ParameterIsolation {
341 allocation_strategy: ParameterAllocationStrategy::Masking,
342 growth_threshold: 0.8,
343 };
344
345 let mut learner = QuantumContinualLearner::new(model, strategy);
346
347 println!(" Created Parameter Isolation learner:");
348 println!(" - Allocation strategy: Masking");
349 println!(" - Growth threshold: 0.8");
350
351 // Generate tasks with different requirements
352 let tasks = generate_varying_complexity_tasks(3, 90, 4);
353
354 println!("\n Learning with parameter isolation...");
355
356 let mut optimizer = Adam::new(0.001);
357 let mut parameter_usage = Vec::new();
358
359 for (i, task) in tasks.iter().enumerate() {
360 println!(" \n Allocating parameters for {}...", task.task_id);
361
362 let metrics = learner.learn_task(task.clone(), &mut optimizer, 30)?;
363
364 // Simulate parameter usage tracking
365 let used_params = 16 * (i + 1) / tasks.len(); // Gradually use more parameters
366 parameter_usage.push(used_params);
367
368 println!(" - Task accuracy: {:.3}", metrics.current_accuracy);
369 println!(" - Parameters allocated: {}/{}", used_params, 16);
370 println!(
371 " - Parameter efficiency: {:.1}%",
372 used_params as f64 / 16.0 * 100.0
373 );
374
375 if i > 0 {
376 let all_accuracies = learner.evaluate_all_tasks()?;
377 let interference = 1.0
378 - all_accuracies
379 .values()
380 .take(i)
381 .map(|&acc| if acc > 0.7 { 1.0 } else { 0.0 })
382 .sum::<f64>()
383 / i as f64;
384
385 println!(" - Task interference: {:.1}%", interference * 100.0);
386 }
387 }
388
389 println!("\n Parameter Isolation Results:");
390 println!(" - Dedicated parameters prevent interference");
391 println!(" - Scalable to many tasks");
392 println!(" - Maintains task-specific knowledge");
393
394 Ok(())
395}
396
397/// Demonstrate comprehensive task sequence evaluation
398fn task_sequence_demo() -> Result<()> {
399 println!(" Comprehensive continual learning evaluation...");
400
401 // Compare different strategies
402 let strategies = vec![
403 (
404 "EWC",
405 ContinualLearningStrategy::ElasticWeightConsolidation {
406 importance_weight: 500.0,
407 fisher_samples: 100,
408 },
409 ),
410 (
411 "Experience Replay",
412 ContinualLearningStrategy::ExperienceReplay {
413 buffer_size: 300,
414 replay_ratio: 0.2,
415 memory_selection: MemorySelectionStrategy::Random,
416 },
417 ),
418 (
419 "Quantum Regularization",
420 ContinualLearningStrategy::QuantumRegularization {
421 entanglement_preservation: 0.1,
422 parameter_drift_penalty: 0.5,
423 },
424 ),
425 ];
426
427 // Generate challenging task sequence
428 let tasks = generate_challenging_sequence(5, 60, 4);
429
430 println!(
431 "\n Comparing strategies on {} challenging tasks:",
432 tasks.len()
433 );
434
435 for (strategy_name, strategy) in strategies {
436 println!("\n --- {strategy_name} ---");
437
438 let layers = vec![
439 QNNLayerType::EncodingLayer { num_features: 4 },
440 QNNLayerType::VariationalLayer { num_params: 8 },
441 QNNLayerType::MeasurementLayer {
442 measurement_basis: "computational".to_string(),
443 },
444 ];
445
446 let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;
447 let mut learner = QuantumContinualLearner::new(model, strategy);
448 let mut optimizer = Adam::new(0.001);
449
450 for task in &tasks {
451 learner.learn_task(task.clone(), &mut optimizer, 20)?;
452 }
453
454 let final_metrics = learner.get_forgetting_metrics();
455 println!(
456 " - Average accuracy: {:.3}",
457 final_metrics.average_accuracy
458 );
459 println!(
460 " - Forgetting measure: {:.3}",
461 final_metrics.forgetting_measure
462 );
463 println!(
464 " - CL score: {:.3}",
465 final_metrics.continual_learning_score
466 );
467 }
468
469 Ok(())
470}
471
472/// Demonstrate forgetting analysis
473fn forgetting_analysis_demo() -> Result<()> {
474 println!(" Detailed forgetting analysis...");
475
476 let layers = vec![
477 QNNLayerType::EncodingLayer { num_features: 4 },
478 QNNLayerType::VariationalLayer { num_params: 12 },
479 QNNLayerType::MeasurementLayer {
480 measurement_basis: "computational".to_string(),
481 },
482 ];
483
484 let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;
485
486 let strategy = ContinualLearningStrategy::ElasticWeightConsolidation {
487 importance_weight: 1000.0,
488 fisher_samples: 150,
489 };
490
491 let mut learner = QuantumContinualLearner::new(model, strategy);
492
493 // Create tasks with increasing difficulty
494 let tasks = generate_increasing_difficulty_tasks(4, 80, 4);
495
496 println!("\n Learning tasks with increasing difficulty...");
497
498 let mut optimizer = Adam::new(0.001);
499 let mut accuracy_matrix = Vec::new();
500
501 for (i, task) in tasks.iter().enumerate() {
502 println!(
503 " \n Learning {} (difficulty level {})...",
504 task.task_id,
505 i + 1
506 );
507
508 learner.learn_task(task.clone(), &mut optimizer, 25)?;
509
510 // Evaluate on all tasks learned so far
511 let all_accuracies = learner.evaluate_all_tasks()?;
512 let mut current_row = Vec::new();
513
514 for j in 0..=i {
515 let task_id = &tasks[j].task_id;
516 let accuracy = all_accuracies.get(task_id).unwrap_or(&0.0);
517 current_row.push(*accuracy);
518 }
519
520 accuracy_matrix.push(current_row.clone());
521
522 // Print current performance
523 for (j, &acc) in current_row.iter().enumerate() {
524 println!(" - Task {}: {:.3}", j + 1, acc);
525 }
526 }
527
528 println!("\n Forgetting Analysis Results:");
529
530 // Compute backward transfer
531 for i in 1..accuracy_matrix.len() {
532 for j in 0..i {
533 let current_acc = accuracy_matrix[i][j];
534 let original_acc = accuracy_matrix[j][j];
535 let forgetting = (original_acc - current_acc).max(0.0);
536
537 if forgetting > 0.1 {
538 println!(" - Significant forgetting detected for Task {} after learning Task {}: {:.3}",
539 j + 1, i + 1, forgetting);
540 }
541 }
542 }
543
544 // Compute average forgetting
545 let mut total_forgetting = 0.0;
546 let mut num_comparisons = 0;
547
548 for i in 1..accuracy_matrix.len() {
549 for j in 0..i {
550 let current_acc = accuracy_matrix[i][j];
551 let original_acc = accuracy_matrix[j][j];
552 total_forgetting += (original_acc - current_acc).max(0.0);
553 num_comparisons += 1;
554 }
555 }
556
557 let avg_forgetting = if num_comparisons > 0 {
558 total_forgetting / f64::from(num_comparisons)
559 } else {
560 0.0
561 };
562
563 println!(" - Average forgetting: {avg_forgetting:.3}");
564
565 // Compute final average accuracy
566 if let Some(final_row) = accuracy_matrix.last() {
567 let final_avg = final_row.iter().sum::<f64>() / final_row.len() as f64;
568 println!(" - Final average accuracy: {final_avg:.3}");
569 println!(
570 " - Continual learning effectiveness: {:.1}%",
571 (1.0 - avg_forgetting) * 100.0
572 );
573 }
574
575 Ok(())
576}

Source
pub fn evaluate_all_tasks(&mut self) -> Result<HashMap<String, f64>>
pub fn evaluate_all_tasks(&mut self) -> Result<HashMap<String, f64>>
Evaluate all previous tasks to measure forgetting
Examples found in repository?
examples/quantum_continual_learning.rs (line 108)
62fn ewc_demo() -> Result<()> {
63 // Create quantum model
64 let layers = vec![
65 QNNLayerType::EncodingLayer { num_features: 4 },
66 QNNLayerType::VariationalLayer { num_params: 12 },
67 QNNLayerType::EntanglementLayer {
68 connectivity: "circular".to_string(),
69 },
70 QNNLayerType::VariationalLayer { num_params: 8 },
71 QNNLayerType::MeasurementLayer {
72 measurement_basis: "computational".to_string(),
73 },
74 ];
75
76 let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;
77
78 // Create EWC strategy
79 let strategy = ContinualLearningStrategy::ElasticWeightConsolidation {
80 importance_weight: 1000.0,
81 fisher_samples: 200,
82 };
83
84 let mut learner = QuantumContinualLearner::new(model, strategy);
85
86 println!(" Created EWC continual learner:");
87 println!(" - Importance weight: 1000.0");
88 println!(" - Fisher samples: 200");
89
90 // Generate task sequence
91 let tasks = generate_task_sequence(3, 100, 4);
92
93 println!("\n Learning sequence of {} tasks...", tasks.len());
94
95 let mut optimizer = Adam::new(0.001);
96 let mut task_accuracies = Vec::new();
97
98 for (i, task) in tasks.iter().enumerate() {
99 println!(" \n Training on {}...", task.task_id);
100
101 let metrics = learner.learn_task(task.clone(), &mut optimizer, 30)?;
102 task_accuracies.push(metrics.current_accuracy);
103
104 println!(" - Current accuracy: {:.3}", metrics.current_accuracy);
105
106 // Evaluate forgetting on previous tasks
107 if i > 0 {
108 let all_accuracies = learner.evaluate_all_tasks()?;
109 let avg_prev_accuracy = all_accuracies
110 .iter()
111 .take(i)
112 .map(|(_, &acc)| acc)
113 .sum::<f64>()
114 / i as f64;
115
116 println!(" - Average accuracy on previous tasks: {avg_prev_accuracy:.3}");
117 }
118 }
119
120 // Final evaluation
121 let forgetting_metrics = learner.get_forgetting_metrics();
122 println!("\n EWC Results:");
123 println!(
124 " - Average accuracy: {:.3}",
125 forgetting_metrics.average_accuracy
126 );
127 println!(
128 " - Forgetting measure: {:.3}",
129 forgetting_metrics.forgetting_measure
130 );
131 println!(
132 " - Continual learning score: {:.3}",
133 forgetting_metrics.continual_learning_score
134 );
135
136 Ok(())
137}
138
139/// Demonstrate Experience Replay
140fn experience_replay_demo() -> Result<()> {
141 let layers = vec![
142 QNNLayerType::EncodingLayer { num_features: 4 },
143 QNNLayerType::VariationalLayer { num_params: 8 },
144 QNNLayerType::MeasurementLayer {
145 measurement_basis: "computational".to_string(),
146 },
147 ];
148
149 let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;
150
151 let strategy = ContinualLearningStrategy::ExperienceReplay {
152 buffer_size: 500,
153 replay_ratio: 0.3,
154 memory_selection: MemorySelectionStrategy::Random,
155 };
156
157 let mut learner = QuantumContinualLearner::new(model, strategy);
158
159 println!(" Created Experience Replay learner:");
160 println!(" - Buffer size: 500");
161 println!(" - Replay ratio: 30%");
162 println!(" - Selection: Random");
163
164 // Generate diverse tasks
165 let tasks = generate_diverse_tasks(4, 80, 4);
166
167 println!("\n Learning {} diverse tasks...", tasks.len());
168
169 let mut optimizer = Adam::new(0.002);
170
171 for (i, task) in tasks.iter().enumerate() {
172 println!(" \n Learning {}...", task.task_id);
173
174 let metrics = learner.learn_task(task.clone(), &mut optimizer, 25)?;
175
176 println!(" - Task accuracy: {:.3}", metrics.current_accuracy);
177
178 // Show memory buffer status
179 println!(" - Memory buffer usage: replay experiences stored");
180
181 if i > 0 {
182 let all_accuracies = learner.evaluate_all_tasks()?;
183 let retention_rate = all_accuracies.values().sum::<f64>() / all_accuracies.len() as f64;
184 println!(" - Average retention: {retention_rate:.3}");
185 }
186 }
187
188 let final_metrics = learner.get_forgetting_metrics();
189 println!("\n Experience Replay Results:");
190 println!(
191 " - Final average accuracy: {:.3}",
192 final_metrics.average_accuracy
193 );
194 println!(
195 " - Forgetting reduction: {:.3}",
196 1.0 - final_metrics.forgetting_measure
197 );
198
199 Ok(())
200}
201
202/// Demonstrate Progressive Networks
203fn progressive_networks_demo() -> Result<()> {
204 let layers = vec![
205 QNNLayerType::EncodingLayer { num_features: 4 },
206 QNNLayerType::VariationalLayer { num_params: 6 },
207 QNNLayerType::MeasurementLayer {
208 measurement_basis: "computational".to_string(),
209 },
210 ];
211
212 let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;
213
214 let strategy = ContinualLearningStrategy::ProgressiveNetworks {
215 lateral_connections: true,
216 adaptation_layers: 2,
217 };
218
219 let mut learner = QuantumContinualLearner::new(model, strategy);
220
221 println!(" Created Progressive Networks learner:");
222 println!(" - Lateral connections: enabled");
223 println!(" - Adaptation layers: 2");
224
225 // Generate related tasks for transfer learning
226 let tasks = generate_related_tasks(3, 60, 4);
227
228 println!("\n Learning {} related tasks...", tasks.len());
229
230 let mut optimizer = Adam::new(0.001);
231 let mut learning_speeds = Vec::new();
232
233 for (i, task) in tasks.iter().enumerate() {
234 println!(" \n Adding column for {}...", task.task_id);
235
236 let start_time = std::time::Instant::now();
237 let metrics = learner.learn_task(task.clone(), &mut optimizer, 20)?;
238 let learning_time = start_time.elapsed();
239
240 learning_speeds.push(learning_time);
241
242 println!(" - Task accuracy: {:.3}", metrics.current_accuracy);
243 println!(" - Learning time: {learning_time:.2?}");
244
245 if i > 0 {
246 let speedup = learning_speeds[0].as_secs_f64() / learning_time.as_secs_f64();
247 println!(" - Learning speedup: {speedup:.2}x");
248 }
249 }
250
251 println!("\n Progressive Networks Results:");
252 println!(" - No catastrophic forgetting (by design)");
253 println!(" - Lateral connections enable knowledge transfer");
254 println!(" - Model capacity grows with new tasks");
255
256 Ok(())
257}
258
259/// Demonstrate Learning without Forgetting
260fn lwf_demo() -> Result<()> {
261 let layers = vec![
262 QNNLayerType::EncodingLayer { num_features: 4 },
263 QNNLayerType::VariationalLayer { num_params: 10 },
264 QNNLayerType::EntanglementLayer {
265 connectivity: "circular".to_string(),
266 },
267 QNNLayerType::MeasurementLayer {
268 measurement_basis: "computational".to_string(),
269 },
270 ];
271
272 let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;
273
274 let strategy = ContinualLearningStrategy::LearningWithoutForgetting {
275 distillation_weight: 0.5,
276 temperature: 3.0,
277 };
278
279 let mut learner = QuantumContinualLearner::new(model, strategy);
280
281 println!(" Created Learning without Forgetting learner:");
282 println!(" - Distillation weight: 0.5");
283 println!(" - Temperature: 3.0");
284
285 // Generate task sequence
286 let tasks = generate_task_sequence(4, 70, 4);
287
288 println!("\n Learning with knowledge distillation...");
289
290 let mut optimizer = Adam::new(0.001);
291 let mut distillation_losses = Vec::new();
292
293 for (i, task) in tasks.iter().enumerate() {
294 println!(" \n Learning {}...", task.task_id);
295
296 let metrics = learner.learn_task(task.clone(), &mut optimizer, 25)?;
297
298 println!(" - Task accuracy: {:.3}", metrics.current_accuracy);
299
300 if i > 0 {
301 // Simulate distillation loss tracking
302 let distillation_loss = 0.3f64.mul_add(fastrand::f64(), 0.1);
303 distillation_losses.push(distillation_loss);
304 println!(" - Distillation loss: {distillation_loss:.3}");
305
306 let all_accuracies = learner.evaluate_all_tasks()?;
307 let stability = all_accuracies
308 .values()
309 .map(|&acc| if acc > 0.6 { 1.0 } else { 0.0 })
310 .sum::<f64>()
311 / all_accuracies.len() as f64;
312
313 println!(" - Knowledge retention: {:.1}%", stability * 100.0);
314 }
315 }
316
317 println!("\n LwF Results:");
318 println!(" - Knowledge distillation preserves previous task performance");
319 println!(" - Temperature scaling provides soft targets");
320 println!(" - Balances plasticity and stability");
321
322 Ok(())
323}
324
325/// Demonstrate Parameter Isolation
326fn parameter_isolation_demo() -> Result<()> {
327 let layers = vec![
328 QNNLayerType::EncodingLayer { num_features: 4 },
329 QNNLayerType::VariationalLayer { num_params: 16 },
330 QNNLayerType::EntanglementLayer {
331 connectivity: "full".to_string(),
332 },
333 QNNLayerType::MeasurementLayer {
334 measurement_basis: "computational".to_string(),
335 },
336 ];
337
338 let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;
339
340 let strategy = ContinualLearningStrategy::ParameterIsolation {
341 allocation_strategy: ParameterAllocationStrategy::Masking,
342 growth_threshold: 0.8,
343 };
344
345 let mut learner = QuantumContinualLearner::new(model, strategy);
346
347 println!(" Created Parameter Isolation learner:");
348 println!(" - Allocation strategy: Masking");
349 println!(" - Growth threshold: 0.8");
350
351 // Generate tasks with different requirements
352 let tasks = generate_varying_complexity_tasks(3, 90, 4);
353
354 println!("\n Learning with parameter isolation...");
355
356 let mut optimizer = Adam::new(0.001);
357 let mut parameter_usage = Vec::new();
358
359 for (i, task) in tasks.iter().enumerate() {
360 println!(" \n Allocating parameters for {}...", task.task_id);
361
362 let metrics = learner.learn_task(task.clone(), &mut optimizer, 30)?;
363
364 // Simulate parameter usage tracking
365 let used_params = 16 * (i + 1) / tasks.len(); // Gradually use more parameters
366 parameter_usage.push(used_params);
367
368 println!(" - Task accuracy: {:.3}", metrics.current_accuracy);
369 println!(" - Parameters allocated: {}/{}", used_params, 16);
370 println!(
371 " - Parameter efficiency: {:.1}%",
372 used_params as f64 / 16.0 * 100.0
373 );
374
375 if i > 0 {
376 let all_accuracies = learner.evaluate_all_tasks()?;
377 let interference = 1.0
378 - all_accuracies
379 .values()
380 .take(i)
381 .map(|&acc| if acc > 0.7 { 1.0 } else { 0.0 })
382 .sum::<f64>()
383 / i as f64;
384
385 println!(" - Task interference: {:.1}%", interference * 100.0);
386 }
387 }
388
389 println!("\n Parameter Isolation Results:");
390 println!(" - Dedicated parameters prevent interference");
391 println!(" - Scalable to many tasks");
392 println!(" - Maintains task-specific knowledge");
393
394 Ok(())
395}
396
397/// Demonstrate comprehensive task sequence evaluation
398fn task_sequence_demo() -> Result<()> {
399 println!(" Comprehensive continual learning evaluation...");
400
401 // Compare different strategies
402 let strategies = vec![
403 (
404 "EWC",
405 ContinualLearningStrategy::ElasticWeightConsolidation {
406 importance_weight: 500.0,
407 fisher_samples: 100,
408 },
409 ),
410 (
411 "Experience Replay",
412 ContinualLearningStrategy::ExperienceReplay {
413 buffer_size: 300,
414 replay_ratio: 0.2,
415 memory_selection: MemorySelectionStrategy::Random,
416 },
417 ),
418 (
419 "Quantum Regularization",
420 ContinualLearningStrategy::QuantumRegularization {
421 entanglement_preservation: 0.1,
422 parameter_drift_penalty: 0.5,
423 },
424 ),
425 ];
426
427 // Generate challenging task sequence
428 let tasks = generate_challenging_sequence(5, 60, 4);
429
430 println!(
431 "\n Comparing strategies on {} challenging tasks:",
432 tasks.len()
433 );
434
435 for (strategy_name, strategy) in strategies {
436 println!("\n --- {strategy_name} ---");
437
438 let layers = vec![
439 QNNLayerType::EncodingLayer { num_features: 4 },
440 QNNLayerType::VariationalLayer { num_params: 8 },
441 QNNLayerType::MeasurementLayer {
442 measurement_basis: "computational".to_string(),
443 },
444 ];
445
446 let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;
447 let mut learner = QuantumContinualLearner::new(model, strategy);
448 let mut optimizer = Adam::new(0.001);
449
450 for task in &tasks {
451 learner.learn_task(task.clone(), &mut optimizer, 20)?;
452 }
453
454 let final_metrics = learner.get_forgetting_metrics();
455 println!(
456 " - Average accuracy: {:.3}",
457 final_metrics.average_accuracy
458 );
459 println!(
460 " - Forgetting measure: {:.3}",
461 final_metrics.forgetting_measure
462 );
463 println!(
464 " - CL score: {:.3}",
465 final_metrics.continual_learning_score
466 );
467 }
468
469 Ok(())
470}
471
472/// Demonstrate forgetting analysis
473fn forgetting_analysis_demo() -> Result<()> {
474 println!(" Detailed forgetting analysis...");
475
476 let layers = vec![
477 QNNLayerType::EncodingLayer { num_features: 4 },
478 QNNLayerType::VariationalLayer { num_params: 12 },
479 QNNLayerType::MeasurementLayer {
480 measurement_basis: "computational".to_string(),
481 },
482 ];
483
484 let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;
485
486 let strategy = ContinualLearningStrategy::ElasticWeightConsolidation {
487 importance_weight: 1000.0,
488 fisher_samples: 150,
489 };
490
491 let mut learner = QuantumContinualLearner::new(model, strategy);
492
493 // Create tasks with increasing difficulty
494 let tasks = generate_increasing_difficulty_tasks(4, 80, 4);
495
496 println!("\n Learning tasks with increasing difficulty...");
497
498 let mut optimizer = Adam::new(0.001);
499 let mut accuracy_matrix = Vec::new();
500
501 for (i, task) in tasks.iter().enumerate() {
502 println!(
503 " \n Learning {} (difficulty level {})...",
504 task.task_id,
505 i + 1
506 );
507
508 learner.learn_task(task.clone(), &mut optimizer, 25)?;
509
510 // Evaluate on all tasks learned so far
511 let all_accuracies = learner.evaluate_all_tasks()?;
512 let mut current_row = Vec::new();
513
514 for j in 0..=i {
515 let task_id = &tasks[j].task_id;
516 let accuracy = all_accuracies.get(task_id).unwrap_or(&0.0);
517 current_row.push(*accuracy);
518 }
519
520 accuracy_matrix.push(current_row.clone());
521
522 // Print current performance
523 for (j, &acc) in current_row.iter().enumerate() {
524 println!(" - Task {}: {:.3}", j + 1, acc);
525 }
526 }
527
528 println!("\n Forgetting Analysis Results:");
529
530 // Compute backward transfer
531 for i in 1..accuracy_matrix.len() {
532 for j in 0..i {
533 let current_acc = accuracy_matrix[i][j];
534 let original_acc = accuracy_matrix[j][j];
535 let forgetting = (original_acc - current_acc).max(0.0);
536
537 if forgetting > 0.1 {
538 println!(" - Significant forgetting detected for Task {} after learning Task {}: {:.3}",
539 j + 1, i + 1, forgetting);
540 }
541 }
542 }
543
544 // Compute average forgetting
545 let mut total_forgetting = 0.0;
546 let mut num_comparisons = 0;
547
548 for i in 1..accuracy_matrix.len() {
549 for j in 0..i {
550 let current_acc = accuracy_matrix[i][j];
551 let original_acc = accuracy_matrix[j][j];
552 total_forgetting += (original_acc - current_acc).max(0.0);
553 num_comparisons += 1;
554 }
555 }
556
557 let avg_forgetting = if num_comparisons > 0 {
558 total_forgetting / f64::from(num_comparisons)
559 } else {
560 0.0
561 };
562
563 println!(" - Average forgetting: {avg_forgetting:.3}");
564
565 // Compute final average accuracy
566 if let Some(final_row) = accuracy_matrix.last() {
567 let final_avg = final_row.iter().sum::<f64>() / final_row.len() as f64;
568 println!(" - Final average accuracy: {final_avg:.3}");
569 println!(
570 " - Continual learning effectiveness: {:.1}%",
571 (1.0 - avg_forgetting) * 100.0
572 );
573 }
574
575 Ok(())
576}

Source
pub fn get_forgetting_metrics(&self) -> &ForgettingMetrics
pub fn get_forgetting_metrics(&self) -> &ForgettingMetrics
Get forgetting metrics
Examples found in repository?
examples/quantum_continual_learning.rs (line 121)
62fn ewc_demo() -> Result<()> {
63 // Create quantum model
64 let layers = vec![
65 QNNLayerType::EncodingLayer { num_features: 4 },
66 QNNLayerType::VariationalLayer { num_params: 12 },
67 QNNLayerType::EntanglementLayer {
68 connectivity: "circular".to_string(),
69 },
70 QNNLayerType::VariationalLayer { num_params: 8 },
71 QNNLayerType::MeasurementLayer {
72 measurement_basis: "computational".to_string(),
73 },
74 ];
75
76 let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;
77
78 // Create EWC strategy
79 let strategy = ContinualLearningStrategy::ElasticWeightConsolidation {
80 importance_weight: 1000.0,
81 fisher_samples: 200,
82 };
83
84 let mut learner = QuantumContinualLearner::new(model, strategy);
85
86 println!(" Created EWC continual learner:");
87 println!(" - Importance weight: 1000.0");
88 println!(" - Fisher samples: 200");
89
90 // Generate task sequence
91 let tasks = generate_task_sequence(3, 100, 4);
92
93 println!("\n Learning sequence of {} tasks...", tasks.len());
94
95 let mut optimizer = Adam::new(0.001);
96 let mut task_accuracies = Vec::new();
97
98 for (i, task) in tasks.iter().enumerate() {
99 println!(" \n Training on {}...", task.task_id);
100
101 let metrics = learner.learn_task(task.clone(), &mut optimizer, 30)?;
102 task_accuracies.push(metrics.current_accuracy);
103
104 println!(" - Current accuracy: {:.3}", metrics.current_accuracy);
105
106 // Evaluate forgetting on previous tasks
107 if i > 0 {
108 let all_accuracies = learner.evaluate_all_tasks()?;
109 let avg_prev_accuracy = all_accuracies
110 .iter()
111 .take(i)
112 .map(|(_, &acc)| acc)
113 .sum::<f64>()
114 / i as f64;
115
116 println!(" - Average accuracy on previous tasks: {avg_prev_accuracy:.3}");
117 }
118 }
119
120 // Final evaluation
121 let forgetting_metrics = learner.get_forgetting_metrics();
122 println!("\n EWC Results:");
123 println!(
124 " - Average accuracy: {:.3}",
125 forgetting_metrics.average_accuracy
126 );
127 println!(
128 " - Forgetting measure: {:.3}",
129 forgetting_metrics.forgetting_measure
130 );
131 println!(
132 " - Continual learning score: {:.3}",
133 forgetting_metrics.continual_learning_score
134 );
135
136 Ok(())
137}
138
139/// Demonstrate Experience Replay
140fn experience_replay_demo() -> Result<()> {
141 let layers = vec![
142 QNNLayerType::EncodingLayer { num_features: 4 },
143 QNNLayerType::VariationalLayer { num_params: 8 },
144 QNNLayerType::MeasurementLayer {
145 measurement_basis: "computational".to_string(),
146 },
147 ];
148
149 let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;
150
151 let strategy = ContinualLearningStrategy::ExperienceReplay {
152 buffer_size: 500,
153 replay_ratio: 0.3,
154 memory_selection: MemorySelectionStrategy::Random,
155 };
156
157 let mut learner = QuantumContinualLearner::new(model, strategy);
158
159 println!(" Created Experience Replay learner:");
160 println!(" - Buffer size: 500");
161 println!(" - Replay ratio: 30%");
162 println!(" - Selection: Random");
163
164 // Generate diverse tasks
165 let tasks = generate_diverse_tasks(4, 80, 4);
166
167 println!("\n Learning {} diverse tasks...", tasks.len());
168
169 let mut optimizer = Adam::new(0.002);
170
171 for (i, task) in tasks.iter().enumerate() {
172 println!(" \n Learning {}...", task.task_id);
173
174 let metrics = learner.learn_task(task.clone(), &mut optimizer, 25)?;
175
176 println!(" - Task accuracy: {:.3}", metrics.current_accuracy);
177
178 // Show memory buffer status
179 println!(" - Memory buffer usage: replay experiences stored");
180
181 if i > 0 {
182 let all_accuracies = learner.evaluate_all_tasks()?;
183 let retention_rate = all_accuracies.values().sum::<f64>() / all_accuracies.len() as f64;
184 println!(" - Average retention: {retention_rate:.3}");
185 }
186 }
187
188 let final_metrics = learner.get_forgetting_metrics();
189 println!("\n Experience Replay Results:");
190 println!(
191 " - Final average accuracy: {:.3}",
192 final_metrics.average_accuracy
193 );
194 println!(
195 " - Forgetting reduction: {:.3}",
196 1.0 - final_metrics.forgetting_measure
197 );
198
199 Ok(())
200}
201
202/// Demonstrate Progressive Networks
203fn progressive_networks_demo() -> Result<()> {
204 let layers = vec![
205 QNNLayerType::EncodingLayer { num_features: 4 },
206 QNNLayerType::VariationalLayer { num_params: 6 },
207 QNNLayerType::MeasurementLayer {
208 measurement_basis: "computational".to_string(),
209 },
210 ];
211
212 let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;
213
214 let strategy = ContinualLearningStrategy::ProgressiveNetworks {
215 lateral_connections: true,
216 adaptation_layers: 2,
217 };
218
219 let mut learner = QuantumContinualLearner::new(model, strategy);
220
221 println!(" Created Progressive Networks learner:");
222 println!(" - Lateral connections: enabled");
223 println!(" - Adaptation layers: 2");
224
225 // Generate related tasks for transfer learning
226 let tasks = generate_related_tasks(3, 60, 4);
227
228 println!("\n Learning {} related tasks...", tasks.len());
229
230 let mut optimizer = Adam::new(0.001);
231 let mut learning_speeds = Vec::new();
232
233 for (i, task) in tasks.iter().enumerate() {
234 println!(" \n Adding column for {}...", task.task_id);
235
236 let start_time = std::time::Instant::now();
237 let metrics = learner.learn_task(task.clone(), &mut optimizer, 20)?;
238 let learning_time = start_time.elapsed();
239
240 learning_speeds.push(learning_time);
241
242 println!(" - Task accuracy: {:.3}", metrics.current_accuracy);
243 println!(" - Learning time: {learning_time:.2?}");
244
245 if i > 0 {
246 let speedup = learning_speeds[0].as_secs_f64() / learning_time.as_secs_f64();
247 println!(" - Learning speedup: {speedup:.2}x");
248 }
249 }
250
251 println!("\n Progressive Networks Results:");
252 println!(" - No catastrophic forgetting (by design)");
253 println!(" - Lateral connections enable knowledge transfer");
254 println!(" - Model capacity grows with new tasks");
255
256 Ok(())
257}
258
259/// Demonstrate Learning without Forgetting
260fn lwf_demo() -> Result<()> {
261 let layers = vec![
262 QNNLayerType::EncodingLayer { num_features: 4 },
263 QNNLayerType::VariationalLayer { num_params: 10 },
264 QNNLayerType::EntanglementLayer {
265 connectivity: "circular".to_string(),
266 },
267 QNNLayerType::MeasurementLayer {
268 measurement_basis: "computational".to_string(),
269 },
270 ];
271
272 let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;
273
274 let strategy = ContinualLearningStrategy::LearningWithoutForgetting {
275 distillation_weight: 0.5,
276 temperature: 3.0,
277 };
278
279 let mut learner = QuantumContinualLearner::new(model, strategy);
280
281 println!(" Created Learning without Forgetting learner:");
282 println!(" - Distillation weight: 0.5");
283 println!(" - Temperature: 3.0");
284
285 // Generate task sequence
286 let tasks = generate_task_sequence(4, 70, 4);
287
288 println!("\n Learning with knowledge distillation...");
289
290 let mut optimizer = Adam::new(0.001);
291 let mut distillation_losses = Vec::new();
292
293 for (i, task) in tasks.iter().enumerate() {
294 println!(" \n Learning {}...", task.task_id);
295
296 let metrics = learner.learn_task(task.clone(), &mut optimizer, 25)?;
297
298 println!(" - Task accuracy: {:.3}", metrics.current_accuracy);
299
300 if i > 0 {
301 // Simulate distillation loss tracking
302 let distillation_loss = 0.3f64.mul_add(fastrand::f64(), 0.1);
303 distillation_losses.push(distillation_loss);
304 println!(" - Distillation loss: {distillation_loss:.3}");
305
306 let all_accuracies = learner.evaluate_all_tasks()?;
307 let stability = all_accuracies
308 .values()
309 .map(|&acc| if acc > 0.6 { 1.0 } else { 0.0 })
310 .sum::<f64>()
311 / all_accuracies.len() as f64;
312
313 println!(" - Knowledge retention: {:.1}%", stability * 100.0);
314 }
315 }
316
317 println!("\n LwF Results:");
318 println!(" - Knowledge distillation preserves previous task performance");
319 println!(" - Temperature scaling provides soft targets");
320 println!(" - Balances plasticity and stability");
321
322 Ok(())
323}
324
325/// Demonstrate Parameter Isolation
326fn parameter_isolation_demo() -> Result<()> {
327 let layers = vec![
328 QNNLayerType::EncodingLayer { num_features: 4 },
329 QNNLayerType::VariationalLayer { num_params: 16 },
330 QNNLayerType::EntanglementLayer {
331 connectivity: "full".to_string(),
332 },
333 QNNLayerType::MeasurementLayer {
334 measurement_basis: "computational".to_string(),
335 },
336 ];
337
338 let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;
339
340 let strategy = ContinualLearningStrategy::ParameterIsolation {
341 allocation_strategy: ParameterAllocationStrategy::Masking,
342 growth_threshold: 0.8,
343 };
344
345 let mut learner = QuantumContinualLearner::new(model, strategy);
346
347 println!(" Created Parameter Isolation learner:");
348 println!(" - Allocation strategy: Masking");
349 println!(" - Growth threshold: 0.8");
350
351 // Generate tasks with different requirements
352 let tasks = generate_varying_complexity_tasks(3, 90, 4);
353
354 println!("\n Learning with parameter isolation...");
355
356 let mut optimizer = Adam::new(0.001);
357 let mut parameter_usage = Vec::new();
358
359 for (i, task) in tasks.iter().enumerate() {
360 println!(" \n Allocating parameters for {}...", task.task_id);
361
362 let metrics = learner.learn_task(task.clone(), &mut optimizer, 30)?;
363
364 // Simulate parameter usage tracking
365 let used_params = 16 * (i + 1) / tasks.len(); // Gradually use more parameters
366 parameter_usage.push(used_params);
367
368 println!(" - Task accuracy: {:.3}", metrics.current_accuracy);
369 println!(" - Parameters allocated: {}/{}", used_params, 16);
370 println!(
371 " - Parameter efficiency: {:.1}%",
372 used_params as f64 / 16.0 * 100.0
373 );
374
375 if i > 0 {
376 let all_accuracies = learner.evaluate_all_tasks()?;
377 let interference = 1.0
378 - all_accuracies
379 .values()
380 .take(i)
381 .map(|&acc| if acc > 0.7 { 1.0 } else { 0.0 })
382 .sum::<f64>()
383 / i as f64;
384
385 println!(" - Task interference: {:.1}%", interference * 100.0);
386 }
387 }
388
389 println!("\n Parameter Isolation Results:");
390 println!(" - Dedicated parameters prevent interference");
391 println!(" - Scalable to many tasks");
392 println!(" - Maintains task-specific knowledge");
393
394 Ok(())
395}
396
397/// Demonstrate comprehensive task sequence evaluation
398fn task_sequence_demo() -> Result<()> {
399 println!(" Comprehensive continual learning evaluation...");
400
401 // Compare different strategies
402 let strategies = vec![
403 (
404 "EWC",
405 ContinualLearningStrategy::ElasticWeightConsolidation {
406 importance_weight: 500.0,
407 fisher_samples: 100,
408 },
409 ),
410 (
411 "Experience Replay",
412 ContinualLearningStrategy::ExperienceReplay {
413 buffer_size: 300,
414 replay_ratio: 0.2,
415 memory_selection: MemorySelectionStrategy::Random,
416 },
417 ),
418 (
419 "Quantum Regularization",
420 ContinualLearningStrategy::QuantumRegularization {
421 entanglement_preservation: 0.1,
422 parameter_drift_penalty: 0.5,
423 },
424 ),
425 ];
426
427 // Generate challenging task sequence
428 let tasks = generate_challenging_sequence(5, 60, 4);
429
430 println!(
431 "\n Comparing strategies on {} challenging tasks:",
432 tasks.len()
433 );
434
435 for (strategy_name, strategy) in strategies {
436 println!("\n --- {strategy_name} ---");
437
438 let layers = vec![
439 QNNLayerType::EncodingLayer { num_features: 4 },
440 QNNLayerType::VariationalLayer { num_params: 8 },
441 QNNLayerType::MeasurementLayer {
442 measurement_basis: "computational".to_string(),
443 },
444 ];
445
446 let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;
447 let mut learner = QuantumContinualLearner::new(model, strategy);
448 let mut optimizer = Adam::new(0.001);
449
450 for task in &tasks {
451 learner.learn_task(task.clone(), &mut optimizer, 20)?;
452 }
453
454 let final_metrics = learner.get_forgetting_metrics();
455 println!(
456 " - Average accuracy: {:.3}",
457 final_metrics.average_accuracy
458 );
459 println!(
460 " - Forgetting measure: {:.3}",
461 final_metrics.forgetting_measure
462 );
463 println!(
464 " - CL score: {:.3}",
465 final_metrics.continual_learning_score
466 );
467 }
468
469 Ok(())
470}

Source
pub fn get_task_metrics(&self) -> &HashMap<String, TaskMetrics>
pub fn get_task_metrics(&self) -> &HashMap<String, TaskMetrics>
Get task metrics
Source
pub fn get_model(&self) -> &QuantumNeuralNetwork
pub fn get_model(&self) -> &QuantumNeuralNetwork
Get current model
Auto Trait Implementations§
impl Freeze for QuantumContinualLearner
impl RefUnwindSafe for QuantumContinualLearner
impl Send for QuantumContinualLearner
impl Sync for QuantumContinualLearner
impl Unpin for QuantumContinualLearner
impl UnwindSafe for QuantumContinualLearner
Blanket Implementations§
Source§
impl<T> BorrowMut<T> for T
where
T: ?Sized,
impl<T> BorrowMut<T> for T
where
T: ?Sized,
Source§fn borrow_mut(&mut self) -> &mut T
fn borrow_mut(&mut self) -> &mut T
Mutably borrows from an owned value. Read more
Source§impl<T> IntoEither for T
impl<T> IntoEither for T
Source§fn into_either(self, into_left: bool) -> Either<Self, Self>
fn into_either(self, into_left: bool) -> Either<Self, Self>
Converts
self into a Left variant of Either<Self, Self>
if into_left is true.
Converts self into a Right variant of Either<Self, Self>
otherwise. Read more
Source§
fn into_either_with<F>(self, into_left: F) -> Either<Self, Self>
fn into_either_with<F>(self, into_left: F) -> Either<Self, Self>
Converts
self into a Left variant of Either<Self, Self>
if into_left(&self) returns true.
Converts self into a Right variant of Either<Self, Self>
otherwise. Read more
Source§
impl<T> Pointable for T
impl<T> Pointable for T
Source§
impl<SS, SP> SupersetOf<SS> for SP
where
SS: SubsetOf<SP>,
impl<SS, SP> SupersetOf<SS> for SP
where
SS: SubsetOf<SP>,
Source§fn to_subset(&self) -> Option<SS>
fn to_subset(&self) -> Option<SS>
The inverse inclusion map: attempts to construct
self from the equivalent element of its
superset. Read more
Source§
fn is_in_subset(&self) -> bool
fn is_in_subset(&self) -> bool
Checks if
self is actually part of its subset T (and can be converted to it).
Source§
fn to_subset_unchecked(&self) -> SS
fn to_subset_unchecked(&self) -> SS
Use with care! Same as
self.to_subset but without any property checks. Always succeeds.
Source§
fn from_subset(element: &SS) -> SP
fn from_subset(element: &SS) -> SP
The inclusion map: converts
self to the equivalent element of its superset.