pub fn generate_task_sequence(
num_tasks: usize,
samples_per_task: usize,
feature_dim: usize,
) -> Vec<ContinualTask>

Helper function to generate a synthetic task sequence.
Examples found in repository:
examples/quantum_continual_learning.rs (line 79)
50fn ewc_demo() -> Result<()> {
51 // Create quantum model
52 let layers = vec![
53 QNNLayerType::EncodingLayer { num_features: 4 },
54 QNNLayerType::VariationalLayer { num_params: 12 },
55 QNNLayerType::EntanglementLayer {
56 connectivity: "circular".to_string(),
57 },
58 QNNLayerType::VariationalLayer { num_params: 8 },
59 QNNLayerType::MeasurementLayer {
60 measurement_basis: "computational".to_string(),
61 },
62 ];
63
64 let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;
65
66 // Create EWC strategy
67 let strategy = ContinualLearningStrategy::ElasticWeightConsolidation {
68 importance_weight: 1000.0,
69 fisher_samples: 200,
70 };
71
72 let mut learner = QuantumContinualLearner::new(model, strategy);
73
74 println!(" Created EWC continual learner:");
75 println!(" - Importance weight: 1000.0");
76 println!(" - Fisher samples: 200");
77
78 // Generate task sequence
79 let tasks = generate_task_sequence(3, 100, 4);
80
81 println!("\n Learning sequence of {} tasks...", tasks.len());
82
83 let mut optimizer = Adam::new(0.001);
84 let mut task_accuracies = Vec::new();
85
86 for (i, task) in tasks.iter().enumerate() {
87 println!(" \n Training on {}...", task.task_id);
88
89 let metrics = learner.learn_task(task.clone(), &mut optimizer, 30)?;
90 task_accuracies.push(metrics.current_accuracy);
91
92 println!(" - Current accuracy: {:.3}", metrics.current_accuracy);
93
94 // Evaluate forgetting on previous tasks
95 if i > 0 {
96 let all_accuracies = learner.evaluate_all_tasks()?;
97 let avg_prev_accuracy = all_accuracies
98 .iter()
99 .take(i)
100 .map(|(_, &acc)| acc)
101 .sum::<f64>()
102 / i as f64;
103
104 println!(" - Average accuracy on previous tasks: {avg_prev_accuracy:.3}");
105 }
106 }
107
108 // Final evaluation
109 let forgetting_metrics = learner.get_forgetting_metrics();
110 println!("\n EWC Results:");
111 println!(
112 " - Average accuracy: {:.3}",
113 forgetting_metrics.average_accuracy
114 );
115 println!(
116 " - Forgetting measure: {:.3}",
117 forgetting_metrics.forgetting_measure
118 );
119 println!(
120 " - Continual learning score: {:.3}",
121 forgetting_metrics.continual_learning_score
122 );
123
124 Ok(())
125}
126
127/// Demonstrate Experience Replay
128fn experience_replay_demo() -> Result<()> {
129 let layers = vec![
130 QNNLayerType::EncodingLayer { num_features: 4 },
131 QNNLayerType::VariationalLayer { num_params: 8 },
132 QNNLayerType::MeasurementLayer {
133 measurement_basis: "computational".to_string(),
134 },
135 ];
136
137 let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;
138
139 let strategy = ContinualLearningStrategy::ExperienceReplay {
140 buffer_size: 500,
141 replay_ratio: 0.3,
142 memory_selection: MemorySelectionStrategy::Random,
143 };
144
145 let mut learner = QuantumContinualLearner::new(model, strategy);
146
147 println!(" Created Experience Replay learner:");
148 println!(" - Buffer size: 500");
149 println!(" - Replay ratio: 30%");
150 println!(" - Selection: Random");
151
152 // Generate diverse tasks
153 let tasks = generate_diverse_tasks(4, 80, 4);
154
155 println!("\n Learning {} diverse tasks...", tasks.len());
156
157 let mut optimizer = Adam::new(0.002);
158
159 for (i, task) in tasks.iter().enumerate() {
160 println!(" \n Learning {}...", task.task_id);
161
162 let metrics = learner.learn_task(task.clone(), &mut optimizer, 25)?;
163
164 println!(" - Task accuracy: {:.3}", metrics.current_accuracy);
165
166 // Show memory buffer status
167 println!(" - Memory buffer usage: replay experiences stored");
168
169 if i > 0 {
170 let all_accuracies = learner.evaluate_all_tasks()?;
171 let retention_rate = all_accuracies.values().sum::<f64>() / all_accuracies.len() as f64;
172 println!(" - Average retention: {retention_rate:.3}");
173 }
174 }
175
176 let final_metrics = learner.get_forgetting_metrics();
177 println!("\n Experience Replay Results:");
178 println!(
179 " - Final average accuracy: {:.3}",
180 final_metrics.average_accuracy
181 );
182 println!(
183 " - Forgetting reduction: {:.3}",
184 1.0 - final_metrics.forgetting_measure
185 );
186
187 Ok(())
188}
189
190/// Demonstrate Progressive Networks
191fn progressive_networks_demo() -> Result<()> {
192 let layers = vec![
193 QNNLayerType::EncodingLayer { num_features: 4 },
194 QNNLayerType::VariationalLayer { num_params: 6 },
195 QNNLayerType::MeasurementLayer {
196 measurement_basis: "computational".to_string(),
197 },
198 ];
199
200 let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;
201
202 let strategy = ContinualLearningStrategy::ProgressiveNetworks {
203 lateral_connections: true,
204 adaptation_layers: 2,
205 };
206
207 let mut learner = QuantumContinualLearner::new(model, strategy);
208
209 println!(" Created Progressive Networks learner:");
210 println!(" - Lateral connections: enabled");
211 println!(" - Adaptation layers: 2");
212
213 // Generate related tasks for transfer learning
214 let tasks = generate_related_tasks(3, 60, 4);
215
216 println!("\n Learning {} related tasks...", tasks.len());
217
218 let mut optimizer = Adam::new(0.001);
219 let mut learning_speeds = Vec::new();
220
221 for (i, task) in tasks.iter().enumerate() {
222 println!(" \n Adding column for {}...", task.task_id);
223
224 let start_time = std::time::Instant::now();
225 let metrics = learner.learn_task(task.clone(), &mut optimizer, 20)?;
226 let learning_time = start_time.elapsed();
227
228 learning_speeds.push(learning_time);
229
230 println!(" - Task accuracy: {:.3}", metrics.current_accuracy);
231 println!(" - Learning time: {learning_time:.2?}");
232
233 if i > 0 {
234 let speedup = learning_speeds[0].as_secs_f64() / learning_time.as_secs_f64();
235 println!(" - Learning speedup: {speedup:.2}x");
236 }
237 }
238
239 println!("\n Progressive Networks Results:");
240 println!(" - No catastrophic forgetting (by design)");
241 println!(" - Lateral connections enable knowledge transfer");
242 println!(" - Model capacity grows with new tasks");
243
244 Ok(())
245}
246
247/// Demonstrate Learning without Forgetting
248fn lwf_demo() -> Result<()> {
249 let layers = vec![
250 QNNLayerType::EncodingLayer { num_features: 4 },
251 QNNLayerType::VariationalLayer { num_params: 10 },
252 QNNLayerType::EntanglementLayer {
253 connectivity: "circular".to_string(),
254 },
255 QNNLayerType::MeasurementLayer {
256 measurement_basis: "computational".to_string(),
257 },
258 ];
259
260 let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;
261
262 let strategy = ContinualLearningStrategy::LearningWithoutForgetting {
263 distillation_weight: 0.5,
264 temperature: 3.0,
265 };
266
267 let mut learner = QuantumContinualLearner::new(model, strategy);
268
269 println!(" Created Learning without Forgetting learner:");
270 println!(" - Distillation weight: 0.5");
271 println!(" - Temperature: 3.0");
272
273 // Generate task sequence
274 let tasks = generate_task_sequence(4, 70, 4);
275
276 println!("\n Learning with knowledge distillation...");
277
278 let mut optimizer = Adam::new(0.001);
279 let mut distillation_losses = Vec::new();
280
281 for (i, task) in tasks.iter().enumerate() {
282 println!(" \n Learning {}...", task.task_id);
283
284 let metrics = learner.learn_task(task.clone(), &mut optimizer, 25)?;
285
286 println!(" - Task accuracy: {:.3}", metrics.current_accuracy);
287
288 if i > 0 {
289 // Simulate distillation loss tracking
290 let distillation_loss = 0.3f64.mul_add(fastrand::f64(), 0.1);
291 distillation_losses.push(distillation_loss);
292 println!(" - Distillation loss: {distillation_loss:.3}");
293
294 let all_accuracies = learner.evaluate_all_tasks()?;
295 let stability = all_accuracies
296 .values()
297 .map(|&acc| if acc > 0.6 { 1.0 } else { 0.0 })
298 .sum::<f64>()
299 / all_accuracies.len() as f64;
300
301 println!(" - Knowledge retention: {:.1}%", stability * 100.0);
302 }
303 }
304
305 println!("\n LwF Results:");
306 println!(" - Knowledge distillation preserves previous task performance");
307 println!(" - Temperature scaling provides soft targets");
308 println!(" - Balances plasticity and stability");
309
310 Ok(())
311}