#![allow(clippy::pedantic, clippy::unnecessary_wraps)]

use quantrs2_ml::autodiff::optimizers::Adam;
use quantrs2_ml::prelude::*;
use quantrs2_ml::qnn::QNNLayerType;
use scirs2_core::ndarray::{Array1, Array2};
use scirs2_core::random::prelude::*;

fn main() -> Result<()> {
    println!("=== Quantum Continual Learning Demo ===\n");

    println!("1. Elastic Weight Consolidation (EWC)...");
    ewc_demo()?;

    println!("\n2. Experience Replay...");
    experience_replay_demo()?;

    println!("\n3. Progressive Networks...");
    progressive_networks_demo()?;

    println!("\n4. Learning without Forgetting...");
    lwf_demo()?;

    println!("\n5. Parameter Isolation...");
    parameter_isolation_demo()?;

    println!("\n6. Task Sequence Evaluation...");
    task_sequence_demo()?;

    println!("\n7. Forgetting Analysis...");
    forgetting_analysis_demo()?;

    println!("\n=== Quantum Continual Learning Demo Complete ===");

    Ok(())
}

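/// Demonstrates Elastic Weight Consolidation (EWC).
///
/// EWC (Kirkpatrick et al., 2017) slows learning on parameters that mattered
/// for earlier tasks by adding a quadratic penalty to the new task's loss,
/// L = L_new + (lambda / 2) * sum_i F_i * (theta_i - theta_i*)^2,
/// where F_i is the Fisher information of parameter i and theta_i* is its
/// value after the previous task. Here `importance_weight` plays the role of
/// lambda, and the Fisher terms are presumably estimated from
/// `fisher_samples` samples.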
fn ewc_demo() -> Result<()> {
    let layers = vec![
        QNNLayerType::EncodingLayer { num_features: 4 },
        QNNLayerType::VariationalLayer { num_params: 12 },
        QNNLayerType::EntanglementLayer {
            connectivity: "circular".to_string(),
        },
        QNNLayerType::VariationalLayer { num_params: 8 },
        QNNLayerType::MeasurementLayer {
            measurement_basis: "computational".to_string(),
        },
    ];

    let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;

    let strategy = ContinualLearningStrategy::ElasticWeightConsolidation {
        importance_weight: 1000.0,
        fisher_samples: 200,
    };

    let mut learner = QuantumContinualLearner::new(model, strategy);

    println!(" Created EWC continual learner:");
    println!(" - Importance weight: 1000.0");
    println!(" - Fisher samples: 200");

    let tasks = generate_task_sequence(3, 100, 4);

    println!("\n Learning sequence of {} tasks...", tasks.len());

    let mut optimizer = Adam::new(0.001);
    let mut task_accuracies = Vec::new();

    for (i, task) in tasks.iter().enumerate() {
        println!("\n Training on {}...", task.task_id);

        let metrics = learner.learn_task(task.clone(), &mut optimizer, 30)?;
        task_accuracies.push(metrics.current_accuracy);

        println!(" - Current accuracy: {:.3}", metrics.current_accuracy);

        if i > 0 {
            // Look the previous tasks up by id: the accuracy map is
            // unordered, so taking its first `i` entries could mix in the
            // current task and skip an earlier one.
            let all_accuracies = learner.evaluate_all_tasks()?;
            let avg_prev_accuracy = tasks[..i]
                .iter()
                .filter_map(|t| all_accuracies.get(&t.task_id))
                .sum::<f64>()
                / i as f64;

            println!(" - Average accuracy on previous tasks: {avg_prev_accuracy:.3}");
        }
    }

    let forgetting_metrics = learner.get_forgetting_metrics();
    println!("\n EWC Results:");
    println!(
        " - Average accuracy: {:.3}",
        forgetting_metrics.average_accuracy
    );
    println!(
        " - Forgetting measure: {:.3}",
        forgetting_metrics.forgetting_measure
    );
    println!(
        " - Continual learning score: {:.3}",
        forgetting_metrics.continual_learning_score
    );

    Ok(())
}

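/// Demonstrates experience replay.
///
/// A bounded buffer of examples from earlier tasks (here up to 500, chosen
/// uniformly at random) is mixed into each new task's training at the given
/// `replay_ratio`, so updates keep being evaluated against old tasks as well
/// as the new one.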
fn experience_replay_demo() -> Result<()> {
    let layers = vec![
        QNNLayerType::EncodingLayer { num_features: 4 },
        QNNLayerType::VariationalLayer { num_params: 8 },
        QNNLayerType::MeasurementLayer {
            measurement_basis: "computational".to_string(),
        },
    ];

    let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;

    let strategy = ContinualLearningStrategy::ExperienceReplay {
        buffer_size: 500,
        replay_ratio: 0.3,
        memory_selection: MemorySelectionStrategy::Random,
    };

    let mut learner = QuantumContinualLearner::new(model, strategy);

    println!(" Created Experience Replay learner:");
    println!(" - Buffer size: 500");
    println!(" - Replay ratio: 30%");
    println!(" - Selection: Random");

    let tasks = generate_diverse_tasks(4, 80, 4);

    println!("\n Learning {} diverse tasks...", tasks.len());

    let mut optimizer = Adam::new(0.002);

    for (i, task) in tasks.iter().enumerate() {
        println!("\n Learning {}...", task.task_id);

        let metrics = learner.learn_task(task.clone(), &mut optimizer, 25)?;

        println!(" - Task accuracy: {:.3}", metrics.current_accuracy);
        println!(" - Memory buffer usage: replay experiences stored");

        if i > 0 {
            let all_accuracies = learner.evaluate_all_tasks()?;
            let retention_rate =
                all_accuracies.values().sum::<f64>() / all_accuracies.len() as f64;
            println!(" - Average retention: {retention_rate:.3}");
        }
    }

    let final_metrics = learner.get_forgetting_metrics();
    println!("\n Experience Replay Results:");
    println!(
        " - Final average accuracy: {:.3}",
        final_metrics.average_accuracy
    );
    println!(
        " - Forgetting reduction: {:.3}",
        1.0 - final_metrics.forgetting_measure
    );

    Ok(())
}

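/// Demonstrates progressive networks.
///
/// Progressive networks (Rusu et al., 2016) freeze the parameters trained on
/// earlier tasks and add a fresh column per task, with lateral connections
/// feeding the frozen columns' features into the new one. Forgetting is
/// impossible by construction, but model capacity grows with every task.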
fn progressive_networks_demo() -> Result<()> {
    let layers = vec![
        QNNLayerType::EncodingLayer { num_features: 4 },
        QNNLayerType::VariationalLayer { num_params: 6 },
        QNNLayerType::MeasurementLayer {
            measurement_basis: "computational".to_string(),
        },
    ];

    let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;

    let strategy = ContinualLearningStrategy::ProgressiveNetworks {
        lateral_connections: true,
        adaptation_layers: 2,
    };

    let mut learner = QuantumContinualLearner::new(model, strategy);

    println!(" Created Progressive Networks learner:");
    println!(" - Lateral connections: enabled");
    println!(" - Adaptation layers: 2");

    let tasks = generate_related_tasks(3, 60, 4);

    println!("\n Learning {} related tasks...", tasks.len());

    let mut optimizer = Adam::new(0.001);
    let mut learning_speeds = Vec::new();

    for (i, task) in tasks.iter().enumerate() {
        println!("\n Adding column for {}...", task.task_id);

        let start_time = std::time::Instant::now();
        let metrics = learner.learn_task(task.clone(), &mut optimizer, 20)?;
        let learning_time = start_time.elapsed();

        learning_speeds.push(learning_time);

        println!(" - Task accuracy: {:.3}", metrics.current_accuracy);
        println!(" - Learning time: {learning_time:.2?}");

        if i > 0 {
            let speedup = learning_speeds[0].as_secs_f64() / learning_time.as_secs_f64();
            println!(" - Learning speedup: {speedup:.2}x");
        }
    }

    println!("\n Progressive Networks Results:");
    println!(" - No catastrophic forgetting (by design)");
    println!(" - Lateral connections enable knowledge transfer");
    println!(" - Model capacity grows with new tasks");

    Ok(())
}

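/// Demonstrates Learning without Forgetting (LwF).
///
/// LwF (Li & Hoiem, 2016) records the previous model's outputs on the new
/// task's inputs and adds a distillation term that keeps the updated model's
/// temperature-softened predictions close to those recorded targets, so no
/// data from earlier tasks has to be stored.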
fn lwf_demo() -> Result<()> {
    let layers = vec![
        QNNLayerType::EncodingLayer { num_features: 4 },
        QNNLayerType::VariationalLayer { num_params: 10 },
        QNNLayerType::EntanglementLayer {
            connectivity: "circular".to_string(),
        },
        QNNLayerType::MeasurementLayer {
            measurement_basis: "computational".to_string(),
        },
    ];

    let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;

    let strategy = ContinualLearningStrategy::LearningWithoutForgetting {
        distillation_weight: 0.5,
        temperature: 3.0,
    };

    let mut learner = QuantumContinualLearner::new(model, strategy);

    println!(" Created Learning without Forgetting learner:");
    println!(" - Distillation weight: 0.5");
    println!(" - Temperature: 3.0");

    let tasks = generate_task_sequence(4, 70, 4);

    println!("\n Learning with knowledge distillation...");

    let mut optimizer = Adam::new(0.001);
    let mut distillation_losses = Vec::new();

    for (i, task) in tasks.iter().enumerate() {
        println!("\n Learning {}...", task.task_id);

        let metrics = learner.learn_task(task.clone(), &mut optimizer, 25)?;

        println!(" - Task accuracy: {:.3}", metrics.current_accuracy);

        if i > 0 {
            // Simulated distillation loss in [0.1, 0.4), for display only.
            let distillation_loss = 0.3f64.mul_add(fastrand::f64(), 0.1);
            distillation_losses.push(distillation_loss);
            println!(" - Distillation loss: {distillation_loss:.3}");

            // Fraction of all learned tasks still above 60% accuracy.
            let all_accuracies = learner.evaluate_all_tasks()?;
            let stability = all_accuracies
                .values()
                .map(|&acc| if acc > 0.6 { 1.0 } else { 0.0 })
                .sum::<f64>()
                / all_accuracies.len() as f64;

            println!(" - Knowledge retention: {:.1}%", stability * 100.0);
        }
    }

    println!("\n LwF Results:");
    println!(" - Knowledge distillation preserves previous task performance");
    println!(" - Temperature scaling provides soft targets");
    println!(" - Balances plasticity and stability");

    Ok(())
}

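/// Demonstrates parameter isolation.
///
/// Each task is assigned its own subset of the variational parameters (here
/// via masking), so training one task cannot overwrite weights another task
/// relies on; `growth_threshold` presumably controls when additional
/// capacity is allocated.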
fn parameter_isolation_demo() -> Result<()> {
    let layers = vec![
        QNNLayerType::EncodingLayer { num_features: 4 },
        QNNLayerType::VariationalLayer { num_params: 16 },
        QNNLayerType::EntanglementLayer {
            connectivity: "full".to_string(),
        },
        QNNLayerType::MeasurementLayer {
            measurement_basis: "computational".to_string(),
        },
    ];

    let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;

    let strategy = ContinualLearningStrategy::ParameterIsolation {
        allocation_strategy: ParameterAllocationStrategy::Masking,
        growth_threshold: 0.8,
    };

    let mut learner = QuantumContinualLearner::new(model, strategy);

    println!(" Created Parameter Isolation learner:");
    println!(" - Allocation strategy: Masking");
    println!(" - Growth threshold: 0.8");

    let tasks = generate_varying_complexity_tasks(3, 90, 4);

    println!("\n Learning with parameter isolation...");

    let mut optimizer = Adam::new(0.001);
    let mut parameter_usage = Vec::new();

    for (i, task) in tasks.iter().enumerate() {
        println!("\n Allocating parameters for {}...", task.task_id);

        let metrics = learner.learn_task(task.clone(), &mut optimizer, 30)?;

        // Illustrative allocation count: assume the 16 variational parameters
        // are divided evenly across the task sequence rather than queried
        // from the learner.
        let used_params = 16 * (i + 1) / tasks.len();
        parameter_usage.push(used_params);

        println!(" - Task accuracy: {:.3}", metrics.current_accuracy);
        println!(" - Parameters allocated: {used_params}/16");
        println!(
            " - Parameter efficiency: {:.1}%",
            used_params as f64 / 16.0 * 100.0
        );

        if i > 0 {
            // Interference: the fraction of previous tasks (looked up by id,
            // since the accuracy map is unordered) no longer above 70%.
            let all_accuracies = learner.evaluate_all_tasks()?;
            let retained = tasks[..i]
                .iter()
                .filter_map(|t| all_accuracies.get(&t.task_id))
                .filter(|&&acc| acc > 0.7)
                .count();
            let interference = 1.0 - retained as f64 / i as f64;

            println!(" - Task interference: {:.1}%", interference * 100.0);
        }
    }

    println!("\n Parameter Isolation Results:");
    println!(" - Dedicated parameters prevent interference");
    println!(" - Scalable to many tasks");
    println!(" - Maintains task-specific knowledge");

    Ok(())
}

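/// Compares three strategies on one shared task sequence: EWC, experience
/// replay, and quantum regularization, which (judging by its fields)
/// penalizes parameter drift and loss of entanglement structure. Each
/// strategy trains a fresh model so the final forgetting metrics are
/// directly comparable.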
fn task_sequence_demo() -> Result<()> {
    println!(" Comprehensive continual learning evaluation...");

    let strategies = vec![
        (
            "EWC",
            ContinualLearningStrategy::ElasticWeightConsolidation {
                importance_weight: 500.0,
                fisher_samples: 100,
            },
        ),
        (
            "Experience Replay",
            ContinualLearningStrategy::ExperienceReplay {
                buffer_size: 300,
                replay_ratio: 0.2,
                memory_selection: MemorySelectionStrategy::Random,
            },
        ),
        (
            "Quantum Regularization",
            ContinualLearningStrategy::QuantumRegularization {
                entanglement_preservation: 0.1,
                parameter_drift_penalty: 0.5,
            },
        ),
    ];

    let tasks = generate_challenging_sequence(5, 60, 4);

    println!(
        "\n Comparing strategies on {} challenging tasks:",
        tasks.len()
    );

    for (strategy_name, strategy) in strategies {
        println!("\n --- {strategy_name} ---");

        let layers = vec![
            QNNLayerType::EncodingLayer { num_features: 4 },
            QNNLayerType::VariationalLayer { num_params: 8 },
            QNNLayerType::MeasurementLayer {
                measurement_basis: "computational".to_string(),
            },
        ];

        let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;
        let mut learner = QuantumContinualLearner::new(model, strategy);
        let mut optimizer = Adam::new(0.001);

        for task in &tasks {
            learner.learn_task(task.clone(), &mut optimizer, 20)?;
        }

        let final_metrics = learner.get_forgetting_metrics();
        println!(
            " - Average accuracy: {:.3}",
            final_metrics.average_accuracy
        );
        println!(
            " - Forgetting measure: {:.3}",
            final_metrics.forgetting_measure
        );
        println!(
            " - CL score: {:.3}",
            final_metrics.continual_learning_score
        );
    }

    Ok(())
}

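/// Builds a lower-triangular accuracy matrix: after learning task i, row i
/// holds the accuracy on every task j <= i. The diagonal is each task's
/// accuracy immediately after training, so forgetting of task j at step i
/// can be read off as max(0, acc[j][j] - acc[i][j]).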
fn forgetting_analysis_demo() -> Result<()> {
    println!(" Detailed forgetting analysis...");

    let layers = vec![
        QNNLayerType::EncodingLayer { num_features: 4 },
        QNNLayerType::VariationalLayer { num_params: 12 },
        QNNLayerType::MeasurementLayer {
            measurement_basis: "computational".to_string(),
        },
    ];

    let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;

    let strategy = ContinualLearningStrategy::ElasticWeightConsolidation {
        importance_weight: 1000.0,
        fisher_samples: 150,
    };

    let mut learner = QuantumContinualLearner::new(model, strategy);

    let tasks = generate_increasing_difficulty_tasks(4, 80, 4);

    println!("\n Learning tasks with increasing difficulty...");

    let mut optimizer = Adam::new(0.001);
    let mut accuracy_matrix = Vec::new();

    for (i, task) in tasks.iter().enumerate() {
        println!(
            "\n Learning {} (difficulty level {})...",
            task.task_id,
            i + 1
        );

        learner.learn_task(task.clone(), &mut optimizer, 25)?;

        // Record accuracy on every task learned so far (row i of the matrix).
        let all_accuracies = learner.evaluate_all_tasks()?;
        let mut current_row = Vec::new();

        for learned in &tasks[..=i] {
            current_row.push(*all_accuracies.get(&learned.task_id).unwrap_or(&0.0));
        }

        accuracy_matrix.push(current_row.clone());

        for (j, &acc) in current_row.iter().enumerate() {
            println!(" - Task {}: {:.3}", j + 1, acc);
        }
    }

    println!("\n Forgetting Analysis Results:");

    // Forgetting for task j at step i is the drop from its post-training
    // accuracy (the matrix diagonal) to its accuracy after step i.
    let mut total_forgetting = 0.0;
    let mut num_comparisons = 0;

    for i in 1..accuracy_matrix.len() {
        for j in 0..i {
            let current_acc = accuracy_matrix[i][j];
            let original_acc = accuracy_matrix[j][j];
            let forgetting = (original_acc - current_acc).max(0.0);

            if forgetting > 0.1 {
                println!(
                    " - Significant forgetting detected for Task {} after learning Task {}: {:.3}",
                    j + 1,
                    i + 1,
                    forgetting
                );
            }

            total_forgetting += forgetting;
            num_comparisons += 1;
        }
    }

    let avg_forgetting = if num_comparisons > 0 {
        total_forgetting / f64::from(num_comparisons)
    } else {
        0.0
    };

    println!(" - Average forgetting: {avg_forgetting:.3}");

    if let Some(final_row) = accuracy_matrix.last() {
        let final_avg = final_row.iter().sum::<f64>() / final_row.len() as f64;
        println!(" - Final average accuracy: {final_avg:.3}");
        println!(
            " - Continual learning effectiveness: {:.1}%",
            (1.0 - avg_forgetting) * 100.0
        );
    }

    Ok(())
}

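// `generate_task_sequence` is called by `ewc_demo` and `lwf_demo` but is not
// defined in this file. The sketch below is an assumption: its signature is
// inferred from the call sites and its body mirrors the sibling generators.
// A local definition takes precedence over a glob-imported item of the same
// name, so this stays safe even if the prelude also exports one.
fn generate_task_sequence(
    num_tasks: usize,
    samples_per_task: usize,
    feature_dim: usize,
) -> Vec<ContinualTask> {
    let mut tasks = Vec::new();

    for i in 0..num_tasks {
        // Shift each task's feature distribution so the tasks stay distinct.
        let offset = i as f64 * 0.15;
        let data = Array2::from_shape_fn((samples_per_task, feature_dim), |_| {
            (offset + 0.5 * fastrand::f64()).min(1.0)
        });

        // Threshold at the expected feature sum to keep the labels balanced.
        let labels = Array1::from_shape_fn(samples_per_task, |row| {
            usize::from(data.row(row).sum() > feature_dim as f64 * (offset + 0.25))
        });

        tasks.push(create_continual_task(
            format!("task_{i}"),
            TaskType::Classification { num_classes: 2 },
            data,
            labels,
            0.8,
        ));
    }

    tasks
}

/// Generates tasks that cycle through three qualitatively different data
/// distributions (cluster-centered, sinusoidal, and uniformly biased), so
/// the learner must retain dissimilar decision rules.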
fn generate_diverse_tasks(
    num_tasks: usize,
    samples_per_task: usize,
    feature_dim: usize,
) -> Vec<ContinualTask> {
    let mut tasks = Vec::new();

    for i in 0..num_tasks {
        let task_type = match i % 3 {
            0 => "classification",
            1 => "pattern_recognition",
            _ => "feature_detection",
        };

        let data = Array2::from_shape_fn((samples_per_task, feature_dim), |(row, col)| {
            match i % 3 {
                0 => {
                    // Samples clustered around a per-task center.
                    let center = i as f64 * 0.2;
                    0.2f64.mul_add(fastrand::f64() - 0.5, center)
                }
                1 => {
                    // Sinusoidal pattern whose frequency grows with the task index.
                    let freq = (i + 1) as f64;
                    0.3f64.mul_add(
                        (freq * row as f64).mul_add(0.1, col as f64 * 0.2).sin(),
                        0.5,
                    )
                }
                _ => {
                    // Uniform features with a per-task bias.
                    let bias = i as f64 * 0.1;
                    fastrand::f64().mul_add(0.6, bias)
                }
            }
        });

        let labels = Array1::from_shape_fn(samples_per_task, |row| {
            let features_sum = data.row(row).sum();
            usize::from(features_sum > feature_dim as f64 * 0.5)
        });

        let task = create_continual_task(
            format!("{task_type}_{i}"),
            TaskType::Classification { num_classes: 2 },
            data,
            labels,
            0.8,
        );

        tasks.push(task);
    }

    tasks
}

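/// Generates tasks that are perturbations of one shared base pattern, with
/// the variation strength growing per task; labels come from each sample's
/// correlation with the base pattern, so knowledge should transfer forward.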
fn generate_related_tasks(
    num_tasks: usize,
    samples_per_task: usize,
    feature_dim: usize,
) -> Vec<ContinualTask> {
    let mut tasks = Vec::new();
    let base_pattern = Array1::from_shape_fn(feature_dim, |i| (i as f64 * 0.3).sin());

    for i in 0..num_tasks {
        // Later tasks deviate more strongly from the shared base pattern.
        let variation_strength = (i as f64).mul_add(0.1, 0.1);

        let data = Array2::from_shape_fn((samples_per_task, feature_dim), |(row, col)| {
            let base_value = base_pattern[col];
            let variation =
                variation_strength * (row as f64).mul_add(0.05, col as f64 * 0.1).cos();
            let noise = 0.05 * (fastrand::f64() - 0.5);
            (base_value + variation + noise).clamp(0.0, 1.0)
        });

        let labels = Array1::from_shape_fn(samples_per_task, |row| {
            // Label by correlation with the base pattern.
            let correlation = data
                .row(row)
                .iter()
                .zip(base_pattern.iter())
                .map(|(&x, &y)| x * y)
                .sum::<f64>();
            usize::from(correlation > 0.5)
        });

        let task = create_continual_task(
            format!("related_task_{i}"),
            TaskType::Classification { num_classes: 2 },
            data,
            labels,
            0.8,
        );

        tasks.push(task);
    }

    tasks
}

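/// Generates tasks with increasingly complex decision boundaries: a linear
/// split, a circular boundary, a sinusoidal checkerboard, and a mixed
/// high-frequency pattern, each overlaid with mild noise.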
fn generate_varying_complexity_tasks(
    num_tasks: usize,
    samples_per_task: usize,
    feature_dim: usize,
) -> Vec<ContinualTask> {
    let mut tasks = Vec::new();

    for i in 0..num_tasks {
        let complexity = (i + 1) as f64;

        let data = Array2::from_shape_fn((samples_per_task, feature_dim), |(row, col)| {
            let x = row as f64 / samples_per_task as f64;
            let y = col as f64 / feature_dim as f64;

            let value = match i {
                // Task 0: linear decision boundary.
                0 => {
                    if x > 0.5 {
                        1.0
                    } else {
                        0.0
                    }
                }
                // Task 1: circular decision boundary.
                1 => {
                    if x.mul_add(x, y * y) > 0.25 {
                        1.0
                    } else {
                        0.0
                    }
                }
                // Task 2: sinusoidal checkerboard.
                2 => {
                    if (x * 4.0).sin() * (y * 4.0).cos() > 0.0 {
                        1.0
                    } else {
                        0.0
                    }
                }
                // Later tasks: mixed high-frequency pattern.
                _ => {
                    let pattern = (x * 8.0)
                        .sin()
                        .mul_add((y * 8.0).cos(), (x * y * 16.0).sin());
                    if pattern > 0.0 {
                        1.0
                    } else {
                        0.0
                    }
                }
            };

            // Add mild noise on top of the binary pattern.
            0.1f64.mul_add(fastrand::f64() - 0.5, value)
        });

        let labels = Array1::from_shape_fn(samples_per_task, |row| {
            let features = data.row(row);
            let decision_value = features
                .iter()
                .enumerate()
                .map(|(j, &x)| x * (j as f64 * complexity).mul_add(0.1, 1.0))
                .sum::<f64>();

            usize::from(decision_value > feature_dim as f64 * 0.5)
        });

        let task = create_continual_task(
            format!("complex_task_{i}"),
            TaskType::Classification { num_classes: 2 },
            data,
            labels,
            0.8,
        );

        tasks.push(task);
    }

    tasks
}

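/// Generates a sequence cycling through four challenge types:
/// high-frequency oscillations, bimodal clusters that drift per task, a
/// smooth saddle-shaped surface, and sparse spiky features, each with its
/// own labeling rule.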
fn generate_challenging_sequence(
    num_tasks: usize,
    samples_per_task: usize,
    feature_dim: usize,
) -> Vec<ContinualTask> {
    let mut tasks = Vec::new();

    for i in 0..num_tasks {
        let challenge_type = i % 4;

        let data = Array2::from_shape_fn((samples_per_task, feature_dim), |(row, col)| {
            match challenge_type {
                // High-frequency oscillations.
                0 => {
                    let freq = (i as f64).mul_add(2.0, 10.0);
                    0.4f64.mul_add((freq * row as f64 * 0.01).sin(), 0.5)
                }
                // Two clusters whose centers drift with the task index.
                1 => {
                    let center1 = (i as f64).mul_add(0.05, 0.3);
                    let center2 = (i as f64).mul_add(-0.05, 0.7);
                    if row % 2 == 0 {
                        0.15f64.mul_add(fastrand::f64() - 0.5, center1)
                    } else {
                        0.15f64.mul_add(fastrand::f64() - 0.5, center2)
                    }
                }
                // Smooth saddle-shaped surface.
                2 => {
                    let x = row as f64 / samples_per_task as f64;
                    let y = col as f64 / feature_dim as f64;
                    let pattern = (i as f64).mul_add(0.1, x.mul_add(x, -(y * y))).tanh();
                    0.3f64.mul_add(pattern, 0.5)
                }
                // Sparse spiky features.
                _ => {
                    if fastrand::f64() < 0.2 {
                        0.2f64.mul_add(fastrand::f64(), 0.8)
                    } else {
                        0.1 * fastrand::f64()
                    }
                }
            }
        });

        let labels = Array1::from_shape_fn(samples_per_task, |row| {
            let features = data.row(row);
            match challenge_type {
                0 => usize::from(features.sum() > feature_dim as f64 * 0.5),
                1 => usize::from(features[0] > 0.5),
                2 => usize::from(
                    features
                        .iter()
                        .enumerate()
                        .map(|(j, &x)| x * (j as f64 + 1.0))
                        .sum::<f64>()
                        > 2.0,
                ),
                _ => usize::from(features.iter().filter(|&&x| x > 0.5).count() > feature_dim / 2),
            }
        });

        let task = create_continual_task(
            format!("challenge_{i}"),
            TaskType::Classification { num_classes: 2 },
            data,
            labels,
            0.8,
        );

        tasks.push(task);
    }

    tasks
}

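/// Generates tasks of increasing difficulty: each task raises the spatial
/// frequency of the underlying sinusoidal pattern, the noise level, and the
/// labeling threshold, so later tasks are strictly harder to fit.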
fn generate_increasing_difficulty_tasks(
    num_tasks: usize,
    samples_per_task: usize,
    feature_dim: usize,
) -> Vec<ContinualTask> {
    let mut tasks = Vec::new();

    for i in 0..num_tasks {
        let difficulty = (i + 1) as f64;
        let noise_level = 0.05 + difficulty * 0.02;
        let pattern_complexity = 1.0 + difficulty * 0.5;

        let data = Array2::from_shape_fn((samples_per_task, feature_dim), |(row, col)| {
            let x = row as f64 / samples_per_task as f64;
            let y = col as f64 / feature_dim as f64;

            let base_pattern = (x * pattern_complexity * std::f64::consts::PI).sin()
                * (y * pattern_complexity * std::f64::consts::PI).cos();

            let pattern_value = 0.3f64.mul_add(base_pattern, 0.5);
            let noise = noise_level * (fastrand::f64() - 0.5);

            (pattern_value + noise).clamp(0.0, 1.0)
        });

        let labels = Array1::from_shape_fn(samples_per_task, |row| {
            let features = data.row(row);

            // Feature weights and the threshold both scale with difficulty.
            let decision_value = features
                .iter()
                .enumerate()
                .map(|(j, &x)| {
                    let weight = 1.0 + (j as f64 * difficulty * 0.1).sin();
                    x * weight
                })
                .sum::<f64>();

            let threshold = feature_dim as f64 * 0.5 * (1.0 + difficulty * 0.1);
            usize::from(decision_value > threshold)
        });

        let task = create_continual_task(
            format!("difficulty_{}", i + 1),
            TaskType::Classification { num_classes: 2 },
            data,
            labels,
            0.8,
        );

        tasks.push(task);
    }

    tasks
}