quantum_continual_learning/quantum_continual_learning.rs

#![allow(
    clippy::pedantic,
    clippy::unnecessary_wraps,
    clippy::needless_range_loop,
    clippy::useless_vec,
    clippy::needless_collect,
    clippy::too_many_arguments,
    clippy::let_and_return,
    clippy::needless_pass_by_ref_mut,
    clippy::manual_clamp,
    clippy::collection_is_never_read
)]
#![allow(dead_code)]
//! Quantum Continual Learning Example
//!
//! This example demonstrates various continual learning strategies for quantum neural networks,
//! including Elastic Weight Consolidation, Experience Replay, Progressive Networks, and more.

use quantrs2_ml::autodiff::optimizers::Adam;
use quantrs2_ml::prelude::*;
use quantrs2_ml::qnn::QNNLayerType;
use scirs2_core::ndarray::{Array1, Array2};
use scirs2_core::random::prelude::*;

fn main() -> Result<()> {
    println!("=== Quantum Continual Learning Demo ===\n");

    // Step 1: Elastic Weight Consolidation (EWC)
    println!("1. Elastic Weight Consolidation (EWC)...");
    ewc_demo()?;

    // Step 2: Experience Replay
    println!("\n2. Experience Replay...");
    experience_replay_demo()?;

    // Step 3: Progressive Networks
    println!("\n3. Progressive Networks...");
    progressive_networks_demo()?;

    // Step 4: Learning without Forgetting (LwF)
    println!("\n4. Learning without Forgetting...");
    lwf_demo()?;

    // Step 5: Parameter Isolation
    println!("\n5. Parameter Isolation...");
    parameter_isolation_demo()?;

    // Step 6: Task sequence evaluation
    println!("\n6. Task Sequence Evaluation...");
    task_sequence_demo()?;

    // Step 7: Forgetting analysis
    println!("\n7. Forgetting Analysis...");
    forgetting_analysis_demo()?;

    println!("\n=== Quantum Continual Learning Demo Complete ===");

    Ok(())
}

/// Demonstrate Elastic Weight Consolidation
fn ewc_demo() -> Result<()> {
    // Create quantum model
    let layers = vec![
        QNNLayerType::EncodingLayer { num_features: 4 },
        QNNLayerType::VariationalLayer { num_params: 12 },
        QNNLayerType::EntanglementLayer {
            connectivity: "circular".to_string(),
        },
        QNNLayerType::VariationalLayer { num_params: 8 },
        QNNLayerType::MeasurementLayer {
            measurement_basis: "computational".to_string(),
        },
    ];

    let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;

    // Create EWC strategy (see `ewc_penalty_sketch` after this function for
    // the standard form of the penalty this corresponds to)
    let strategy = ContinualLearningStrategy::ElasticWeightConsolidation {
        importance_weight: 1000.0,
        fisher_samples: 200,
    };

    let mut learner = QuantumContinualLearner::new(model, strategy);

    println!("   Created EWC continual learner:");
    println!("   - Importance weight: 1000.0");
    println!("   - Fisher samples: 200");

    // Generate task sequence
    let tasks = generate_task_sequence(3, 100, 4);

    println!("\n   Learning sequence of {} tasks...", tasks.len());

    let mut optimizer = Adam::new(0.001);
    let mut task_accuracies = Vec::new();

    for (i, task) in tasks.iter().enumerate() {
        println!("   \n   Training on {}...", task.task_id);

        let metrics = learner.learn_task(task.clone(), &mut optimizer, 30)?;
        task_accuracies.push(metrics.current_accuracy);

        println!("   - Current accuracy: {:.3}", metrics.current_accuracy);

        // Evaluate forgetting on previous tasks
        if i > 0 {
            let all_accuracies = learner.evaluate_all_tasks()?;
            let avg_prev_accuracy = all_accuracies
                .iter()
                .take(i)
                .map(|(_, &acc)| acc)
                .sum::<f64>()
                / i as f64;

            println!("   - Average accuracy on previous tasks: {avg_prev_accuracy:.3}");
        }
    }

    // Final evaluation
    let forgetting_metrics = learner.get_forgetting_metrics();
    println!("\n   EWC Results:");
    println!(
        "   - Average accuracy: {:.3}",
        forgetting_metrics.average_accuracy
    );
    println!(
        "   - Forgetting measure: {:.3}",
        forgetting_metrics.forgetting_measure
    );
    println!(
        "   - Continual learning score: {:.3}",
        forgetting_metrics.continual_learning_score
    );

    Ok(())
}

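// For intuition, a minimal sketch of the quadratic penalty EWC adds to the
// task loss: loss' = loss + (lambda / 2) * sum_i F_i * (theta_i - theta*_i)^2,
// where F_i is the diagonal Fisher information and theta*_i the parameters
// anchored after the previous task. This helper is illustrative only and not
// part of the quantrs2_ml API; the learner derives its own penalty internally
// from `importance_weight` and `fisher_samples`.
fn ewc_penalty_sketch(
    params: &Array1<f64>,
    anchor_params: &Array1<f64>,
    fisher_diagonal: &Array1<f64>,
    importance_weight: f64,
) -> f64 {
    let quadratic: f64 = params
        .iter()
        .zip(anchor_params.iter())
        .zip(fisher_diagonal.iter())
        // Parameters with high Fisher information are "important" for old
        // tasks, so drifting them away from the anchor is penalized heavily.
        .map(|((&theta, &theta_star), &fisher)| fisher * (theta - theta_star).powi(2))
        .sum();
    0.5 * importance_weight * quadratic
}
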
/// Demonstrate Experience Replay
fn experience_replay_demo() -> Result<()> {
    let layers = vec![
        QNNLayerType::EncodingLayer { num_features: 4 },
        QNNLayerType::VariationalLayer { num_params: 8 },
        QNNLayerType::MeasurementLayer {
            measurement_basis: "computational".to_string(),
        },
    ];

    let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;

    let strategy = ContinualLearningStrategy::ExperienceReplay {
        buffer_size: 500,
        replay_ratio: 0.3,
        memory_selection: MemorySelectionStrategy::Random,
    };
    // See `reservoir_insert_sketch` after this function for one way a
    // fixed-size, randomly-selected buffer can be maintained.

    let mut learner = QuantumContinualLearner::new(model, strategy);

    println!("   Created Experience Replay learner:");
    println!("   - Buffer size: 500");
    println!("   - Replay ratio: 30%");
    println!("   - Selection: Random");

    // Generate diverse tasks
    let tasks = generate_diverse_tasks(4, 80, 4);

    println!("\n   Learning {} diverse tasks...", tasks.len());

    let mut optimizer = Adam::new(0.002);

    for (i, task) in tasks.iter().enumerate() {
        println!("   \n   Learning {}...", task.task_id);

        let metrics = learner.learn_task(task.clone(), &mut optimizer, 25)?;

        println!("   - Task accuracy: {:.3}", metrics.current_accuracy);

        // Show memory buffer status
        println!("   - Memory buffer usage: replay experiences stored");

        if i > 0 {
            let all_accuracies = learner.evaluate_all_tasks()?;
            let retention_rate = all_accuracies.values().sum::<f64>() / all_accuracies.len() as f64;
            println!("   - Average retention: {retention_rate:.3}");
        }
    }

    let final_metrics = learner.get_forgetting_metrics();
    println!("\n   Experience Replay Results:");
    println!(
        "   - Final average accuracy: {:.3}",
        final_metrics.average_accuracy
    );
    println!(
        "   - Forgetting reduction: {:.3}",
        1.0 - final_metrics.forgetting_measure
    );

    Ok(())
}

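// A minimal sketch of a fixed-size replay buffer with random (reservoir)
// selection, matching the spirit of `MemorySelectionStrategy::Random`.
// Illustrative only; the actual buffer lives inside `QuantumContinualLearner`,
// and the (features, label) pair below is a hypothetical stand-in for whatever
// the learner stores per sample.
fn reservoir_insert_sketch(
    buffer: &mut Vec<(Array1<f64>, usize)>,
    sample: (Array1<f64>, usize),
    seen_so_far: usize, // count of samples seen so far, including this one
    buffer_size: usize,
) {
    if buffer.len() < buffer_size {
        buffer.push(sample);
    } else {
        // Replace a random slot with probability buffer_size / seen_so_far,
        // so every sample seen has an equal chance of residing in the buffer.
        let j = fastrand::usize(0..seen_so_far);
        if j < buffer_size {
            buffer[j] = sample;
        }
    }
}
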
/// Demonstrate Progressive Networks
fn progressive_networks_demo() -> Result<()> {
    let layers = vec![
        QNNLayerType::EncodingLayer { num_features: 4 },
        QNNLayerType::VariationalLayer { num_params: 6 },
        QNNLayerType::MeasurementLayer {
            measurement_basis: "computational".to_string(),
        },
    ];

    let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;

    let strategy = ContinualLearningStrategy::ProgressiveNetworks {
        lateral_connections: true,
        adaptation_layers: 2,
    };
    // See `lateral_combine_sketch` after this function for the idea behind
    // lateral connections.

    let mut learner = QuantumContinualLearner::new(model, strategy);

    println!("   Created Progressive Networks learner:");
    println!("   - Lateral connections: enabled");
    println!("   - Adaptation layers: 2");

    // Generate related tasks for transfer learning
    let tasks = generate_related_tasks(3, 60, 4);

    println!("\n   Learning {} related tasks...", tasks.len());

    let mut optimizer = Adam::new(0.001);
    let mut learning_speeds = Vec::new();

    for (i, task) in tasks.iter().enumerate() {
        println!("   \n   Adding column for {}...", task.task_id);

        let start_time = std::time::Instant::now();
        let metrics = learner.learn_task(task.clone(), &mut optimizer, 20)?;
        let learning_time = start_time.elapsed();

        learning_speeds.push(learning_time);

        println!("   - Task accuracy: {:.3}", metrics.current_accuracy);
        println!("   - Learning time: {learning_time:.2?}");

        if i > 0 {
            let speedup = learning_speeds[0].as_secs_f64() / learning_time.as_secs_f64();
            println!("   - Learning speedup: {speedup:.2}x");
        }
    }

    println!("\n   Progressive Networks Results:");
    println!("   - No catastrophic forgetting (by design)");
    println!("   - Lateral connections enable knowledge transfer");
    println!("   - Model capacity grows with new tasks");

    Ok(())
}

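// A minimal sketch of the lateral combination used by progressive networks:
// the new column's activations are augmented with a weighted projection of the
// frozen previous column's activations, so old knowledge transfers without
// being overwritten. The elementwise form and names below are assumptions for
// illustration; the real adapter structure is internal to the
// `ProgressiveNetworks` strategy.
fn lateral_combine_sketch(
    new_column: &Array1<f64>,
    frozen_column: &Array1<f64>,
    lateral_weights: &Array1<f64>,
) -> Array1<f64> {
    // The frozen column is never updated; only lateral_weights and the new
    // column's own parameters are trained for the current task.
    new_column + &(frozen_column * lateral_weights)
}
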
/// Demonstrate Learning without Forgetting
fn lwf_demo() -> Result<()> {
    let layers = vec![
        QNNLayerType::EncodingLayer { num_features: 4 },
        QNNLayerType::VariationalLayer { num_params: 10 },
        QNNLayerType::EntanglementLayer {
            connectivity: "circular".to_string(),
        },
        QNNLayerType::MeasurementLayer {
            measurement_basis: "computational".to_string(),
        },
    ];

    let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;

    let strategy = ContinualLearningStrategy::LearningWithoutForgetting {
        distillation_weight: 0.5,
        temperature: 3.0,
    };
    // See `distillation_loss_sketch` after this function for the
    // softened-target loss that knowledge distillation uses.

    let mut learner = QuantumContinualLearner::new(model, strategy);

    println!("   Created Learning without Forgetting learner:");
    println!("   - Distillation weight: 0.5");
    println!("   - Temperature: 3.0");

    // Generate task sequence
    let tasks = generate_task_sequence(4, 70, 4);

    println!("\n   Learning with knowledge distillation...");

    let mut optimizer = Adam::new(0.001);
    let mut distillation_losses = Vec::new();

    for (i, task) in tasks.iter().enumerate() {
        println!("   \n   Learning {}...", task.task_id);

        let metrics = learner.learn_task(task.clone(), &mut optimizer, 25)?;

        println!("   - Task accuracy: {:.3}", metrics.current_accuracy);

        if i > 0 {
            // Simulate distillation loss tracking
            let distillation_loss = 0.3f64.mul_add(fastrand::f64(), 0.1);
            distillation_losses.push(distillation_loss);
            println!("   - Distillation loss: {distillation_loss:.3}");

            let all_accuracies = learner.evaluate_all_tasks()?;
            let stability = all_accuracies
                .values()
                .map(|&acc| if acc > 0.6 { 1.0 } else { 0.0 })
                .sum::<f64>()
                / all_accuracies.len() as f64;

            println!("   - Knowledge retention: {:.1}%", stability * 100.0);
        }
    }

    println!("\n   LwF Results:");
    println!("   - Knowledge distillation preserves previous task performance");
    println!("   - Temperature scaling provides soft targets");
    println!("   - Balances plasticity and stability");

    Ok(())
}

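// A minimal sketch of the distillation loss LwF relies on: outputs of the old
// model ("teacher") and the current model ("student") are both softened with a
// temperature T before comparison, so the student is pulled toward the
// teacher's full output distribution rather than hard labels. Illustrative
// helpers only; the strategy applies its own `distillation_weight` and
// `temperature` internally.
fn softmax_with_temperature(logits: &Array1<f64>, temperature: f64) -> Array1<f64> {
    // Higher temperature flattens the distribution, exposing "dark knowledge"
    // in the relative magnitudes of non-maximal outputs.
    let scaled = logits / temperature;
    let max = scaled.iter().fold(f64::NEG_INFINITY, |a, &b| a.max(b));
    let exp = scaled.mapv(|x| (x - max).exp()); // subtract max for stability
    let sum = exp.sum();
    exp / sum
}

fn distillation_loss_sketch(
    teacher_logits: &Array1<f64>,
    student_logits: &Array1<f64>,
    temperature: f64,
) -> f64 {
    let teacher = softmax_with_temperature(teacher_logits, temperature);
    let student = softmax_with_temperature(student_logits, temperature);
    // Cross-entropy between the softened distributions.
    -teacher
        .iter()
        .zip(student.iter())
        .map(|(&t, &s)| t * s.max(1e-12).ln())
        .sum::<f64>()
}
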
/// Demonstrate Parameter Isolation
fn parameter_isolation_demo() -> Result<()> {
    let layers = vec![
        QNNLayerType::EncodingLayer { num_features: 4 },
        QNNLayerType::VariationalLayer { num_params: 16 },
        QNNLayerType::EntanglementLayer {
            connectivity: "full".to_string(),
        },
        QNNLayerType::MeasurementLayer {
            measurement_basis: "computational".to_string(),
        },
    ];

    let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;

    let strategy = ContinualLearningStrategy::ParameterIsolation {
        allocation_strategy: ParameterAllocationStrategy::Masking,
        growth_threshold: 0.8,
    };
    // See `masked_gradient_sketch` after this function for the masking idea.

    let mut learner = QuantumContinualLearner::new(model, strategy);

    println!("   Created Parameter Isolation learner:");
    println!("   - Allocation strategy: Masking");
    println!("   - Growth threshold: 0.8");

    // Generate tasks with different requirements
    let tasks = generate_varying_complexity_tasks(3, 90, 4);

    println!("\n   Learning with parameter isolation...");

    let mut optimizer = Adam::new(0.001);
    let mut parameter_usage = Vec::new();

    for (i, task) in tasks.iter().enumerate() {
        println!("   \n   Allocating parameters for {}...", task.task_id);

        let metrics = learner.learn_task(task.clone(), &mut optimizer, 30)?;

        // Simulate parameter usage tracking
        let used_params = 16 * (i + 1) / tasks.len(); // Gradually use more parameters
        parameter_usage.push(used_params);

        println!("   - Task accuracy: {:.3}", metrics.current_accuracy);
        println!("   - Parameters allocated: {}/{}", used_params, 16);
        println!(
            "   - Parameter efficiency: {:.1}%",
            used_params as f64 / 16.0 * 100.0
        );

        if i > 0 {
            let all_accuracies = learner.evaluate_all_tasks()?;
            let interference = 1.0
                - all_accuracies
                    .values()
                    .take(i)
                    .map(|&acc| if acc > 0.7 { 1.0 } else { 0.0 })
                    .sum::<f64>()
                    / i as f64;

            println!("   - Task interference: {:.1}%", interference * 100.0);
        }
    }

    println!("\n   Parameter Isolation Results:");
    println!("   - Dedicated parameters prevent interference");
    println!("   - Scalable to many tasks");
    println!("   - Maintains task-specific knowledge");

    Ok(())
}

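// A minimal sketch of masked updates under parameter isolation: each task owns
// a binary mask over the parameter vector, and gradients are zeroed outside
// that mask so updates cannot disturb parameters owned by other tasks.
// Hypothetical helper for illustration; mask allocation and growth are handled
// internally by `ParameterAllocationStrategy::Masking`.
fn masked_gradient_sketch(gradient: &Array1<f64>, task_mask: &Array1<f64>) -> Array1<f64> {
    // task_mask holds 1.0 for parameters owned by the current task, else 0.0,
    // so the elementwise product leaves other tasks' parameters untouched.
    gradient * task_mask
}
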
/// Demonstrate comprehensive task sequence evaluation
fn task_sequence_demo() -> Result<()> {
    println!("   Comprehensive continual learning evaluation...");

    // Compare different strategies
    let strategies = vec![
        (
            "EWC",
            ContinualLearningStrategy::ElasticWeightConsolidation {
                importance_weight: 500.0,
                fisher_samples: 100,
            },
        ),
        (
            "Experience Replay",
            ContinualLearningStrategy::ExperienceReplay {
                buffer_size: 300,
                replay_ratio: 0.2,
                memory_selection: MemorySelectionStrategy::Random,
            },
        ),
        (
            "Quantum Regularization",
            ContinualLearningStrategy::QuantumRegularization {
                entanglement_preservation: 0.1,
                parameter_drift_penalty: 0.5,
            },
        ),
    ];

    // Generate challenging task sequence
    let tasks = generate_challenging_sequence(5, 60, 4);

    println!(
        "\n   Comparing strategies on {} challenging tasks:",
        tasks.len()
    );

    for (strategy_name, strategy) in strategies {
        println!("\n   --- {strategy_name} ---");

        let layers = vec![
            QNNLayerType::EncodingLayer { num_features: 4 },
            QNNLayerType::VariationalLayer { num_params: 8 },
            QNNLayerType::MeasurementLayer {
                measurement_basis: "computational".to_string(),
            },
        ];

        let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;
        let mut learner = QuantumContinualLearner::new(model, strategy);
        let mut optimizer = Adam::new(0.001);

        for task in &tasks {
            learner.learn_task(task.clone(), &mut optimizer, 20)?;
        }

        let final_metrics = learner.get_forgetting_metrics();
        println!(
            "   - Average accuracy: {:.3}",
            final_metrics.average_accuracy
        );
        println!(
            "   - Forgetting measure: {:.3}",
            final_metrics.forgetting_measure
        );
        println!(
            "   - CL score: {:.3}",
            final_metrics.continual_learning_score
        );
    }

    Ok(())
}

/// Demonstrate forgetting analysis
fn forgetting_analysis_demo() -> Result<()> {
    println!("   Detailed forgetting analysis...");

    let layers = vec![
        QNNLayerType::EncodingLayer { num_features: 4 },
        QNNLayerType::VariationalLayer { num_params: 12 },
        QNNLayerType::MeasurementLayer {
            measurement_basis: "computational".to_string(),
        },
    ];

    let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;

    let strategy = ContinualLearningStrategy::ElasticWeightConsolidation {
        importance_weight: 1000.0,
        fisher_samples: 150,
    };

    let mut learner = QuantumContinualLearner::new(model, strategy);

    // Create tasks with increasing difficulty
    let tasks = generate_increasing_difficulty_tasks(4, 80, 4);

    println!("\n   Learning tasks with increasing difficulty...");

    let mut optimizer = Adam::new(0.001);
    // Lower-triangular accuracy matrix: row i holds the accuracy on tasks
    // 0..=i measured right after training on task i.
    let mut accuracy_matrix = Vec::new();

    for (i, task) in tasks.iter().enumerate() {
        println!(
            "   \n   Learning {} (difficulty level {})...",
            task.task_id,
            i + 1
        );

        learner.learn_task(task.clone(), &mut optimizer, 25)?;

        // Evaluate on all tasks learned so far
        let all_accuracies = learner.evaluate_all_tasks()?;
        let mut current_row = Vec::new();

        for j in 0..=i {
            let task_id = &tasks[j].task_id;
            let accuracy = all_accuracies.get(task_id).unwrap_or(&0.0);
            current_row.push(*accuracy);
        }

        accuracy_matrix.push(current_row.clone());

        // Print current performance
        for (j, &acc) in current_row.iter().enumerate() {
            println!("   - Task {}: {:.3}", j + 1, acc);
        }
    }

    println!("\n   Forgetting Analysis Results:");

    // Flag pairwise forgetting (see `backward_transfer_sketch` after this
    // function for the standard aggregate form)
    for i in 1..accuracy_matrix.len() {
        for j in 0..i {
            let current_acc = accuracy_matrix[i][j];
            let original_acc = accuracy_matrix[j][j];
            let forgetting = (original_acc - current_acc).max(0.0);

            if forgetting > 0.1 {
                println!("   - Significant forgetting detected for Task {} after learning Task {}: {:.3}",
                    j + 1, i + 1, forgetting);
            }
        }
    }

    // Compute average forgetting
    let mut total_forgetting = 0.0;
    let mut num_comparisons = 0;

    for i in 1..accuracy_matrix.len() {
        for j in 0..i {
            let current_acc = accuracy_matrix[i][j];
            let original_acc = accuracy_matrix[j][j];
            total_forgetting += (original_acc - current_acc).max(0.0);
            num_comparisons += 1;
        }
    }

    let avg_forgetting = if num_comparisons > 0 {
        total_forgetting / f64::from(num_comparisons)
    } else {
        0.0
    };

    println!("   - Average forgetting: {avg_forgetting:.3}");

    // Compute final average accuracy
    if let Some(final_row) = accuracy_matrix.last() {
        let final_avg = final_row.iter().sum::<f64>() / final_row.len() as f64;
        println!("   - Final average accuracy: {final_avg:.3}");
        println!(
            "   - Continual learning effectiveness: {:.1}%",
            (1.0 - avg_forgetting) * 100.0
        );
    }

    Ok(())
}

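// A minimal sketch of backward transfer (BWT) computed from an accuracy matrix
// like the one built above, following the usual definition
// BWT = (1 / (T - 1)) * sum_{i < T-1} (R[T-1][i] - R[i][i]),
// where R[i][j] is the accuracy on task j after training on task i. Negative
// BWT indicates forgetting; positive BWT means later tasks improved earlier
// ones. Illustrative helper, not part of the quantrs2_ml API.
fn backward_transfer_sketch(accuracy_matrix: &[Vec<f64>]) -> f64 {
    let t = accuracy_matrix.len();
    if t < 2 {
        return 0.0;
    }
    // Row i has entries for tasks 0..=i, so the final row covers all tasks.
    let final_row = &accuracy_matrix[t - 1];
    let total: f64 = (0..t - 1)
        .map(|i| final_row[i] - accuracy_matrix[i][i])
        .sum();
    total / (t - 1) as f64
}
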
/// Generate diverse tasks with different characteristics
fn generate_diverse_tasks(
    num_tasks: usize,
    samples_per_task: usize,
    feature_dim: usize,
) -> Vec<ContinualTask> {
    let mut tasks = Vec::new();

    for i in 0..num_tasks {
        let task_type = match i % 3 {
            0 => "classification",
            1 => "pattern_recognition",
            _ => "feature_detection",
        };

        // Generate task-specific data with different distributions
        let data = Array2::from_shape_fn((samples_per_task, feature_dim), |(row, col)| {
            match i % 3 {
                0 => {
                    // Uniform noise around a task-specific center
                    let center = i as f64 * 0.2;
                    0.2f64.mul_add(fastrand::f64() - 0.5, center)
                }
                1 => {
                    // Sinusoidal pattern
                    let freq = (i + 1) as f64;
                    0.3f64.mul_add(
                        (freq * row as f64).mul_add(0.1, col as f64 * 0.2).sin(),
                        0.5,
                    )
                }
                _ => {
                    // Random with task-specific bias
                    let bias = i as f64 * 0.1;
                    fastrand::f64().mul_add(0.6, bias)
                }
            }
        });

        let labels = Array1::from_shape_fn(samples_per_task, |row| {
            let features_sum = data.row(row).sum();
            usize::from(features_sum > feature_dim as f64 * 0.5)
        });

        let task = create_continual_task(
            format!("{task_type}_{i}"),
            TaskType::Classification { num_classes: 2 },
            data,
            labels,
            0.8,
        );

        tasks.push(task);
    }

    tasks
}

/// Generate related tasks for transfer learning
fn generate_related_tasks(
    num_tasks: usize,
    samples_per_task: usize,
    feature_dim: usize,
) -> Vec<ContinualTask> {
    let mut tasks = Vec::new();
    let base_pattern = Array1::from_shape_fn(feature_dim, |i| (i as f64 * 0.3).sin());

    for i in 0..num_tasks {
        // Each task is a variation of the base pattern
        let variation_strength = (i as f64).mul_add(0.1, 0.1);

        let data = Array2::from_shape_fn((samples_per_task, feature_dim), |(row, col)| {
            let base_value = base_pattern[col];
            let variation = variation_strength * (row as f64).mul_add(0.05, col as f64 * 0.1).cos();
            let noise = 0.05 * (fastrand::f64() - 0.5);
            (base_value + variation + noise).max(0.0).min(1.0)
        });

        let labels = Array1::from_shape_fn(samples_per_task, |row| {
            let correlation = data
                .row(row)
                .iter()
                .zip(base_pattern.iter())
                .map(|(&x, &y)| x * y)
                .sum::<f64>();
            usize::from(correlation > 0.5)
        });

        let task = create_continual_task(
            format!("related_task_{i}"),
            TaskType::Classification { num_classes: 2 },
            data,
            labels,
            0.8,
        );

        tasks.push(task);
    }

    tasks
}

/// Generate tasks with varying complexity
fn generate_varying_complexity_tasks(
    num_tasks: usize,
    samples_per_task: usize,
    feature_dim: usize,
) -> Vec<ContinualTask> {
    let mut tasks = Vec::new();

    for i in 0..num_tasks {
        let complexity = (i + 1) as f64; // Increasing complexity

        let data = Array2::from_shape_fn((samples_per_task, feature_dim), |(row, col)| {
            // More complex decision boundaries for later tasks
            let x = row as f64 / samples_per_task as f64;
            let y = col as f64 / feature_dim as f64;

            let value = match i {
                0 => {
                    if x > 0.5 {
                        1.0
                    } else {
                        0.0
                    }
                } // Simple linear
                1 => {
                    if x.mul_add(x, y * y) > 0.25 {
                        1.0
                    } else {
                        0.0
                    }
                } // Circular
                2 => {
                    if (x * 4.0).sin() * (y * 4.0).cos() > 0.0 {
                        1.0
                    } else {
                        0.0
                    }
                } // Sinusoidal
                _ => {
                    // Very complex pattern
                    let pattern = (x * 8.0)
                        .sin()
                        .mul_add((y * 8.0).cos(), (x * y * 16.0).sin());
                    if pattern > 0.0 {
                        1.0
                    } else {
                        0.0
                    }
                }
            };

            0.1f64.mul_add(fastrand::f64() - 0.5, value) // Add noise
        });

        let labels = Array1::from_shape_fn(samples_per_task, |row| {
            // Complex labeling based on multiple features
            let features = data.row(row);
            let decision_value = features
                .iter()
                .enumerate()
                .map(|(j, &x)| x * (j as f64 * complexity).mul_add(0.1, 1.0))
                .sum::<f64>();

            usize::from(decision_value > feature_dim as f64 * 0.5)
        });

        let task = create_continual_task(
            format!("complex_task_{i}"),
            TaskType::Classification { num_classes: 2 },
            data,
            labels,
            0.8,
        );

        tasks.push(task);
    }

    tasks
}

/// Generate challenging task sequence
fn generate_challenging_sequence(
    num_tasks: usize,
    samples_per_task: usize,
    feature_dim: usize,
) -> Vec<ContinualTask> {
    let mut tasks = Vec::new();

    for i in 0..num_tasks {
        // Alternating between different types of challenges
        let challenge_type = i % 4;

        let data = Array2::from_shape_fn((samples_per_task, feature_dim), |(row, col)| {
            match challenge_type {
                0 => {
                    // High-frequency patterns
                    let freq = (i as f64).mul_add(2.0, 10.0);
                    0.4f64.mul_add((freq * row as f64 * 0.01).sin(), 0.5)
                }
                1 => {
                    // Overlapping distributions
                    let center1 = (i as f64).mul_add(0.05, 0.3);
                    let center2 = (i as f64).mul_add(-0.05, 0.7);
                    if row % 2 == 0 {
                        0.15f64.mul_add(fastrand::f64() - 0.5, center1)
                    } else {
                        0.15f64.mul_add(fastrand::f64() - 0.5, center2)
                    }
                }
                2 => {
                    // Non-linear patterns
                    let x = row as f64 / samples_per_task as f64;
                    let y = col as f64 / feature_dim as f64;
                    let pattern = (i as f64).mul_add(0.1, x.mul_add(x, -(y * y))).tanh();
                    0.3f64.mul_add(pattern, 0.5)
                }
                _ => {
                    // Sparse patterns
                    if fastrand::f64() < 0.2 {
                        0.2f64.mul_add(fastrand::f64(), 0.8)
                    } else {
                        0.1 * fastrand::f64()
                    }
                }
            }
        });

        let labels = Array1::from_shape_fn(samples_per_task, |row| {
            let features = data.row(row);
            match challenge_type {
                0 => usize::from(features.sum() > feature_dim as f64 * 0.5),
                1 => usize::from(features[0] > 0.5),
                2 => usize::from(
                    features
                        .iter()
                        .enumerate()
                        .map(|(j, &x)| x * (j as f64 + 1.0))
                        .sum::<f64>()
                        > 2.0,
                ),
                _ => usize::from(features.iter().filter(|&&x| x > 0.5).count() > feature_dim / 2),
            }
        });

        let task = create_continual_task(
            format!("challenge_{i}"),
            TaskType::Classification { num_classes: 2 },
            data,
            labels,
            0.8,
        );

        tasks.push(task);
    }

    tasks
}

/// Generate tasks with increasing difficulty
fn generate_increasing_difficulty_tasks(
    num_tasks: usize,
    samples_per_task: usize,
    feature_dim: usize,
) -> Vec<ContinualTask> {
    let mut tasks = Vec::new();

    for i in 0..num_tasks {
        let difficulty = (i + 1) as f64;
        let noise_level = 0.05 + difficulty * 0.02;
        let pattern_complexity = 1.0 + difficulty * 0.5;

        let data = Array2::from_shape_fn((samples_per_task, feature_dim), |(row, col)| {
            let x = row as f64 / samples_per_task as f64;
            let y = col as f64 / feature_dim as f64;

            // Increasingly complex patterns
            let base_pattern = (x * pattern_complexity * std::f64::consts::PI).sin()
                * (y * pattern_complexity * std::f64::consts::PI).cos();

            let pattern_value = 0.3f64.mul_add(base_pattern, 0.5);
            let noise = noise_level * (fastrand::f64() - 0.5);

            (pattern_value + noise).max(0.0).min(1.0)
        });

        let labels = Array1::from_shape_fn(samples_per_task, |row| {
            let features = data.row(row);

            // Increasingly complex decision boundaries
            let decision_value = features
                .iter()
                .enumerate()
                .map(|(j, &x)| {
                    let weight = 1.0 + (j as f64 * difficulty * 0.1).sin();
                    x * weight
                })
                .sum::<f64>();

            let threshold = feature_dim as f64 * 0.5 * (1.0 + difficulty * 0.1);
            usize::from(decision_value > threshold)
        });

        let task = create_continual_task(
            format!("difficulty_{}", i + 1),
            TaskType::Classification { num_classes: 2 },
            data,
            labels,
            0.8,
        );

        tasks.push(task);
    }

    tasks
}