Struct QuantumVisionPipeline 

Source
pub struct QuantumVisionPipeline {
    pub config: QuantumVisionConfig,
    pub encoder: QuantumImageEncoder,
    pub backbone: Box<dyn VisionModel>,
    pub task_head: Box<dyn TaskHead>,
    pub feature_extractor: QuantumFeatureExtractor,
    pub preprocessor: ImagePreprocessor,
    pub metrics: VisionMetrics,
}

Main quantum computer vision pipeline, combining image preprocessing, quantum image encoding, a vision backbone, and a task-specific head.

Fields

config: QuantumVisionConfig

Pipeline configuration

encoder: QuantumImageEncoder

Image encoder

backbone: Box<dyn VisionModel>

Vision backbone

task_head: Box<dyn TaskHead>

Task-specific head

feature_extractor: QuantumFeatureExtractor

Feature extractor

preprocessor: ImagePreprocessor

Preprocessing pipeline

metrics: VisionMetrics

Performance metrics
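
Taken together, these components form a single forward path: images are preprocessed, encoded into quantum states, passed through the backbone, and decoded by the task head. A minimal end-to-end sketch, assuming that scirs2_core re-exports ndarray (as the repository example's use of scirs2_core::ndarray::s! suggests) and that the Array4 input layout is (batch, channels, height, width), inferred from the example's create_test_image(2, 3, 224, 224) call:

use scirs2_core::ndarray::Array4;

fn run_default_pipeline() -> Result<()> {
    // Default configuration; the repository example uses it for classification.
    let config = QuantumVisionConfig::default();
    let mut pipeline = QuantumVisionPipeline::new(config)?;

    // Dummy batch: two 3-channel 224x224 images, all zeros.
    let images = Array4::<f64>::zeros((2, 3, 224, 224));

    // Run the forward pass and inspect the task-specific output.
    match pipeline.forward(&images)? {
        TaskOutput::Classification { probabilities, .. } => {
            println!("probability shape: {:?}", probabilities.dim());
        }
        _ => { /* Detection, Segmentation, Features, ... */ }
    }
    Ok(())
}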

Implementations

impl QuantumVisionPipeline

pub fn new(config: QuantumVisionConfig) -> Result<Self>

Create a new quantum vision pipeline from the given configuration.

Examples found in repository:
examples/computer_vision.rs (line 212)
132fn vision_backbone_demo() -> Result<()> {
133    println!("   Testing quantum vision backbone architectures...");
134
135    // Different backbone configurations
136    let backbones = vec![
137        (
138            "Quantum CNN",
139            QuantumVisionConfig {
140                num_qubits: 12,
141                encoding_method: ImageEncodingMethod::AmplitudeEncoding,
142                backbone: VisionBackbone::QuantumCNN {
143                    conv_layers: vec![
144                        ConvolutionalConfig {
145                            num_filters: 32,
146                            kernel_size: 3,
147                            stride: 1,
148                            padding: 1,
149                            quantum_kernel: true,
150                            circuit_depth: 4,
151                        },
152                        ConvolutionalConfig {
153                            num_filters: 64,
154                            kernel_size: 3,
155                            stride: 2,
156                            padding: 1,
157                            quantum_kernel: true,
158                            circuit_depth: 6,
159                        },
160                    ],
161                    pooling_type: PoolingType::Quantum,
162                },
163                task_config: VisionTaskConfig::Classification {
164                    num_classes: 10,
165                    multi_label: false,
166                },
167                preprocessing: PreprocessingConfig::default(),
168                quantum_enhancement: QuantumEnhancement::Medium,
169            },
170        ),
171        (
172            "Quantum ViT",
173            QuantumVisionConfig {
174                num_qubits: 16,
175                encoding_method: ImageEncodingMethod::QPIE,
176                backbone: VisionBackbone::QuantumViT {
177                    patch_size: 16,
178                    embed_dim: 768,
179                    num_heads: 12,
180                    depth: 12,
181                },
182                task_config: VisionTaskConfig::Classification {
183                    num_classes: 10,
184                    multi_label: false,
185                },
186                preprocessing: PreprocessingConfig::default(),
187                quantum_enhancement: QuantumEnhancement::High,
188            },
189        ),
190        (
191            "Hybrid CNN-Transformer",
192            QuantumVisionConfig {
193                num_qubits: 14,
194                encoding_method: ImageEncodingMethod::HierarchicalEncoding { levels: 3 },
195                backbone: VisionBackbone::HybridBackbone {
196                    cnn_layers: 4,
197                    transformer_layers: 2,
198                },
199                task_config: VisionTaskConfig::Classification {
200                    num_classes: 10,
201                    multi_label: false,
202                },
203                preprocessing: PreprocessingConfig::default(),
204                quantum_enhancement: QuantumEnhancement::High,
205            },
206        ),
207    ];
208
209    for (name, config) in backbones {
210        println!("\n   --- {name} Backbone ---");
211
212        let mut pipeline = QuantumVisionPipeline::new(config)?;
213
214        // Test forward pass
215        let test_images = create_test_image(2, 3, 224, 224)?;
216        let output = pipeline.forward(&test_images)?;
217
218        if let TaskOutput::Classification {
219            logits,
220            probabilities,
221        } = &output
222        {
223            println!("   Output shape: {:?}", logits.dim());
224            println!("   Probability shape: {:?}", probabilities.dim());
225        }
226
227        // Get metrics
228        let metrics = pipeline.metrics();
229        println!("   Quantum metrics:");
230        println!(
231            "   - Circuit depth: {}",
232            metrics.quantum_metrics.circuit_depth
233        );
234        println!(
235            "   - Quantum advantage: {:.2}x",
236            metrics.quantum_metrics.quantum_advantage
237        );
238        println!(
239            "   - Coherence utilization: {:.1}%",
240            metrics.quantum_metrics.coherence_utilization * 100.0
241        );
242
243        // Architecture-specific properties
244        match name {
245            "Quantum CNN" => {
246                println!("   ✓ Hierarchical feature extraction with quantum convolutions");
247            }
248            "Quantum ViT" => {
249                println!("   ✓ Global context modeling with quantum attention");
250            }
251            "Hybrid CNN-Transformer" => {
252                println!("   ✓ Local features + global context integration");
253            }
254            _ => {}
255        }
256    }
257
258    Ok(())
259}
260
261/// Demonstrate image classification
262fn classification_demo() -> Result<()> {
263    println!("   Quantum image classification demo...");
264
265    // Create classification pipeline
266    let config = QuantumVisionConfig::default();
267    let mut pipeline = QuantumVisionPipeline::new(config)?;
268
269    // Create synthetic dataset
270    let num_classes = 10;
271    let num_samples = 20;
272    let (train_data, val_data) = create_classification_dataset(num_samples, num_classes)?;
273
274    println!(
275        "   Dataset: {} training, {} validation samples",
276        train_data.len(),
277        val_data.len()
278    );
279
280    // Train the model (simplified)
281    println!("\n   Training quantum classifier...");
282    let history = pipeline.train(
283        &train_data,
284        &val_data,
285        5, // epochs
286        OptimizationMethod::Adam,
287    )?;
288
289    // Display training results
290    println!("\n   Training results:");
291    for (epoch, train_loss, val_loss) in history
292        .epochs
293        .iter()
294        .zip(history.train_losses.iter())
295        .zip(history.val_losses.iter())
296        .map(|((e, t), v)| (e, t, v))
297    {
298        println!(
299            "   Epoch {}: train_loss={:.4}, val_loss={:.4}",
300            epoch + 1,
301            train_loss,
302            val_loss
303        );
304    }
305
306    // Test on new images
307    println!("\n   Testing on new images...");
308    let test_images = create_test_image(5, 3, 224, 224)?;
309    let predictions = pipeline.forward(&test_images)?;
310
311    if let TaskOutput::Classification { probabilities, .. } = predictions {
312        for (i, prob_row) in probabilities.outer_iter().enumerate() {
313            let (predicted_class, confidence) = prob_row
314                .iter()
315                .enumerate()
316                .max_by(|(_, a), (_, b)| a.partial_cmp(b).unwrap())
317                .map_or((0, 0.0), |(idx, &prob)| (idx, prob));
318
319            println!(
320                "   Image {}: Class {} (confidence: {:.2}%)",
321                i + 1,
322                predicted_class,
323                confidence * 100.0
324            );
325        }
326    }
327
328    // Analyze quantum advantage
329    let quantum_advantage = analyze_classification_quantum_advantage(&pipeline)?;
330    println!("\n   Quantum advantage analysis:");
331    println!(
332        "   - Parameter efficiency: {:.2}x classical",
333        quantum_advantage.param_efficiency
334    );
335    println!(
336        "   - Feature expressiveness: {:.2}x",
337        quantum_advantage.expressiveness
338    );
339    println!(
340        "   - Training speedup: {:.2}x",
341        quantum_advantage.training_speedup
342    );
343
344    Ok(())
345}
346
347/// Demonstrate object detection
348fn object_detection_demo() -> Result<()> {
349    println!("   Quantum object detection demo...");
350
351    // Create detection pipeline
352    let config = QuantumVisionConfig::object_detection(80); // 80 classes (COCO-like)
353    let mut pipeline = QuantumVisionPipeline::new(config)?;
354
355    // Test image
356    let test_images = create_test_image(2, 3, 416, 416)?;
357
358    println!(
359        "   Processing {} images for object detection...",
360        test_images.dim().0
361    );
362
363    // Run detection
364    let detections = pipeline.forward(&test_images)?;
365
366    if let TaskOutput::Detection {
367        boxes,
368        scores,
369        classes,
370    } = detections
371    {
372        println!("   Detection results:");
373
374        for batch_idx in 0..boxes.dim().0 {
375            println!("\n   Image {}:", batch_idx + 1);
376
377            // Filter detections by score threshold
378            let threshold = 0.5;
379            let mut num_detections = 0;
380
381            for det_idx in 0..boxes.dim().1 {
382                let score = scores[[batch_idx, det_idx]];
383
384                if score > threshold {
385                    let class_id = classes[[batch_idx, det_idx]];
386                    let bbox = boxes.slice(scirs2_core::ndarray::s![batch_idx, det_idx, ..]);
387
388                    println!(
389                        "   - Object {}: Class {}, Score {:.3}, Box [{:.1}, {:.1}, {:.1}, {:.1}]",
390                        num_detections + 1,
391                        class_id,
392                        score,
393                        bbox[0],
394                        bbox[1],
395                        bbox[2],
396                        bbox[3]
397                    );
398
399                    num_detections += 1;
400                }
401            }
402
403            if num_detections == 0 {
404                println!("   - No objects detected above threshold");
405            } else {
406                println!("   Total objects detected: {num_detections}");
407            }
408        }
409    }
410
411    // Analyze detection performance
412    println!("\n   Detection performance analysis:");
413    println!("   - Quantum anchor generation improves localization");
414    println!("   - Entangled features enhance multi-scale detection");
415    println!("   - Quantum NMS reduces redundant detections");
416
417    Ok(())
418}
419
420/// Demonstrate semantic segmentation
421fn segmentation_demo() -> Result<()> {
422    println!("   Quantum semantic segmentation demo...");
423
424    // Create segmentation pipeline
425    let config = QuantumVisionConfig::segmentation(21); // 21 classes (Pascal VOC-like)
426    let mut pipeline = QuantumVisionPipeline::new(config)?;
427
428    // Test images
429    let test_images = create_test_image(1, 3, 512, 512)?;
430
431    println!("   Processing image for semantic segmentation...");
432
433    // Run segmentation
434    let segmentation = pipeline.forward(&test_images)?;
435
436    if let TaskOutput::Segmentation {
437        masks,
438        class_scores,
439    } = segmentation
440    {
441        println!("   Segmentation results:");
442        println!("   - Mask shape: {:?}", masks.dim());
443        println!("   - Class scores shape: {:?}", class_scores.dim());
444
445        // Analyze segmentation quality
446        let seg_metrics = analyze_segmentation_quality(&masks, &class_scores)?;
447        println!("\n   Segmentation metrics:");
448        println!("   - Mean IoU: {:.3}", seg_metrics.mean_iou);
449        println!(
450            "   - Pixel accuracy: {:.1}%",
451            seg_metrics.pixel_accuracy * 100.0
452        );
453        println!(
454            "   - Boundary precision: {:.3}",
455            seg_metrics.boundary_precision
456        );
457
458        // Class distribution
459        println!("\n   Predicted class distribution:");
460        let class_counts = compute_class_distribution(&masks)?;
461        for (class_id, count) in class_counts.iter().take(5) {
462            let percentage = *count as f64 / (512.0 * 512.0) * 100.0;
463            println!("   - Class {class_id}: {percentage:.1}% of pixels");
464        }
465    }
466
467    // Quantum advantages for segmentation
468    println!("\n   Quantum segmentation advantages:");
469    println!("   - Quantum attention captures long-range dependencies");
470    println!("   - Hierarchical encoding preserves multi-scale features");
471    println!("   - Entanglement enables pixel-to-pixel correlations");
472
473    Ok(())
474}
475
476/// Demonstrate feature extraction
477fn feature_extraction_demo() -> Result<()> {
478    println!("   Quantum feature extraction demo...");
479
480    // Create feature extraction pipeline
481    let config = QuantumVisionConfig {
482        num_qubits: 14,
483        encoding_method: ImageEncodingMethod::QPIE,
484        backbone: VisionBackbone::QuantumResNet {
485            blocks: vec![
486                ResidualBlock {
487                    channels: 64,
488                    kernel_size: 3,
489                    stride: 1,
490                    quantum_conv: true,
491                },
492                ResidualBlock {
493                    channels: 128,
494                    kernel_size: 3,
495                    stride: 2,
496                    quantum_conv: true,
497                },
498            ],
499            skip_connections: true,
500        },
501        task_config: VisionTaskConfig::FeatureExtraction {
502            feature_dim: 512,
503            normalize: true,
504        },
505        preprocessing: PreprocessingConfig::default(),
506        quantum_enhancement: QuantumEnhancement::High,
507    };
508
509    let mut pipeline = QuantumVisionPipeline::new(config)?;
510
511    // Extract features from multiple images
512    let num_images = 10;
513    let test_images = create_test_image(num_images, 3, 224, 224)?;
514
515    println!("   Extracting features from {num_images} images...");
516
517    let features_output = pipeline.forward(&test_images)?;
518
519    if let TaskOutput::Features {
520        features,
521        attention_maps,
522    } = features_output
523    {
524        println!("   Feature extraction results:");
525        println!("   - Feature dimension: {}", features.dim().1);
526        println!("   - Features normalized: Yes");
527
528        // Compute feature statistics
529        let feature_stats = compute_feature_statistics(&features)?;
530        println!("\n   Feature statistics:");
531        println!("   - Mean magnitude: {:.4}", feature_stats.mean_magnitude);
532        println!("   - Variance: {:.4}", feature_stats.variance);
533        println!("   - Sparsity: {:.1}%", feature_stats.sparsity * 100.0);
534
535        // Compute pairwise similarities
536        println!("\n   Feature similarity matrix (first 5 images):");
537        let similarities = compute_cosine_similarities(&features)?;
538
539        print!("       ");
540        for i in 0..5.min(num_images) {
541            print!("Img{}  ", i + 1);
542        }
543        println!();
544
545        for i in 0..5.min(num_images) {
546            print!("   Img{} ", i + 1);
547            for j in 0..5.min(num_images) {
548                print!("{:.3} ", similarities[[i, j]]);
549            }
550            println!();
551        }
552
553        // Quantum feature properties
554        println!("\n   Quantum feature properties:");
555        println!("   - Entanglement enhances discriminative power");
556        println!("   - Quantum superposition encodes multiple views");
557        println!("   - Phase information captures subtle variations");
558    }
559
560    Ok(())
561}
562
563/// Demonstrate multi-task learning
564fn multitask_demo() -> Result<()> {
565    println!("   Multi-task quantum vision demo...");
566
567    // Create a pipeline that can handle multiple tasks
568    let tasks = vec![
569        (
570            "Classification",
571            VisionTaskConfig::Classification {
572                num_classes: 10,
573                multi_label: false,
574            },
575        ),
576        (
577            "Detection",
578            VisionTaskConfig::ObjectDetection {
579                num_classes: 20,
580                anchor_sizes: vec![(32, 32), (64, 64)],
581                iou_threshold: 0.5,
582            },
583        ),
584        (
585            "Segmentation",
586            VisionTaskConfig::Segmentation {
587                num_classes: 10,
588                output_stride: 8,
589            },
590        ),
591    ];
592
593    println!(
594        "   Testing {} vision tasks with shared backbone...",
595        tasks.len()
596    );
597
598    // Use same backbone for all tasks
599    let base_config = QuantumVisionConfig {
600        num_qubits: 16,
601        encoding_method: ImageEncodingMethod::HierarchicalEncoding { levels: 3 },
602        backbone: VisionBackbone::HybridBackbone {
603            cnn_layers: 4,
604            transformer_layers: 2,
605        },
606        task_config: tasks[0].1.clone(), // Will be replaced for each task
607        preprocessing: PreprocessingConfig::default(),
608        quantum_enhancement: QuantumEnhancement::High,
609    };
610
611    // Test each task
612    let test_images = create_test_image(2, 3, 416, 416)?;
613
614    for (task_name, task_config) in tasks {
615        println!("\n   --- {task_name} Task ---");
616
617        let mut config = base_config.clone();
618        config.task_config = task_config;
619
620        let mut pipeline = QuantumVisionPipeline::new(config)?;
621        let output = pipeline.forward(&test_images)?;
622
623        match output {
624            TaskOutput::Classification { logits, .. } => {
625                println!("   Classification output shape: {:?}", logits.dim());
626            }
627            TaskOutput::Detection { boxes, scores, .. } => {
628                println!(
629                    "   Detection: {} anchors, score shape: {:?}",
630                    boxes.dim().1,
631                    scores.dim()
632                );
633            }
634            TaskOutput::Segmentation { masks, .. } => {
635                println!("   Segmentation mask shape: {:?}", masks.dim());
636            }
637            _ => {}
638        }
639
640        // Task-specific quantum advantages
641        match task_name {
642            "Classification" => {
643                println!("   ✓ Quantum features improve class discrimination");
644            }
645            "Detection" => {
646                println!("   ✓ Quantum anchors adapt to object scales");
647            }
648            "Segmentation" => {
649                println!("   ✓ Quantum correlations enhance boundary detection");
650            }
651            _ => {}
652        }
653    }
654
655    println!("\n   Multi-task benefits:");
656    println!("   - Shared quantum backbone reduces parameters");
657    println!("   - Task-specific quantum heads optimize performance");
658    println!("   - Quantum entanglement enables cross-task learning");
659
660    Ok(())
661}
662
663/// Demonstrate performance analysis
664fn performance_analysis_demo() -> Result<()> {
665    println!("   Analyzing quantum vision performance...");
666
667    // Compare different quantum enhancement levels
668    let enhancement_levels = vec![
669        ("Low", QuantumEnhancement::Low),
670        ("Medium", QuantumEnhancement::Medium),
671        ("High", QuantumEnhancement::High),
672        (
673            "Custom",
674            QuantumEnhancement::Custom {
675                quantum_layers: vec![0, 2, 4, 6],
676                entanglement_strength: 0.8,
677            },
678        ),
679    ];
680
681    println!("\n   Quantum Enhancement Level Comparison:");
682    println!("   Level    | FLOPs   | Memory  | Accuracy | Q-Advantage");
683    println!("   ---------|---------|---------|----------|------------");
684
685    for (level_name, enhancement) in enhancement_levels {
686        let config = QuantumVisionConfig {
687            num_qubits: 12,
688            encoding_method: ImageEncodingMethod::AmplitudeEncoding,
689            backbone: VisionBackbone::QuantumCNN {
690                conv_layers: vec![ConvolutionalConfig {
691                    num_filters: 32,
692                    kernel_size: 3,
693                    stride: 1,
694                    padding: 1,
695                    quantum_kernel: true,
696                    circuit_depth: 4,
697                }],
698                pooling_type: PoolingType::Quantum,
699            },
700            task_config: VisionTaskConfig::Classification {
701                num_classes: 10,
702                multi_label: false,
703            },
704            preprocessing: PreprocessingConfig::default(),
705            quantum_enhancement: enhancement,
706        };
707
708        let pipeline = QuantumVisionPipeline::new(config)?;
709        let metrics = pipeline.metrics();
710
711        // Simulate performance metrics
712        let (flops, memory, accuracy, q_advantage) = match level_name {
713            "Low" => (1.2, 50.0, 0.85, 1.2),
714            "Medium" => (2.5, 80.0, 0.88, 1.5),
715            "High" => (4.1, 120.0, 0.91, 2.1),
716            "Custom" => (3.2, 95.0, 0.90, 1.8),
717            _ => (0.0, 0.0, 0.0, 0.0),
718        };
719
720        println!(
721            "   {:<8} | {:.1}G | {:.0}MB | {:.1}%  | {:.1}x",
722            level_name,
723            flops,
724            memory,
725            accuracy * 100.0,
726            q_advantage
727        );
728    }
729
730    // Scalability analysis
731    println!("\n   Scalability Analysis:");
732    let image_sizes = vec![64, 128, 224, 416, 512];
733
734    println!("   Image Size | Inference Time | Throughput");
735    println!("   -----------|----------------|------------");
736
737    for size in image_sizes {
738        let inference_time = (f64::from(size) / 100.0).mul_add(f64::from(size) / 100.0, 5.0);
739        let throughput = 1000.0 / inference_time;
740
741        println!("   {size}x{size}   | {inference_time:.1}ms        | {throughput:.0} img/s");
742    }
743
744    // Quantum advantages summary
745    println!("\n   Quantum Computer Vision Advantages:");
746    println!("   1. Exponential feature space with limited qubits");
747    println!("   2. Natural multi-scale representation via entanglement");
748    println!("   3. Quantum attention for global context modeling");
749    println!("   4. Phase encoding for rotation-invariant features");
750    println!("   5. Quantum pooling preserves superposition information");
751
752    // Hardware requirements
753    println!("\n   Hardware Requirements:");
754    println!("   - Minimum qubits: 10 (basic tasks)");
755    println!("   - Recommended: 16-20 qubits (complex tasks)");
756    println!("   - Coherence time: >100μs for deep networks");
757    println!("   - Gate fidelity: >99.9% for accurate predictions");
758
759    Ok(())
760}

pub fn forward(&mut self, images: &Array4<f64>) -> Result<TaskOutput>

Process a batch of images through the pipeline, returning the task-specific output.
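
The output variant matches the configured task. A compact sketch of handling each variant seen in the repository example; field names are taken from that example, and the catch-all arm covers any variants not shown there:

use scirs2_core::ndarray::Array4;

fn inspect_output(pipeline: &mut QuantumVisionPipeline, images: &Array4<f64>) -> Result<()> {
    match pipeline.forward(images)? {
        TaskOutput::Classification { logits, probabilities } => {
            println!("logits: {:?}, probabilities: {:?}", logits.dim(), probabilities.dim());
        }
        TaskOutput::Detection { boxes, scores, classes } => {
            // In the example, boxes are indexed [batch, detection, coordinate].
            println!("{} candidate boxes, score shape {:?}", boxes.dim().1, scores.dim());
            let _ = classes;
        }
        TaskOutput::Segmentation { masks, class_scores } => {
            println!("masks: {:?}, class scores: {:?}", masks.dim(), class_scores.dim());
        }
        TaskOutput::Features { features, .. } => {
            println!("feature dimension: {}", features.dim().1);
        }
        _ => { /* other task variants, if any */ }
    }
    Ok(())
}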

Examples found in repository:
examples/computer_vision.rs (line 216)
The expanded listing is identical to the one shown under new above; the forward call appears at line 216 of examples/computer_vision.rs.

pub fn train(
    &mut self,
    train_data: &[(Array4<f64>, TaskTarget)],
    val_data: &[(Array4<f64>, TaskTarget)],
    epochs: usize,
    optimizer: OptimizationMethod,
) -> Result<TrainingHistory>

Train the pipeline on labeled data, returning the per-epoch training history.
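
A minimal training sketch. How a TaskTarget is built is task-specific and not documented on this page, so the helper below is hypothetical; the epoch count, optimizer, and TrainingHistory field names mirror the repository example:

use scirs2_core::ndarray::Array4;

// Hypothetical helper: stands in for real TaskTarget construction,
// which depends on the configured task (labels, boxes, masks, ...).
fn make_dataset(
    n: usize,
    target_for: impl Fn(usize) -> TaskTarget,
) -> Vec<(Array4<f64>, TaskTarget)> {
    (0..n)
        .map(|i| (Array4::<f64>::zeros((1, 3, 224, 224)), target_for(i)))
        .collect()
}

fn train_briefly(
    pipeline: &mut QuantumVisionPipeline,
    train_data: &[(Array4<f64>, TaskTarget)],
    val_data: &[(Array4<f64>, TaskTarget)],
) -> Result<()> {
    // Five epochs with Adam, as in the repository example.
    let history = pipeline.train(train_data, val_data, 5, OptimizationMethod::Adam)?;

    // Per-epoch losses, using the field names the example prints.
    for ((epoch, train_loss), val_loss) in history
        .epochs
        .iter()
        .zip(history.train_losses.iter())
        .zip(history.val_losses.iter())
    {
        println!("epoch {}: train={train_loss:.4}, val={val_loss:.4}", epoch + 1);
    }
    Ok(())
}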

Examples found in repository:
examples/computer_vision.rs (lines 282-287)
The expanded listing repeats classification_demo from the example shown under new above; the train call spans lines 282-287 of examples/computer_vision.rs.

pub fn metrics(&self) -> &VisionMetrics

Get performance metrics
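
A short sketch that reads the quantum metrics the example prints; the field names (quantum_metrics, circuit_depth, quantum_advantage, coherence_utilization) are taken from the example code:

fn report_quantum_metrics(pipeline: &QuantumVisionPipeline) {
    let metrics = pipeline.metrics();
    let q = &metrics.quantum_metrics;
    println!("circuit depth:         {}", q.circuit_depth);
    println!("quantum advantage:     {:.2}x", q.quantum_advantage);
    println!("coherence utilization: {:.1}%", q.coherence_utilization * 100.0);
}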

Examples found in repository:
examples/computer_vision.rs (line 228)
132fn vision_backbone_demo() -> Result<()> {
133    println!("   Testing quantum vision backbone architectures...");
134
135    // Different backbone configurations
136    let backbones = vec![
137        (
138            "Quantum CNN",
139            QuantumVisionConfig {
140                num_qubits: 12,
141                encoding_method: ImageEncodingMethod::AmplitudeEncoding,
142                backbone: VisionBackbone::QuantumCNN {
143                    conv_layers: vec![
144                        ConvolutionalConfig {
145                            num_filters: 32,
146                            kernel_size: 3,
147                            stride: 1,
148                            padding: 1,
149                            quantum_kernel: true,
150                            circuit_depth: 4,
151                        },
152                        ConvolutionalConfig {
153                            num_filters: 64,
154                            kernel_size: 3,
155                            stride: 2,
156                            padding: 1,
157                            quantum_kernel: true,
158                            circuit_depth: 6,
159                        },
160                    ],
161                    pooling_type: PoolingType::Quantum,
162                },
163                task_config: VisionTaskConfig::Classification {
164                    num_classes: 10,
165                    multi_label: false,
166                },
167                preprocessing: PreprocessingConfig::default(),
168                quantum_enhancement: QuantumEnhancement::Medium,
169            },
170        ),
171        (
172            "Quantum ViT",
173            QuantumVisionConfig {
174                num_qubits: 16,
175                encoding_method: ImageEncodingMethod::QPIE,
176                backbone: VisionBackbone::QuantumViT {
177                    patch_size: 16,
178                    embed_dim: 768,
179                    num_heads: 12,
180                    depth: 12,
181                },
182                task_config: VisionTaskConfig::Classification {
183                    num_classes: 10,
184                    multi_label: false,
185                },
186                preprocessing: PreprocessingConfig::default(),
187                quantum_enhancement: QuantumEnhancement::High,
188            },
189        ),
190        (
191            "Hybrid CNN-Transformer",
192            QuantumVisionConfig {
193                num_qubits: 14,
194                encoding_method: ImageEncodingMethod::HierarchicalEncoding { levels: 3 },
195                backbone: VisionBackbone::HybridBackbone {
196                    cnn_layers: 4,
197                    transformer_layers: 2,
198                },
199                task_config: VisionTaskConfig::Classification {
200                    num_classes: 10,
201                    multi_label: false,
202                },
203                preprocessing: PreprocessingConfig::default(),
204                quantum_enhancement: QuantumEnhancement::High,
205            },
206        ),
207    ];
208
209    for (name, config) in backbones {
210        println!("\n   --- {name} Backbone ---");
211
212        let mut pipeline = QuantumVisionPipeline::new(config)?;
213
214        // Test forward pass
215        let test_images = create_test_image(2, 3, 224, 224)?;
216        let output = pipeline.forward(&test_images)?;
217
218        if let TaskOutput::Classification {
219            logits,
220            probabilities,
221        } = &output
222        {
223            println!("   Output shape: {:?}", logits.dim());
224            println!("   Probability shape: {:?}", probabilities.dim());
225        }
226
227        // Get metrics
228        let metrics = pipeline.metrics();
229        println!("   Quantum metrics:");
230        println!(
231            "   - Circuit depth: {}",
232            metrics.quantum_metrics.circuit_depth
233        );
234        println!(
235            "   - Quantum advantage: {:.2}x",
236            metrics.quantum_metrics.quantum_advantage
237        );
238        println!(
239            "   - Coherence utilization: {:.1}%",
240            metrics.quantum_metrics.coherence_utilization * 100.0
241        );
242
243        // Architecture-specific properties
244        match name {
245            "Quantum CNN" => {
246                println!("   ✓ Hierarchical feature extraction with quantum convolutions");
247            }
248            "Quantum ViT" => {
249                println!("   ✓ Global context modeling with quantum attention");
250            }
251            "Hybrid CNN-Transformer" => {
252                println!("   ✓ Local features + global context integration");
253            }
254            _ => {}
255        }
256    }
257
258    Ok(())
259}
260
261/// Demonstrate image classification
262fn classification_demo() -> Result<()> {
263    println!("   Quantum image classification demo...");
264
265    // Create classification pipeline
266    let config = QuantumVisionConfig::default();
267    let mut pipeline = QuantumVisionPipeline::new(config)?;
268
269    // Create synthetic dataset
270    let num_classes = 10;
271    let num_samples = 20;
272    let (train_data, val_data) = create_classification_dataset(num_samples, num_classes)?;
273
274    println!(
275        "   Dataset: {} training, {} validation samples",
276        train_data.len(),
277        val_data.len()
278    );
279
280    // Train the model (simplified)
281    println!("\n   Training quantum classifier...");
282    let history = pipeline.train(
283        &train_data,
284        &val_data,
285        5, // epochs
286        OptimizationMethod::Adam,
287    )?;
288
289    // Display training results
290    println!("\n   Training results:");
291    for (epoch, train_loss, val_loss) in history
292        .epochs
293        .iter()
294        .zip(history.train_losses.iter())
295        .zip(history.val_losses.iter())
296        .map(|((e, t), v)| (e, t, v))
297    {
298        println!(
299            "   Epoch {}: train_loss={:.4}, val_loss={:.4}",
300            epoch + 1,
301            train_loss,
302            val_loss
303        );
304    }
305
306    // Test on new images
307    println!("\n   Testing on new images...");
308    let test_images = create_test_image(5, 3, 224, 224)?;
309    let predictions = pipeline.forward(&test_images)?;
310
311    if let TaskOutput::Classification { probabilities, .. } = predictions {
312        for (i, prob_row) in probabilities.outer_iter().enumerate() {
313            let (predicted_class, confidence) = prob_row
314                .iter()
315                .enumerate()
316                .max_by(|(_, a), (_, b)| a.partial_cmp(b).unwrap())
317                .map_or((0, 0.0), |(idx, &prob)| (idx, prob));
318
319            println!(
320                "   Image {}: Class {} (confidence: {:.2}%)",
321                i + 1,
322                predicted_class,
323                confidence * 100.0
324            );
325        }
326    }
327
328    // Analyze quantum advantage
329    let quantum_advantage = analyze_classification_quantum_advantage(&pipeline)?;
330    println!("\n   Quantum advantage analysis:");
331    println!(
332        "   - Parameter efficiency: {:.2}x classical",
333        quantum_advantage.param_efficiency
334    );
335    println!(
336        "   - Feature expressiveness: {:.2}x",
337        quantum_advantage.expressiveness
338    );
339    println!(
340        "   - Training speedup: {:.2}x",
341        quantum_advantage.training_speedup
342    );
343
344    Ok(())
345}
346
347/// Demonstrate object detection
348fn object_detection_demo() -> Result<()> {
349    println!("   Quantum object detection demo...");
350
351    // Create detection pipeline
352    let config = QuantumVisionConfig::object_detection(80); // 80 classes (COCO-like)
353    let mut pipeline = QuantumVisionPipeline::new(config)?;
354
355    // Test image
356    let test_images = create_test_image(2, 3, 416, 416)?;
357
358    println!(
359        "   Processing {} images for object detection...",
360        test_images.dim().0
361    );
362
363    // Run detection
364    let detections = pipeline.forward(&test_images)?;
365
366    if let TaskOutput::Detection {
367        boxes,
368        scores,
369        classes,
370    } = detections
371    {
372        println!("   Detection results:");
373
374        for batch_idx in 0..boxes.dim().0 {
375            println!("\n   Image {}:", batch_idx + 1);
376
377            // Filter detections by score threshold
378            let threshold = 0.5;
379            let mut num_detections = 0;
380
381            for det_idx in 0..boxes.dim().1 {
382                let score = scores[[batch_idx, det_idx]];
383
384                if score > threshold {
385                    let class_id = classes[[batch_idx, det_idx]];
386                    let bbox = boxes.slice(scirs2_core::ndarray::s![batch_idx, det_idx, ..]);
387
388                    println!(
389                        "   - Object {}: Class {}, Score {:.3}, Box [{:.1}, {:.1}, {:.1}, {:.1}]",
390                        num_detections + 1,
391                        class_id,
392                        score,
393                        bbox[0],
394                        bbox[1],
395                        bbox[2],
396                        bbox[3]
397                    );
398
399                    num_detections += 1;
400                }
401            }
402
403            if num_detections == 0 {
404                println!("   - No objects detected above threshold");
405            } else {
406                println!("   Total objects detected: {num_detections}");
407            }
408        }
409    }
410
411    // Analyze detection performance
412    println!("\n   Detection performance analysis:");
413    println!("   - Quantum anchor generation improves localization");
414    println!("   - Entangled features enhance multi-scale detection");
415    println!("   - Quantum NMS reduces redundant detections");
416
417    Ok(())
418}
419
420/// Demonstrate semantic segmentation
421fn segmentation_demo() -> Result<()> {
422    println!("   Quantum semantic segmentation demo...");
423
424    // Create segmentation pipeline
425    let config = QuantumVisionConfig::segmentation(21); // 21 classes (Pascal VOC-like)
426    let mut pipeline = QuantumVisionPipeline::new(config)?;
427
428    // Test images
429    let test_images = create_test_image(1, 3, 512, 512)?;
430
431    println!("   Processing image for semantic segmentation...");
432
433    // Run segmentation
434    let segmentation = pipeline.forward(&test_images)?;
435
436    if let TaskOutput::Segmentation {
437        masks,
438        class_scores,
439    } = segmentation
440    {
441        println!("   Segmentation results:");
442        println!("   - Mask shape: {:?}", masks.dim());
443        println!("   - Class scores shape: {:?}", class_scores.dim());
444
445        // Analyze segmentation quality
446        let seg_metrics = analyze_segmentation_quality(&masks, &class_scores)?;
447        println!("\n   Segmentation metrics:");
448        println!("   - Mean IoU: {:.3}", seg_metrics.mean_iou);
449        println!(
450            "   - Pixel accuracy: {:.1}%",
451            seg_metrics.pixel_accuracy * 100.0
452        );
453        println!(
454            "   - Boundary precision: {:.3}",
455            seg_metrics.boundary_precision
456        );
457
458        // Class distribution
459        println!("\n   Predicted class distribution:");
460        let class_counts = compute_class_distribution(&masks)?;
461        for (class_id, count) in class_counts.iter().take(5) {
462            let percentage = *count as f64 / (512.0 * 512.0) * 100.0;
463            println!("   - Class {class_id}: {percentage:.1}% of pixels");
464        }
465    }
466
467    // Quantum advantages for segmentation
468    println!("\n   Quantum segmentation advantages:");
469    println!("   - Quantum attention captures long-range dependencies");
470    println!("   - Hierarchical encoding preserves multi-scale features");
471    println!("   - Entanglement enables pixel-to-pixel correlations");
472
473    Ok(())
474}
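// Reference sketch (not part of the example file above): mean IoU over
// flattened label maps, one plausible ingredient of
// `analyze_segmentation_quality`. Signature is illustrative only.
fn mean_iou(pred: &[usize], truth: &[usize], num_classes: usize) -> f64 {
    let mut inter = vec![0usize; num_classes];
    let mut union = vec![0usize; num_classes];
    for (&p, &t) in pred.iter().zip(truth) {
        if p == t {
            inter[p] += 1;
            union[p] += 1; // intersection pixels count once in the union
        } else {
            union[p] += 1; // pixel present in prediction only
            union[t] += 1; // pixel present in ground truth only
        }
    }
    let ious: Vec<f64> = (0..num_classes)
        .filter(|&c| union[c] > 0)
        .map(|c| inter[c] as f64 / union[c] as f64)
        .collect();
    ious.iter().sum::<f64>() / ious.len().max(1) as f64
}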
475
476/// Demonstrate feature extraction
477fn feature_extraction_demo() -> Result<()> {
478    println!("   Quantum feature extraction demo...");
479
480    // Create feature extraction pipeline
481    let config = QuantumVisionConfig {
482        num_qubits: 14,
483        encoding_method: ImageEncodingMethod::QPIE,
484        backbone: VisionBackbone::QuantumResNet {
485            blocks: vec![
486                ResidualBlock {
487                    channels: 64,
488                    kernel_size: 3,
489                    stride: 1,
490                    quantum_conv: true,
491                },
492                ResidualBlock {
493                    channels: 128,
494                    kernel_size: 3,
495                    stride: 2,
496                    quantum_conv: true,
497                },
498            ],
499            skip_connections: true,
500        },
501        task_config: VisionTaskConfig::FeatureExtraction {
502            feature_dim: 512,
503            normalize: true,
504        },
505        preprocessing: PreprocessingConfig::default(),
506        quantum_enhancement: QuantumEnhancement::High,
507    };
508
509    let mut pipeline = QuantumVisionPipeline::new(config)?;
510
511    // Extract features from multiple images
512    let num_images = 10;
513    let test_images = create_test_image(num_images, 3, 224, 224)?;
514
515    println!("   Extracting features from {num_images} images...");
516
517    let features_output = pipeline.forward(&test_images)?;
518
519    if let TaskOutput::Features {
520        features,
521        attention_maps: _,
522    } = features_output
523    {
524        println!("   Feature extraction results:");
525        println!("   - Feature dimension: {}", features.dim().1);
526        println!("   - Features normalized: Yes");
527
528        // Compute feature statistics
529        let feature_stats = compute_feature_statistics(&features)?;
530        println!("\n   Feature statistics:");
531        println!("   - Mean magnitude: {:.4}", feature_stats.mean_magnitude);
532        println!("   - Variance: {:.4}", feature_stats.variance);
533        println!("   - Sparsity: {:.1}%", feature_stats.sparsity * 100.0);
534
535        // Compute pairwise similarities
536        println!("\n   Feature similarity matrix (first 5 images):");
537        let similarities = compute_cosine_similarities(&features)?;
538
539        print!("       ");
540        for i in 0..5.min(num_images) {
541            print!("Img{}  ", i + 1);
542        }
543        println!();
544
545        for i in 0..5.min(num_images) {
546            print!("   Img{} ", i + 1);
547            for j in 0..5.min(num_images) {
548                print!("{:.3} ", similarities[[i, j]]);
549            }
550            println!();
551        }
552
553        // Quantum feature properties
554        println!("\n   Quantum feature properties:");
555        println!("   - Entanglement enhances discriminative power");
556        println!("   - Quantum superposition encodes multiple views");
557        println!("   - Phase information captures subtle variations");
558    }
559
560    Ok(())
561}
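// Reference sketch (not part of the example file above): pairwise cosine
// similarity over row-vector features, matching what
// `compute_cosine_similarities` reports. Uses the plain `ndarray` crate
// for illustration.
use ndarray::Array2;

fn cosine_similarities(features: &Array2<f64>) -> Array2<f64> {
    let n = features.nrows();
    let mut sims = Array2::zeros((n, n));
    for i in 0..n {
        for j in 0..n {
            let (a, b) = (features.row(i), features.row(j));
            let norm = (a.dot(&a) * b.dot(&b)).sqrt();
            sims[[i, j]] = if norm > 0.0 { a.dot(&b) / norm } else { 0.0 };
        }
    }
    sims
}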
562
563/// Demonstrate multi-task learning
564fn multitask_demo() -> Result<()> {
565    println!("   Multi-task quantum vision demo...");
566
567    // Create a pipeline that can handle multiple tasks
568    let tasks = vec![
569        (
570            "Classification",
571            VisionTaskConfig::Classification {
572                num_classes: 10,
573                multi_label: false,
574            },
575        ),
576        (
577            "Detection",
578            VisionTaskConfig::ObjectDetection {
579                num_classes: 20,
580                anchor_sizes: vec![(32, 32), (64, 64)],
581                iou_threshold: 0.5,
582            },
583        ),
584        (
585            "Segmentation",
586            VisionTaskConfig::Segmentation {
587                num_classes: 10,
588                output_stride: 8,
589            },
590        ),
591    ];
592
593    println!(
594        "   Testing {} vision tasks with shared backbone...",
595        tasks.len()
596    );
597
598    // Use same backbone for all tasks
599    let base_config = QuantumVisionConfig {
600        num_qubits: 16,
601        encoding_method: ImageEncodingMethod::HierarchicalEncoding { levels: 3 },
602        backbone: VisionBackbone::HybridBackbone {
603            cnn_layers: 4,
604            transformer_layers: 2,
605        },
606        task_config: tasks[0].1.clone(), // Will be replaced for each task
607        preprocessing: PreprocessingConfig::default(),
608        quantum_enhancement: QuantumEnhancement::High,
609    };
610
611    // Test each task
612    let test_images = create_test_image(2, 3, 416, 416)?;
613
614    for (task_name, task_config) in tasks {
615        println!("\n   --- {task_name} Task ---");
616
617        let mut config = base_config.clone();
618        config.task_config = task_config;
619
620        let mut pipeline = QuantumVisionPipeline::new(config)?;
621        let output = pipeline.forward(&test_images)?;
622
623        match output {
624            TaskOutput::Classification { logits, .. } => {
625                println!("   Classification output shape: {:?}", logits.dim());
626            }
627            TaskOutput::Detection { boxes, scores, .. } => {
628                println!(
629                    "   Detection: {} anchors, score shape: {:?}",
630                    boxes.dim().1,
631                    scores.dim()
632                );
633            }
634            TaskOutput::Segmentation { masks, .. } => {
635                println!("   Segmentation mask shape: {:?}", masks.dim());
636            }
637            _ => {}
638        }
639
640        // Task-specific quantum advantages
641        match task_name {
642            "Classification" => {
643                println!("   ✓ Quantum features improve class discrimination");
644            }
645            "Detection" => {
646                println!("   ✓ Quantum anchors adapt to object scales");
647            }
648            "Segmentation" => {
649                println!("   ✓ Quantum correlations enhance boundary detection");
650            }
651            _ => {}
652        }
653    }
654
655    println!("\n   Multi-task benefits:");
656    println!("   - Shared quantum backbone reduces parameters");
657    println!("   - Task-specific quantum heads optimize performance");
658    println!("   - Quantum entanglement enables cross-task learning");
659
660    Ok(())
661}
662
663/// Demonstrate performance analysis
664fn performance_analysis_demo() -> Result<()> {
665    println!("   Analyzing quantum vision performance...");
666
667    // Compare different quantum enhancement levels
668    let enhancement_levels = vec![
669        ("Low", QuantumEnhancement::Low),
670        ("Medium", QuantumEnhancement::Medium),
671        ("High", QuantumEnhancement::High),
672        (
673            "Custom",
674            QuantumEnhancement::Custom {
675                quantum_layers: vec![0, 2, 4, 6],
676                entanglement_strength: 0.8,
677            },
678        ),
679    ];
680
681    println!("\n   Quantum Enhancement Level Comparison:");
682    println!("   Level    | FLOPs   | Memory  | Accuracy | Q-Advantage");
683    println!("   ---------|---------|---------|----------|------------");
684
685    for (level_name, enhancement) in enhancement_levels {
686        let config = QuantumVisionConfig {
687            num_qubits: 12,
688            encoding_method: ImageEncodingMethod::AmplitudeEncoding,
689            backbone: VisionBackbone::QuantumCNN {
690                conv_layers: vec![ConvolutionalConfig {
691                    num_filters: 32,
692                    kernel_size: 3,
693                    stride: 1,
694                    padding: 1,
695                    quantum_kernel: true,
696                    circuit_depth: 4,
697                }],
698                pooling_type: PoolingType::Quantum,
699            },
700            task_config: VisionTaskConfig::Classification {
701                num_classes: 10,
702                multi_label: false,
703            },
704            preprocessing: PreprocessingConfig::default(),
705            quantum_enhancement: enhancement,
706        };
707
708        let pipeline = QuantumVisionPipeline::new(config)?;
709        let _metrics = pipeline.metrics();
710
711        // Simulate performance metrics
712        let (flops, memory, accuracy, q_advantage) = match level_name {
713            "Low" => (1.2, 50.0, 0.85, 1.2),
714            "Medium" => (2.5, 80.0, 0.88, 1.5),
715            "High" => (4.1, 120.0, 0.91, 2.1),
716            "Custom" => (3.2, 95.0, 0.90, 1.8),
717            _ => (0.0, 0.0, 0.0, 0.0),
718        };
719
720        println!(
721            "   {:<8} | {:.1}G | {:.0}MB | {:.1}%  | {:.1}x",
722            level_name,
723            flops,
724            memory,
725            accuracy * 100.0,
726            q_advantage
727        );
728    }
729
730    // Scalability analysis
731    println!("\n   Scalability Analysis:");
732    let image_sizes = vec![64, 128, 224, 416, 512];
733
734    println!("   Image Size | Inference Time | Throughput");
735    println!("   -----------|----------------|------------");
736
737    for size in image_sizes {
738        let inference_time = (f64::from(size) / 100.0).mul_add(f64::from(size) / 100.0, 5.0);
739        let throughput = 1000.0 / inference_time;
740
741        println!("   {size}x{size}   | {inference_time:.1}ms        | {throughput:.0} img/s");
742    }
743
744    // Quantum advantages summary
745    println!("\n   Quantum Computer Vision Advantages:");
746    println!("   1. Exponential feature space with limited qubits");
747    println!("   2. Natural multi-scale representation via entanglement");
748    println!("   3. Quantum attention for global context modeling");
749    println!("   4. Phase encoding for rotation-invariant features");
750    println!("   5. Quantum pooling preserves superposition information");
751
752    // Hardware requirements
753    println!("\n   Hardware Requirements:");
754    println!("   - Minimum qubits: 10 (basic tasks)");
755    println!("   - Recommended: 16-20 qubits (complex tasks)");
756    println!("   - Coherence time: >100μs for deep networks");
757    println!("   - Gate fidelity: >99.9% for accurate predictions");
758
759    Ok(())
760}

Trait Implementations§

Source§

impl Clone for QuantumVisionPipeline

Source§

fn clone(&self) -> QuantumVisionPipeline

Returns a duplicate of the value. Read more
1.0.0 · Source§

fn clone_from(&mut self, source: &Self)

Performs copy-assignment from source. Read more
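Cloning is useful for snapshotting a configured pipeline before task-specific fine-tuning. A minimal sketch, assuming config is a valid QuantumVisionConfig (the boxed backbone and task head clone through the DynClone blanket implementation listed below):

let pipeline = QuantumVisionPipeline::new(config)?;
let checkpoint = pipeline.clone(); // independent copy; mutating `pipeline` leaves it untouched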
Source§

impl Debug for QuantumVisionPipeline

Source§

fn fmt(&self, f: &mut Formatter<'_>) -> Result

Formats the value using the given formatter. Read more
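For quick inspection during development, the derived Debug implementation prints every pipeline field. A minimal sketch, assuming a constructed pipeline:

println!("{pipeline:?}"); // dumps config, encoder, backbone, task head, and metrics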

Auto Trait Implementations§

Blanket Implementations§

Source§

impl<T> Any for T
where T: 'static + ?Sized,

Source§

fn type_id(&self) -> TypeId

Gets the TypeId of self. Read more
Source§

impl<T> Borrow<T> for T
where T: ?Sized,

Source§

fn borrow(&self) -> &T

Immutably borrows from an owned value. Read more
Source§

impl<T> BorrowMut<T> for T
where T: ?Sized,

Source§

fn borrow_mut(&mut self) -> &mut T

Mutably borrows from an owned value. Read more
Source§

impl<T> CloneToUninit for T
where T: Clone,

Source§

unsafe fn clone_to_uninit(&self, dest: *mut u8)

🔬 This is a nightly-only experimental API. (clone_to_uninit)
Performs copy-assignment from self to dest. Read more
Source§

impl<T> DynClone for T
where T: Clone,

Source§

fn __clone_box(&self, _: Private) -> *mut ()

Source§

impl<T> From<T> for T

Source§

fn from(t: T) -> T

Returns the argument unchanged.

Source§

impl<T, U> Into<U> for T
where U: From<T>,

Source§

fn into(self) -> U

Calls U::from(self).

That is, this conversion is whatever the implementation of From<T> for U chooses to do.

Source§

impl<T> IntoEither for T

Source§

fn into_either(self, into_left: bool) -> Either<Self, Self>

Converts self into a Left variant of Either<Self, Self> if into_left is true. Converts self into a Right variant of Either<Self, Self> otherwise. Read more
Source§

fn into_either_with<F>(self, into_left: F) -> Either<Self, Self>
where F: FnOnce(&Self) -> bool,

Converts self into a Left variant of Either<Self, Self> if into_left(&self) returns true. Converts self into a Right variant of Either<Self, Self> otherwise. Read more
Source§

impl<T> Pointable for T

Source§

const ALIGN: usize

The alignment of the pointer.
Source§

type Init = T

The type for initializers.
Source§

unsafe fn init(init: <T as Pointable>::Init) -> usize

Initializes a pointable value with the given initializer. Read more
Source§

unsafe fn deref<'a>(ptr: usize) -> &'a T

Dereferences the given pointer. Read more
Source§

unsafe fn deref_mut<'a>(ptr: usize) -> &'a mut T

Mutably dereferences the given pointer. Read more
Source§

unsafe fn drop(ptr: usize)

Drops the object pointed to by the given pointer. Read more
Source§

impl<T> Same for T

Source§

type Output = T

Should always be Self
Source§

impl<SS, SP> SupersetOf<SS> for SP
where SS: SubsetOf<SP>,

Source§

fn to_subset(&self) -> Option<SS>

The inverse inclusion map: attempts to construct self from the equivalent element of its superset. Read more
Source§

fn is_in_subset(&self) -> bool

Checks if self is actually part of its subset T (and can be converted to it).
Source§

fn to_subset_unchecked(&self) -> SS

Use with care! Same as self.to_subset but without any property checks. Always succeeds.
Source§

fn from_subset(element: &SS) -> SP

The inclusion map: converts self to the equivalent element of its superset.
Source§

impl<T> ToOwned for T
where T: Clone,

Source§

type Owned = T

The resulting type after obtaining ownership.
Source§

fn to_owned(&self) -> T

Creates owned data from borrowed data, usually by cloning. Read more
Source§

fn clone_into(&self, target: &mut T)

Uses borrowed data to replace owned data, usually by cloning. Read more
Source§

impl<T, U> TryFrom<U> for T
where U: Into<T>,

Source§

type Error = Infallible

The type returned in the event of a conversion error.
Source§

fn try_from(value: U) -> Result<T, <T as TryFrom<U>>::Error>

Performs the conversion.
Source§

impl<T, U> TryInto<U> for T
where U: TryFrom<T>,

Source§

type Error = <U as TryFrom<T>>::Error

The type returned in the event of a conversion error.
Source§

fn try_into(self) -> Result<U, <U as TryFrom<T>>::Error>

Performs the conversion.
Source§

impl<V, T> VZip<V> for T
where V: MultiLane<T>,

Source§

fn vzip(self) -> V