pub struct QuantumAdversarialTrainer { /* private fields */ }

Quantum adversarial trainer: pairs a QuantumNeuralNetwork with a QuantumDefenseStrategy and an AdversarialTrainingConfig to generate adversarial examples, apply defenses, train robust models, and report robustness metrics.
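A minimal usage sketch, assuming the crate's types are in scope and using the helper functions create_comprehensive_defense and create_default_adversarial_config from examples/quantum_adversarial.rs:

use ndarray::{Array1, Array2};

fn quick_start() -> Result<()> {
    // Build a small quantum model: encoding -> variational -> measurement
    let layers = vec![
        QNNLayerType::EncodingLayer { num_features: 4 },
        QNNLayerType::VariationalLayer { num_params: 8 },
        QNNLayerType::MeasurementLayer {
            measurement_basis: "computational".to_string(),
        },
    ];
    let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;

    // Assemble the trainer from model, defense strategy, and config
    let trainer = QuantumAdversarialTrainer::new(
        model,
        create_comprehensive_defense(),
        create_default_adversarial_config(),
    );

    // Craft FGSM adversarial examples for a small batch
    let data = Array2::from_elem((10, 4), 0.5);
    let labels = Array1::from_shape_fn(10, |i| i % 2);
    let examples = trainer.generate_adversarial_examples(
        &data,
        &labels,
        QuantumAttackType::FGSM { epsilon: 0.1 },
    )?;
    println!("Generated {} adversarial examples", examples.len());
    Ok(())
}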
Implementations

impl QuantumAdversarialTrainer
pub fn new(
    model: QuantumNeuralNetwork,
    defense_strategy: QuantumDefenseStrategy,
    config: AdversarialTrainingConfig,
) -> Self
Create a new quantum adversarial trainer from a model, a defense strategy, and an adversarial training configuration.
Examples found in repository: examples/quantum_adversarial.rs (line 68)
50fn adversarial_attack_demo() -> Result<()> {
51 // Create a quantum model
52 let layers = vec![
53 QNNLayerType::EncodingLayer { num_features: 4 },
54 QNNLayerType::VariationalLayer { num_params: 8 },
55 QNNLayerType::EntanglementLayer {
56 connectivity: "circular".to_string(),
57 },
58 QNNLayerType::VariationalLayer { num_params: 8 },
59 QNNLayerType::MeasurementLayer {
60 measurement_basis: "computational".to_string(),
61 },
62 ];
63
64 let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;
65 let defense = create_comprehensive_defense();
66 let config = create_default_adversarial_config();
67
68 let trainer = QuantumAdversarialTrainer::new(model, defense, config);
69
70    println!(" Created quantum adversarial trainer");
71
72 // Test data
73 let test_data = Array2::from_shape_fn((10, 4), |(i, j)| {
74 0.2f64.mul_add(j as f64 / 4.0, 0.3f64.mul_add(i as f64 / 10.0, 0.5))
75 });
76 let test_labels = Array1::from_shape_fn(10, |i| i % 2);
77
78 println!("\n Testing different attack methods:");
79
80 // FGSM Attack
81 println!(" - Fast Gradient Sign Method (FGSM)...");
82 let fgsm_examples = trainer.generate_adversarial_examples(
83 &test_data,
84 &test_labels,
85 QuantumAttackType::FGSM { epsilon: 0.1 },
86 )?;
87
88 let fgsm_success_rate = fgsm_examples
89 .iter()
90 .map(|ex| if ex.attack_success { 1.0 } else { 0.0 })
91 .sum::<f64>()
92 / fgsm_examples.len() as f64;
93
94 println!(" Success rate: {:.2}%", fgsm_success_rate * 100.0);
95
96 if let Some(example) = fgsm_examples.first() {
97 println!(
98 " Average perturbation: {:.4}",
99 example.perturbation_norm
100 );
101 }
102
103 // PGD Attack
104 println!(" - Projected Gradient Descent (PGD)...");
105 let pgd_examples = trainer.generate_adversarial_examples(
106 &test_data,
107 &test_labels,
108 QuantumAttackType::PGD {
109 epsilon: 0.1,
110 alpha: 0.01,
111 num_steps: 10,
112 },
113 )?;
114
115 let pgd_success_rate = pgd_examples
116 .iter()
117 .map(|ex| if ex.attack_success { 1.0 } else { 0.0 })
118 .sum::<f64>()
119 / pgd_examples.len() as f64;
120
121 println!(" Success rate: {:.2}%", pgd_success_rate * 100.0);
122
123 // Parameter Shift Attack
124 println!(" - Parameter Shift Attack...");
125 let param_examples = trainer.generate_adversarial_examples(
126 &test_data,
127 &test_labels,
128 QuantumAttackType::ParameterShift {
129 shift_magnitude: 0.05,
130 target_parameters: None,
131 },
132 )?;
133
134 let param_success_rate = param_examples
135 .iter()
136 .map(|ex| if ex.attack_success { 1.0 } else { 0.0 })
137 .sum::<f64>()
138 / param_examples.len() as f64;
139
140 println!(" Success rate: {:.2}%", param_success_rate * 100.0);
141
142 // Quantum State Perturbation
143 println!(" - Quantum State Perturbation...");
144 let state_examples = trainer.generate_adversarial_examples(
145 &test_data,
146 &test_labels,
147 QuantumAttackType::StatePerturbation {
148 perturbation_strength: 0.1,
149 basis: "pauli_z".to_string(),
150 },
151 )?;
152
153 let state_success_rate = state_examples
154 .iter()
155 .map(|ex| if ex.attack_success { 1.0 } else { 0.0 })
156 .sum::<f64>()
157 / state_examples.len() as f64;
158
159 println!(" Success rate: {:.2}%", state_success_rate * 100.0);
160
161 Ok(())
162}
163
164/// Demonstrate defense mechanisms
165fn defense_mechanisms_demo() -> Result<()> {
166 println!(" Testing defense strategies:");
167
168 // Input preprocessing defense
169 println!(" - Input Preprocessing...");
170 let preprocessing_defense = QuantumDefenseStrategy::InputPreprocessing {
171 noise_addition: 0.05,
172 feature_squeezing: true,
173 };
174
175 let layers = vec![
176 QNNLayerType::EncodingLayer { num_features: 4 },
177 QNNLayerType::VariationalLayer { num_params: 6 },
178 QNNLayerType::MeasurementLayer {
179 measurement_basis: "computational".to_string(),
180 },
181 ];
182
183 let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;
184 let config = create_default_adversarial_config();
185 let trainer = QuantumAdversarialTrainer::new(model, preprocessing_defense, config.clone());
186
187 let test_input = Array1::from_vec(vec![0.51, 0.32, 0.83, 0.24]);
188 let defended_input = trainer.apply_defense(&test_input)?;
189
190 let defense_effect = (&defended_input - &test_input).mapv(f64::abs).sum();
191 println!(" Defense effect magnitude: {defense_effect:.4}");
192
193 // Randomized circuit defense
194 println!(" - Randomized Circuit Defense...");
195 let randomized_defense = QuantumDefenseStrategy::RandomizedCircuit {
196 randomization_strength: 0.1,
197 num_random_layers: 2,
198 };
199
200 let layers2 = vec![
201 QNNLayerType::EncodingLayer { num_features: 4 },
202 QNNLayerType::VariationalLayer { num_params: 8 },
203 ];
204
205 let model2 = QuantumNeuralNetwork::new(layers2, 4, 4, 2)?;
206 let trainer2 = QuantumAdversarialTrainer::new(model2, randomized_defense, config);
207
208 let defended_input2 = trainer2.apply_defense(&test_input)?;
209 let randomization_effect = (&defended_input2 - &test_input).mapv(f64::abs).sum();
210 println!(" Randomization effect: {randomization_effect:.4}");
211
212 // Quantum error correction defense
213 println!(" - Quantum Error Correction...");
214    let _qec_defense = QuantumDefenseStrategy::QuantumErrorCorrection {
215 code_type: "surface_code".to_string(),
216 correction_threshold: 0.01,
217 };
218
219 println!(" Error correction configured with surface codes");
220 println!(" Correction threshold: 1%");
221
222 Ok(())
223}
224
225/// Demonstrate adversarial training process
226fn adversarial_training_demo() -> Result<()> {
227 // Create model and trainer
228 let layers = vec![
229 QNNLayerType::EncodingLayer { num_features: 4 },
230 QNNLayerType::VariationalLayer { num_params: 12 },
231 QNNLayerType::EntanglementLayer {
232 connectivity: "circular".to_string(),
233 },
234 QNNLayerType::MeasurementLayer {
235 measurement_basis: "computational".to_string(),
236 },
237 ];
238
239 let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;
240
241 let defense = QuantumDefenseStrategy::AdversarialTraining {
242 attack_types: vec![
243 QuantumAttackType::FGSM { epsilon: 0.08 },
244 QuantumAttackType::PGD {
245 epsilon: 0.08,
246 alpha: 0.01,
247 num_steps: 7,
248 },
249 ],
250 adversarial_ratio: 0.4,
251 };
252
253 let mut config = create_default_adversarial_config();
254 config.epochs = 20; // Reduced for demo
255 config.eval_interval = 5;
256
257 let mut trainer = QuantumAdversarialTrainer::new(model, defense, config);
258
259 println!(" Adversarial training configuration:");
260 println!(" - Attack types: FGSM + PGD");
261 println!(" - Adversarial ratio: 40%");
262 println!(" - Training epochs: 20");
263
264 // Generate synthetic training data
265 let train_data = generate_quantum_dataset(200, 4);
266 let train_labels = Array1::from_shape_fn(200, |i| i % 2);
267
268 let val_data = generate_quantum_dataset(50, 4);
269 let val_labels = Array1::from_shape_fn(50, |i| i % 2);
270
271 // Train with adversarial examples
272 println!("\n Starting adversarial training...");
273 let mut optimizer = Adam::new(0.001);
274 let losses = trainer.train(
275 &train_data,
276 &train_labels,
277 &val_data,
278 &val_labels,
279 &mut optimizer,
280 )?;
281
282 println!(" Training completed!");
283 println!(" Final loss: {:.4}", losses.last().unwrap_or(&0.0));
284
285 // Show final robustness metrics
286 let metrics = trainer.get_robustness_metrics();
287 println!("\n Final robustness metrics:");
288 println!(" - Clean accuracy: {:.3}", metrics.clean_accuracy);
289 println!(" - Robust accuracy: {:.3}", metrics.robust_accuracy);
290 println!(
291 " - Attack success rate: {:.3}",
292 metrics.attack_success_rate
293 );
294
295 Ok(())
296}
297
298/// Demonstrate robustness evaluation
299fn robustness_evaluation_demo() -> Result<()> {
300 // Create trained model (simplified)
301 let layers = vec![
302 QNNLayerType::EncodingLayer { num_features: 4 },
303 QNNLayerType::VariationalLayer { num_params: 8 },
304 QNNLayerType::MeasurementLayer {
305 measurement_basis: "computational".to_string(),
306 },
307 ];
308
309 let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;
310 let defense = create_comprehensive_defense();
311 let config = create_default_adversarial_config();
312
313    let trainer = QuantumAdversarialTrainer::new(model, defense, config);
314
315 println!(" Evaluating model robustness...");
316
317 // Test data
318 let test_data = generate_quantum_dataset(100, 4);
319 let test_labels = Array1::from_shape_fn(100, |i| i % 2);
320
321 // Evaluate against different attack strengths
322 let epsilons = vec![0.05, 0.1, 0.15, 0.2];
323
324 println!("\n Robustness vs. attack strength:");
325 for &epsilon in &epsilons {
326 let attack_examples = trainer.generate_adversarial_examples(
327 &test_data,
328 &test_labels,
329 QuantumAttackType::FGSM { epsilon },
330 )?;
331
332 let success_rate = attack_examples
333 .iter()
334 .map(|ex| if ex.attack_success { 1.0 } else { 0.0 })
335 .sum::<f64>()
336 / attack_examples.len() as f64;
337
338 let avg_perturbation = attack_examples
339 .iter()
340 .map(|ex| ex.perturbation_norm)
341 .sum::<f64>()
342 / attack_examples.len() as f64;
343
344 println!(
345 " ε = {:.2}: Attack success = {:.1}%, Avg perturbation = {:.4}",
346 epsilon,
347 success_rate * 100.0,
348 avg_perturbation
349 );
350 }
351
352 // Test different attack types
353 println!("\n Attack type comparison:");
354 let attack_types = vec![
355 ("FGSM", QuantumAttackType::FGSM { epsilon: 0.1 }),
356 (
357 "PGD",
358 QuantumAttackType::PGD {
359 epsilon: 0.1,
360 alpha: 0.01,
361 num_steps: 10,
362 },
363 ),
364 (
365 "Parameter Shift",
366 QuantumAttackType::ParameterShift {
367 shift_magnitude: 0.05,
368 target_parameters: None,
369 },
370 ),
371 (
372 "State Perturbation",
373 QuantumAttackType::StatePerturbation {
374 perturbation_strength: 0.1,
375 basis: "pauli_z".to_string(),
376 },
377 ),
378 ];
379
380 for (name, attack_type) in attack_types {
381 let examples = trainer.generate_adversarial_examples(
382 &test_data.slice(s![0..20, ..]).to_owned(),
383 &test_labels.slice(s![0..20]).to_owned(),
384 attack_type,
385 )?;
386
387 let success_rate = examples
388 .iter()
389 .map(|ex| if ex.attack_success { 1.0 } else { 0.0 })
390 .sum::<f64>()
391 / examples.len() as f64;
392
393 println!(" {}: {:.1}% success rate", name, success_rate * 100.0);
394 }
395
396 Ok(())
397}
398
399/// Demonstrate certified defense
400fn certified_defense_demo() -> Result<()> {
401 let layers = vec![
402 QNNLayerType::EncodingLayer { num_features: 4 },
403 QNNLayerType::VariationalLayer { num_params: 6 },
404 QNNLayerType::MeasurementLayer {
405 measurement_basis: "computational".to_string(),
406 },
407 ];
408
409 let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;
410
411 let certified_defense = QuantumDefenseStrategy::CertifiedDefense {
412 smoothing_variance: 0.1,
413 confidence_level: 0.95,
414 };
415
416 let config = create_default_adversarial_config();
417 let trainer = QuantumAdversarialTrainer::new(model, certified_defense, config);
418
419 println!(" Certified defense analysis:");
420 println!(" - Smoothing variance: 0.1");
421 println!(" - Confidence level: 95%");
422
423 // Generate test data
424 let test_data = generate_quantum_dataset(50, 4);
425
426 // Perform certified analysis
427 println!("\n Running randomized smoothing certification...");
428 let certified_accuracy = trainer.certified_defense_analysis(
429 &test_data, 0.1, // smoothing variance
430 100, // number of samples
431 )?;
432
433 println!(" Certified accuracy: {:.2}%", certified_accuracy * 100.0);
434
435 // Compare with different smoothing levels
436 let smoothing_levels = vec![0.05, 0.1, 0.15, 0.2];
437 println!("\n Certified accuracy vs. smoothing variance:");
438
439 for &variance in &smoothing_levels {
440 let cert_acc = trainer.certified_defense_analysis(&test_data, variance, 50)?;
441 println!(" σ = {:.2}: {:.1}% certified", variance, cert_acc * 100.0);
442 }
443
444 Ok(())
445}
446
447/// Compare different attack methods
448fn attack_comparison_demo() -> Result<()> {
449 let layers = vec![
450 QNNLayerType::EncodingLayer { num_features: 4 },
451 QNNLayerType::VariationalLayer { num_params: 10 },
452 QNNLayerType::EntanglementLayer {
453 connectivity: "full".to_string(),
454 },
455 QNNLayerType::MeasurementLayer {
456 measurement_basis: "computational".to_string(),
457 },
458 ];
459
460 let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;
461 let defense = create_comprehensive_defense();
462 let config = create_default_adversarial_config();
463
464 let trainer = QuantumAdversarialTrainer::new(model, defense, config);
465
466 println!(" Comprehensive attack comparison:");
467
468 let test_data = generate_quantum_dataset(30, 4);
469 let test_labels = Array1::from_shape_fn(30, |i| i % 2);
470
471 // Test multiple attack configurations
472 let attack_configs = vec![
473 ("FGSM (ε=0.05)", QuantumAttackType::FGSM { epsilon: 0.05 }),
474 ("FGSM (ε=0.1)", QuantumAttackType::FGSM { epsilon: 0.1 }),
475 (
476 "PGD-5",
477 QuantumAttackType::PGD {
478 epsilon: 0.1,
479 alpha: 0.02,
480 num_steps: 5,
481 },
482 ),
483 (
484 "PGD-10",
485 QuantumAttackType::PGD {
486 epsilon: 0.1,
487 alpha: 0.01,
488 num_steps: 10,
489 },
490 ),
491 (
492 "Parameter Shift",
493 QuantumAttackType::ParameterShift {
494 shift_magnitude: 0.1,
495 target_parameters: None,
496 },
497 ),
498 (
499 "Circuit Manipulation",
500 QuantumAttackType::CircuitManipulation {
501 gate_error_rate: 0.01,
502 coherence_time: 100.0,
503 },
504 ),
505 ];
506
507 println!("\n Attack effectiveness comparison:");
508 println!(
509 " {:20} {:>12} {:>15} {:>15}",
510 "Attack Type", "Success Rate", "Avg Perturbation", "Effectiveness"
511 );
512
513 for (name, attack_type) in attack_configs {
514 let examples =
515 trainer.generate_adversarial_examples(&test_data, &test_labels, attack_type)?;
516
517 let success_rate = examples
518 .iter()
519 .map(|ex| if ex.attack_success { 1.0 } else { 0.0 })
520 .sum::<f64>()
521 / examples.len() as f64;
522
523 let avg_perturbation =
524 examples.iter().map(|ex| ex.perturbation_norm).sum::<f64>() / examples.len() as f64;
525
526 let effectiveness = if avg_perturbation > 0.0 {
527 success_rate / avg_perturbation
528 } else {
529 0.0
530 };
531
532 println!(
533 " {:20} {:>11.1}% {:>14.4} {:>14.2}",
534 name,
535 success_rate * 100.0,
536 avg_perturbation,
537 effectiveness
538 );
539 }
540
541 Ok(())
542}
543
544/// Demonstrate ensemble defense
545fn ensemble_defense_demo() -> Result<()> {
546 println!(" Ensemble defense strategy:");
547
548 let ensemble_defense = QuantumDefenseStrategy::EnsembleDefense {
549 num_models: 5,
550 diversity_metric: "parameter_diversity".to_string(),
551 };
552
553 let layers = vec![
554 QNNLayerType::EncodingLayer { num_features: 4 },
555 QNNLayerType::VariationalLayer { num_params: 8 },
556 QNNLayerType::MeasurementLayer {
557 measurement_basis: "computational".to_string(),
558 },
559 ];
560
561 let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;
562 let config = create_default_adversarial_config();
563
564    let trainer = QuantumAdversarialTrainer::new(model, ensemble_defense, config);
565
566 println!(" - Number of models: 5");
567 println!(" - Diversity metric: Parameter diversity");
568
569 // Initialize ensemble
570 println!("\n Initializing ensemble models...");
571 // trainer.initialize_ensemble()?; // Method is private
572    println!(" Ensemble initialized (placeholder)");
575
576 // Test ensemble robustness (simplified)
577 let test_input = Array1::from_vec(vec![0.6, 0.4, 0.7, 0.3]);
578
579 println!("\n Ensemble prediction characteristics:");
580 println!(" - Improved robustness through model diversity");
581 println!(" - Reduced attack transferability");
582 println!(" - Majority voting for final predictions");
583
584 // Compare single model vs ensemble attack success
585 // let single_model_attack = trainer.generate_single_adversarial_example(
586 // &test_input,
587 // 0,
588 // QuantumAttackType::FGSM { epsilon: 0.1 }
589 // )?;
590 // Method is private - using public generate_adversarial_examples instead
591 let single_model_attack = trainer.generate_adversarial_examples(
592 &Array2::from_shape_vec((1, test_input.len()), test_input.to_vec())?,
593 &Array1::from_vec(vec![0]),
594 QuantumAttackType::FGSM { epsilon: 0.1 },
595 )?[0]
596 .clone();
597
598 println!("\n Single model vs. ensemble comparison:");
599 println!(
600 " - Single model attack success: {}",
601 if single_model_attack.attack_success {
602 "Yes"
603 } else {
604 "No"
605 }
606 );
607 println!(
608 " - Perturbation magnitude: {:.4}",
609 single_model_attack.perturbation_norm
610 );
611
612 Ok(())
613}Sourcepub fn train(
&mut self,
train_data: &Array2<f64>,
train_labels: &Array1<usize>,
val_data: &Array2<f64>,
val_labels: &Array1<usize>,
optimizer: &mut dyn Optimizer,
) -> Result<Vec<f64>>
pub fn train( &mut self, train_data: &Array2<f64>, train_labels: &Array1<usize>, val_data: &Array2<f64>, val_labels: &Array1<usize>, optimizer: &mut dyn Optimizer, ) -> Result<Vec<f64>>
Train the model with adversarial training, returning the recorded training losses.
Examples found in repository: examples/quantum_adversarial.rs (lines 274-280)

271    // Train with adversarial examples
272    println!("\n Starting adversarial training...");
273    let mut optimizer = Adam::new(0.001);
274    let losses = trainer.train(
275        &train_data,
276        &train_labels,
277        &val_data,
278        &val_labels,
279        &mut optimizer,
280    )?;
281
282    println!(" Training completed!");
283    println!(" Final loss: {:.4}", losses.last().unwrap_or(&0.0));

pub fn generate_adversarial_examples(
    &self,
    data: &Array2<f64>,
    labels: &Array1<usize>,
    attack_type: QuantumAttackType,
) -> Result<Vec<QuantumAdversarialExample>>
Generate adversarial examples for the given data and labels using the specified attack type; the supported variants are sketched below.
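For reference, the attack variants exercised in the repository example, with the field values used there (illustrative settings, not recommendations):

// Attack configurations from examples/quantum_adversarial.rs
let attacks = vec![
    QuantumAttackType::FGSM { epsilon: 0.1 },
    QuantumAttackType::PGD { epsilon: 0.1, alpha: 0.01, num_steps: 10 },
    QuantumAttackType::ParameterShift {
        shift_magnitude: 0.05,
        target_parameters: None,
    },
    QuantumAttackType::StatePerturbation {
        perturbation_strength: 0.1,
        basis: "pauli_z".to_string(),
    },
    QuantumAttackType::CircuitManipulation {
        gate_error_rate: 0.01,
        coherence_time: 100.0,
    },
];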
Examples found in repository: examples/quantum_adversarial.rs (lines 82-86)

80    // FGSM Attack
81    println!(" - Fast Gradient Sign Method (FGSM)...");
82    let fgsm_examples = trainer.generate_adversarial_examples(
83        &test_data,
84        &test_labels,
85        QuantumAttackType::FGSM { epsilon: 0.1 },
86    )?;
87
88    let fgsm_success_rate = fgsm_examples
89        .iter()
90        .map(|ex| if ex.attack_success { 1.0 } else { 0.0 })
91        .sum::<f64>()
92        / fgsm_examples.len() as f64;
93
94    println!(" Success rate: {:.2}%", fgsm_success_rate * 100.0);

pub fn apply_defense(&self, input: &Array1<f64>) -> Result<Array1<f64>>
Apply the configured defense strategy to an input vector, returning the defended input.
Examples found in repository: examples/quantum_adversarial.rs (line 188)

187    let test_input = Array1::from_vec(vec![0.51, 0.32, 0.83, 0.24]);
188    let defended_input = trainer.apply_defense(&test_input)?;
189
190    let defense_effect = (&defended_input - &test_input).mapv(f64::abs).sum();
191    println!(" Defense effect magnitude: {defense_effect:.4}");

pub fn get_robustness_metrics(&self) -> &RobustnessMetrics
Get the current robustness metrics (clean accuracy, robust accuracy, and attack success rate).
Examples found in repository: examples/quantum_adversarial.rs (line 286)

285    // Show final robustness metrics
286    let metrics = trainer.get_robustness_metrics();
287    println!("\n Final robustness metrics:");
288    println!(" - Clean accuracy: {:.3}", metrics.clean_accuracy);
289    println!(" - Robust accuracy: {:.3}", metrics.robust_accuracy);
290    println!(
291        " - Attack success rate: {:.3}",
292        metrics.attack_success_rate
293    );

pub fn get_attack_history(&self) -> &[QuantumAdversarialExample]
Get the history of adversarial examples recorded by the trainer.
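No scraped example exercises this accessor; a minimal sketch, assuming the trainer has already generated attacks (for instance via generate_adversarial_examples or train) and using the QuantumAdversarialExample fields shown in the examples above:

// Inspect adversarial examples recorded by the trainer
let history = trainer.get_attack_history();
println!("Recorded attacks: {}", history.len());
for ex in history.iter().take(5) {
    println!(
        "  success = {}, perturbation norm = {:.4}",
        ex.attack_success, ex.perturbation_norm
    );
}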
pub fn certified_defense_analysis(
    &self,
    data: &Array2<f64>,
    smoothing_variance: f64,
    num_samples: usize,
) -> Result<f64>
Perform certified defense analysis via randomized smoothing, returning the estimated certified accuracy.
Examples found in repository: examples/quantum_adversarial.rs (lines 428-431)

426    // Perform certified analysis
427    println!("\n Running randomized smoothing certification...");
428    let certified_accuracy = trainer.certified_defense_analysis(
429        &test_data, 0.1, // smoothing variance
430        100, // number of samples
431    )?;
432
433    println!(" Certified accuracy: {:.2}%", certified_accuracy * 100.0);

Auto Trait Implementations
impl Freeze for QuantumAdversarialTrainer
impl RefUnwindSafe for QuantumAdversarialTrainer
impl Send for QuantumAdversarialTrainer
impl Sync for QuantumAdversarialTrainer
impl Unpin for QuantumAdversarialTrainer
impl UnwindSafe for QuantumAdversarialTrainer
Blanket Implementations
impl<T> BorrowMut<T> for T
where
    T: ?Sized,

fn borrow_mut(&mut self) -> &mut T

Mutably borrows from an owned value.
impl<T> IntoEither for T

fn into_either(self, into_left: bool) -> Either<Self, Self>

Converts self into a Left variant of Either<Self, Self> if into_left is true, or into a Right variant otherwise.

fn into_either_with<F>(self, into_left: F) -> Either<Self, Self>
where
    F: FnOnce(&Self) -> bool,

Converts self into a Left variant of Either<Self, Self> if into_left(&self) returns true, or into a Right variant otherwise.

impl<T> Pointable for T
impl<SS, SP> SupersetOf<SS> for SP
where
    SS: SubsetOf<SP>,

fn to_subset(&self) -> Option<SS>

The inverse inclusion map: attempts to construct self from the equivalent element of its superset.

fn is_in_subset(&self) -> bool

Checks if self is actually part of its subset T (and can be converted to it).

fn to_subset_unchecked(&self) -> SS

Use with care! Same as self.to_subset but without any property checks. Always succeeds.

fn from_subset(element: &SS) -> SP

The inclusion map: converts self to the equivalent element of its superset.