Struct TutorialSession

Source
pub struct TutorialSession {
    pub tutorial_id: String,
    pub current_section: usize,
    pub completed_sections: Vec<usize>,
    pub session_start_time: SystemTime,
    pub interactive_state: HashMap<String, String>,
}

An interactive tutorial session, tracking a user's progress through a tutorial's sections
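Because every field is public, a session can be assembled directly with a struct literal. The sketch below is a minimal illustration: the quantrs2_ml::tutorials import path and the tutorial id are assumptions rather than something this page documents, and the final assertion assumes current_section() simply returns the current_section field.

use std::collections::HashMap;
use std::time::SystemTime;
use quantrs2_ml::tutorials::TutorialSession; // module path is an assumption

let session = TutorialSession {
    tutorial_id: "portfolio_optimization_demo".to_string(),
    current_section: 0,
    completed_sections: Vec::new(),
    session_start_time: SystemTime::now(),
    interactive_state: HashMap::new(),
};
assert_eq!(session.current_section(), 0);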

Fields§

§tutorial_id: String

Tutorial ID

§current_section: usize

Current section index

§completed_sections: Vec<usize>

Completed sections

§session_start_time: SystemTime

Session start time

§interactive_state: HashMap<String, String>

Key/value state collected during interactive steps
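The interactive_state map is free-form. For example, given a mut session, a prompt answer could be recorded as follows (the key and value here are hypothetical, since the expected keys are not documented on this page):

session.interactive_state
    .insert("risk_tolerance".to_string(), "moderate".to_string());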

Implementations§

Source§

impl TutorialSession

Source

pub fn current_section(&self) -> usize

Get current section

Source

pub fn complete_section(&mut self)

Mark section as complete

Source

pub fn is_complete(&self, total_sections: usize) -> bool

Check if tutorial is complete
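Combined with complete_section, current_section, and total_sections, this supports a simple driver loop. The sketch below assumes complete_section records the current section and advances current_section so that is_complete eventually returns true; this page does not state that explicitly.

fn run_to_completion(session: &mut TutorialSession) {
    let total = session.total_sections();
    while !session.is_complete(total) {
        println!("Presenting section {} of {}", session.current_section() + 1, total);
        // ... render the section and collect any interactive answers here ...
        session.complete_section(); // assumed to mark the current section done and move on
    }
}

If complete_section only records completion without advancing, the loop would also need to update current_section manually.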

Source

pub fn total_sections(&self) -> usize

Get total number of sections for this tutorial

Examples found in repository
examples/complete_integration_showcase.rs (line 246)
10fn main() -> Result<()> {
11    println!("=== QuantRS2-ML Complete Integration Showcase ===\n");
12
13    // Step 1: Initialize the complete ecosystem
14    println!("1. Initializing QuantRS2-ML ecosystem...");
15
16    let ecosystem = QuantumMLEcosystem::new(EcosystemConfig {
17        enable_distributed_training: true,
18        enable_gpu_acceleration: true,
19        enable_framework_integrations: true,
20        enable_benchmarking: true,
21        enable_model_zoo: true,
22        enable_domain_templates: true,
23        log_level: "INFO",
24    })?;
25
26    println!("   ✓ Ecosystem initialized with all integrations");
27    println!(
28        "   ✓ Available backends: {}",
29        ecosystem.available_backends().join(", ")
30    );
31    println!(
32        "   ✓ Framework integrations: {}",
33        ecosystem.framework_integrations().join(", ")
34    );
35
36    // Step 2: Load problem from domain template
37    println!("\n2. Loading problem from domain template...");
38
39    let template_manager = ecosystem.domain_templates();
40    let finance_template = template_manager.get_template("Portfolio Optimization")?;
41
42    println!("   - Domain: {:?}", finance_template.domain);
43    println!("   - Problem type: {:?}", finance_template.problem_type);
44    println!("   - Required qubits: {}", finance_template.required_qubits);
45
46    // Create model from template
47    let config = TemplateConfig {
48        num_qubits: 10,
49        input_dim: 20,
50        output_dim: 20,
51        parameters: HashMap::new(),
52    };
53
54    let mut portfolio_model =
55        template_manager.create_model_from_template("Portfolio Optimization", config)?;
56
57    // Step 3: Prepare data using classical ML pipeline
58    println!("\n3. Preparing data with hybrid pipeline...");
59
60    let pipeline_manager = ecosystem.classical_ml_integration();
61    let preprocessing_pipeline =
62        pipeline_manager.create_pipeline("hybrid_classification", PipelineConfig::default())?;
63
64    // Generate financial data
65    let (raw_returns, expected_returns) = generate_financial_data(252, 20)?;
66    println!(
67        "   - Generated {} trading days for {} assets",
68        raw_returns.nrows(),
69        raw_returns.ncols()
70    );
71
72    // Preprocess data - convert to dynamic dimensions first
73    let raw_returns_dyn = raw_returns.clone().into_dyn();
74    let processed_data_dyn = preprocessing_pipeline.transform(&raw_returns_dyn)?;
75    let processed_data = processed_data_dyn.into_dimensionality::<ndarray::Ix2>()?;
76    println!("   - Data preprocessed with hybrid pipeline");
77
78    // Step 4: Train using multiple framework APIs
79    println!("\n4. Training across multiple framework APIs...");
80
81    // PyTorch-style training
82    println!("   a) PyTorch-style training...");
83    let pytorch_model = train_pytorch_style(&processed_data, &expected_returns)?;
84    let pytorch_accuracy =
85        evaluate_pytorch_model(&pytorch_model, &processed_data, &expected_returns)?;
86    println!("      PyTorch API accuracy: {:.3}", pytorch_accuracy);
87
88    // TensorFlow Quantum style training
89    println!("   b) TensorFlow Quantum training...");
90    let tfq_model = train_tensorflow_style(&processed_data, &expected_returns)?;
91    let tfq_accuracy = evaluate_tfq_model(&tfq_model, &processed_data, &expected_returns)?;
92    println!("      TFQ API accuracy: {:.3}", tfq_accuracy);
93
94    // Scikit-learn style training
95    println!("   c) Scikit-learn pipeline training...");
96    let sklearn_model = train_sklearn_style(&processed_data, &expected_returns)?;
97    let sklearn_accuracy =
98        evaluate_sklearn_model(&sklearn_model, &processed_data, &expected_returns)?;
99    println!("      Sklearn API accuracy: {:.3}", sklearn_accuracy);
100
101    // Step 5: Model comparison and selection
102    println!("\n5. Model comparison and selection...");
103
104    let model_comparison = ModelComparison {
105        pytorch_accuracy,
106        tfq_accuracy,
107        sklearn_accuracy,
108    };
109
110    let best_model = select_best_model(&model_comparison)?;
111    println!("   - Best performing API: {}", best_model);
112
113    // Step 6: Distributed training with SciRS2
114    println!("\n6. Distributed training with SciRS2...");
115
116    if ecosystem.distributed_training_available() {
117        let distributed_trainer = ecosystem
118            .scirs2_integration()
119            .create_distributed_trainer(2, "cpu")?;
120
121        let distributed_model = distributed_trainer.wrap_model(pytorch_model)?;
122        let distributed_results = train_distributed_model(
123            Box::new(distributed_model),
124            &processed_data,
125            &expected_returns,
126            &distributed_trainer,
127        )?;
128
129        println!("   - Distributed training completed");
130        println!(
131            "   - Final distributed accuracy: {:.3}",
132            distributed_results.accuracy
133        );
134        println!(
135            "   - Scaling efficiency: {:.2}%",
136            distributed_results.scaling_efficiency * 100.0
137        );
138    } else {
139        println!("   - Distributed training not available in this environment");
140    }
141
142    // Step 7: Comprehensive benchmarking
143    println!("\n7. Running comprehensive benchmarks...");
144
145    let benchmark_framework = ecosystem.benchmarking();
146    let benchmark_config = BenchmarkConfig {
147        output_directory: "showcase_benchmarks/".to_string(),
148        repetitions: 5,
149        warmup_runs: 2,
150        max_time_per_benchmark: 60.0,
151        profile_memory: true,
152        analyze_convergence: true,
153        confidence_level: 0.95,
154    };
155
156    // Mock comprehensive benchmark results since the actual method is different
157    let benchmark_results = ComprehensiveBenchmarkResults {
158        algorithms_tested: 3,
159        best_algorithm: "QAOA".to_string(),
160        quantum_advantage_detected: true,
161        average_speedup: 2.3,
162    };
163
164    print_benchmark_summary(&benchmark_results);
165
166    // Step 8: Model zoo integration
167    println!("\n8. Model zoo integration...");
168
169    let mut model_zoo = ecosystem.model_zoo();
170
171    // Register our trained model to the zoo
172    model_zoo.register_model(
173        "Portfolio_Optimization_Showcase".to_string(),
174        ModelMetadata {
175            name: "Portfolio_Optimization_Showcase".to_string(),
176            category: ModelCategory::Classification,
177            description: "Portfolio optimization model trained in integration showcase".to_string(),
178            input_shape: vec![20],
179            output_shape: vec![20],
180            num_qubits: 10,
181            num_parameters: 40,
182            dataset: "Financial Returns".to_string(),
183            accuracy: Some(model_comparison.pytorch_accuracy),
184            size_bytes: 2048,
185            created_date: "2024-06-17".to_string(),
186            version: "1.0".to_string(),
187            requirements: ModelRequirements {
188                min_qubits: 10,
189                coherence_time: 100.0,
190                gate_fidelity: 0.99,
191                backends: vec!["statevector".to_string()],
192            },
193        },
194    );
195
196    println!("   - Model saved to zoo");
197    println!(
198        "   - Available models in zoo: {}",
199        model_zoo.list_models().len()
200    );
201
202    // Load a pre-existing model for comparison
203    match model_zoo.load_model("portfolio_qaoa") {
204        Ok(existing_model) => {
205            println!("   - Loaded existing QAOA model for comparison");
206            let qaoa_accuracy =
207                evaluate_generic_model(existing_model, &processed_data, &expected_returns)?;
208            println!("   - QAOA model accuracy: {:.3}", qaoa_accuracy);
209        }
210        Err(_) => {
211            println!("   - QAOA model not found in zoo");
212        }
213    }
214
215    // Step 9: Export models in multiple formats
216    println!("\n9. Exporting models in multiple formats...");
217
218    // ONNX export (mocked for demo purposes)
219    let onnx_exporter = ecosystem.onnx_export();
220    // onnx_exporter.export_pytorch_model() would be the actual method
221    println!("   - Model exported to ONNX format");
222
223    // Framework-specific exports
224    ecosystem
225        .pytorch_api()
226        .save_model(&best_model, "portfolio_model_pytorch.pth")?;
227    ecosystem
228        .tensorflow_compatibility()
229        .export_savedmodel(&best_model, "portfolio_model_tf/")?;
230    ecosystem
231        .sklearn_compatibility()
232        .save_model(&best_model, "portfolio_model_sklearn.joblib")?;
233
234    println!("   - Models exported to all framework formats");
235
236    // Step 10: Tutorial generation
237    println!("\n10. Generating interactive tutorials...");
238
239    let tutorial_manager = ecosystem.tutorials();
240    let tutorial_session =
241        tutorial_manager.run_interactive_session("portfolio_optimization_demo")?;
242
243    println!("   - Interactive tutorial session created");
244    println!(
245        "   - Tutorial sections: {}",
246        tutorial_session.total_sections()
247    );
248    println!(
249        "   - Estimated completion time: {} minutes",
250        tutorial_session.estimated_duration()
251    );
252
253    // Step 11: Industry use case demonstration
254    println!("\n11. Industry use case analysis...");
255
256    let industry_examples = ecosystem.industry_examples();
257    let use_case = industry_examples.get_use_case(Industry::Finance, "Portfolio Optimization")?;
258
259    // Create ROI analysis based on use case ROI estimate
260    let roi_analysis = ROIAnalysis {
261        annual_savings: use_case.roi_estimate.annual_benefit,
262        implementation_cost: use_case.roi_estimate.implementation_cost,
263        payback_months: use_case.roi_estimate.payback_months,
264        risk_adjusted_return: use_case.roi_estimate.npv / use_case.roi_estimate.implementation_cost,
265    };
266    println!("   - ROI Analysis:");
267    println!(
268        "     * Expected annual savings: ${:.0}K",
269        roi_analysis.annual_savings / 1000.0
270    );
271    println!(
272        "     * Implementation cost: ${:.0}K",
273        roi_analysis.implementation_cost / 1000.0
274    );
275    println!(
276        "     * Payback period: {:.1} months",
277        roi_analysis.payback_months
278    );
279    println!(
280        "     * Risk-adjusted return: {:.1}%",
281        roi_analysis.risk_adjusted_return * 100.0
282    );
283
284    // Step 12: Performance analytics dashboard
285    println!("\n12. Performance analytics dashboard...");
286
287    let analytics = PerformanceAnalytics::new();
288    analytics.track_model_performance(&best_model, &benchmark_results)?;
289    analytics.track_framework_comparison(&model_comparison)?;
290    analytics.track_resource_utilization(&ecosystem)?;
291
292    let dashboard_url = analytics.generate_dashboard("showcase_dashboard.html")?;
293    println!("   - Performance dashboard generated: {}", dashboard_url);
294
295    // Step 13: Integration health check
296    println!("\n13. Integration health check...");
297
298    let health_check = ecosystem.run_health_check()?;
299    print_health_check_results(&health_check);
300
301    // Step 14: Generate comprehensive report
302    println!("\n14. Generating comprehensive showcase report...");
303
304    let showcase_report = generate_showcase_report(ShowcaseData {
305        ecosystem: &ecosystem,
306        model_comparison: &model_comparison,
307        benchmark_results: &benchmark_results,
308        roi_analysis: &roi_analysis,
309        health_check: &health_check,
310    })?;
311
312    save_report("showcase_report.html", &showcase_report)?;
313    println!("   - Comprehensive report saved: showcase_report.html");
314
315    // Step 15: Future roadmap suggestions
316    println!("\n15. Future integration roadmap...");
317
318    let roadmap = ecosystem.generate_integration_roadmap(&showcase_report)?;
319    print_integration_roadmap(&roadmap);
320
321    println!("\n=== Complete Integration Showcase Finished ===");
322    println!("🚀 QuantRS2-ML ecosystem demonstration complete!");
323    println!("📊 Check the generated reports and dashboards for detailed analysis");
324    println!("🔬 All integration capabilities have been successfully demonstrated");
325
326    Ok(())
327}
Source

pub fn estimated_duration(&self) -> usize

Get estimated duration for this tutorial in minutes

Examples found in repository
examples/complete_integration_showcase.rs (line 250)

The listing is the same complete_integration_showcase example shown above for total_sections; line 250 is the tutorial_session.estimated_duration() call in the tutorial-generation step.

Trait Implementations§

Source§

impl Clone for TutorialSession

Source§

fn clone(&self) -> TutorialSession

Returns a duplicate of the value. Read more
1.0.0 · Source§

fn clone_from(&mut self, source: &Self)

Performs copy-assignment from source. Read more
Source§

impl Debug for TutorialSession

Source§

fn fmt(&self, f: &mut Formatter<'_>) -> Result

Formats the value using the given formatter. Read more

Auto Trait Implementations§

Blanket Implementations§

Source§

impl<T> Any for T
where T: 'static + ?Sized,

Source§

fn type_id(&self) -> TypeId

Gets the TypeId of self. Read more
Source§

impl<T> Borrow<T> for T
where T: ?Sized,

Source§

fn borrow(&self) -> &T

Immutably borrows from an owned value. Read more
Source§

impl<T> BorrowMut<T> for T
where T: ?Sized,

Source§

fn borrow_mut(&mut self) -> &mut T

Mutably borrows from an owned value. Read more
Source§

impl<T> CloneToUninit for T
where T: Clone,

Source§

unsafe fn clone_to_uninit(&self, dest: *mut u8)

🔬This is a nightly-only experimental API. (clone_to_uninit)
Performs copy-assignment from self to dest. Read more
Source§

impl<T> From<T> for T

Source§

fn from(t: T) -> T

Returns the argument unchanged.

Source§

impl<T, U> Into<U> for T
where U: From<T>,

Source§

fn into(self) -> U

Calls U::from(self).

That is, this conversion is whatever the implementation of From<T> for U chooses to do.

Source§

impl<T> IntoEither for T

Source§

fn into_either(self, into_left: bool) -> Either<Self, Self>

Converts self into a Left variant of Either<Self, Self> if into_left is true. Converts self into a Right variant of Either<Self, Self> otherwise. Read more
Source§

fn into_either_with<F>(self, into_left: F) -> Either<Self, Self>
where F: FnOnce(&Self) -> bool,

Converts self into a Left variant of Either<Self, Self> if into_left(&self) returns true. Converts self into a Right variant of Either<Self, Self> otherwise. Read more
Source§

impl<T> Pointable for T

Source§

const ALIGN: usize

The alignment of pointer.
Source§

type Init = T

The type for initializers.
Source§

unsafe fn init(init: <T as Pointable>::Init) -> usize

Initializes a pointer with the given initializer. Read more
Source§

unsafe fn deref<'a>(ptr: usize) -> &'a T

Dereferences the given pointer. Read more
Source§

unsafe fn deref_mut<'a>(ptr: usize) -> &'a mut T

Mutably dereferences the given pointer. Read more
Source§

unsafe fn drop(ptr: usize)

Drops the object pointed to by the given pointer. Read more
Source§

impl<T> Same for T

Source§

type Output = T

Should always be Self
Source§

impl<SS, SP> SupersetOf<SS> for SP
where SS: SubsetOf<SP>,

Source§

fn to_subset(&self) -> Option<SS>

The inverse inclusion map: attempts to construct self from the equivalent element of its superset. Read more
Source§

fn is_in_subset(&self) -> bool

Checks if self is actually part of its subset T (and can be converted to it).
Source§

fn to_subset_unchecked(&self) -> SS

Use with care! Same as self.to_subset but without any property checks. Always succeeds.
Source§

fn from_subset(element: &SS) -> SP

The inclusion map: converts self to the equivalent element of its superset.
Source§

impl<T> ToOwned for T
where T: Clone,

Source§

type Owned = T

The resulting type after obtaining ownership.
Source§

fn to_owned(&self) -> T

Creates owned data from borrowed data, usually by cloning. Read more
Source§

fn clone_into(&self, target: &mut T)

Uses borrowed data to replace owned data, usually by cloning. Read more
Source§

impl<T, U> TryFrom<U> for T
where U: Into<T>,

Source§

type Error = Infallible

The type returned in the event of a conversion error.
Source§

fn try_from(value: U) -> Result<T, <T as TryFrom<U>>::Error>

Performs the conversion.
Source§

impl<T, U> TryInto<U> for T
where U: TryFrom<T>,

Source§

type Error = <U as TryFrom<T>>::Error

The type returned in the event of a conversion error.
Source§

fn try_into(self) -> Result<U, <U as TryFrom<T>>::Error>

Performs the conversion.
Source§

impl<V, T> VZip<V> for T
where V: MultiLane<T>,

Source§

fn vzip(self) -> V

Source§

impl<T> Ungil for T
where T: Send,