pub struct TutorialSession {
pub tutorial_id: String,
pub current_section: usize,
pub completed_sections: Vec<usize>,
pub session_start_time: SystemTime,
pub interactive_state: HashMap<String, String>,
}
Interactive tutorial session
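All fields are public, so a session value can be assembled directly when needed. A minimal sketch, assuming TutorialSession is in scope (in practice a session is usually produced by the tutorial manager, as the repository example further below shows):

use std::collections::HashMap;
use std::time::SystemTime;

// Assemble a session by hand; field names and types mirror the definition above.
let session = TutorialSession {
    tutorial_id: "portfolio_optimization_demo".to_string(),
    current_section: 0,
    completed_sections: Vec::new(),
    session_start_time: SystemTime::now(),
    interactive_state: HashMap::new(),
};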
Fields
tutorial_id: String
Tutorial ID
current_section: usize
Current section index
completed_sections: Vec<usize>
Completed sections
session_start_time: SystemTime
Session start time
interactive_state: HashMap<String, String>
Interactive state
Implementations
impl TutorialSession
pub fn current_section(&self) -> usize
Get current section
pub fn complete_section(&mut self)
Mark section as complete
pub fn is_complete(&self, total_sections: usize) -> bool
Check if tutorial is complete
pub fn total_sections(&self) -> usize
Get total number of sections for this tutorial
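Taken together, these accessors support a simple progress loop. The sketch below is an assumption about the advancement semantics: it presumes complete_section() marks the current section done and moves current_section forward, which the documentation above does not state explicitly.

// Hedged sketch: drive a session to completion.
// Assumption: complete_section() records the current section and advances it.
fn run_to_completion(session: &mut TutorialSession) {
    let total = session.total_sections();
    while !session.is_complete(total) {
        println!("Presenting section {}", session.current_section());
        session.complete_section();
    }
}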
Examples found in repository:
examples/complete_integration_showcase.rs (line 247)
11 fn main() -> Result<()> {
12 println!("=== QuantRS2-ML Complete Integration Showcase ===\n");
13
14 // Step 1: Initialize the complete ecosystem
15 println!("1. Initializing QuantRS2-ML ecosystem...");
16
17 let ecosystem = QuantumMLEcosystem::new(EcosystemConfig {
18 enable_distributed_training: true,
19 enable_gpu_acceleration: true,
20 enable_framework_integrations: true,
21 enable_benchmarking: true,
22 enable_model_zoo: true,
23 enable_domain_templates: true,
24 log_level: "INFO",
25 })?;
26
27 println!(" ✓ Ecosystem initialized with all integrations");
28 println!(
29 " ✓ Available backends: {}",
30 ecosystem.available_backends().join(", ")
31 );
32 println!(
33 " ✓ Framework integrations: {}",
34 ecosystem.framework_integrations().join(", ")
35 );
36
37 // Step 2: Load problem from domain template
38 println!("\n2. Loading problem from domain template...");
39
40 let template_manager = ecosystem.domain_templates();
41 let finance_template = template_manager.get_template("Portfolio Optimization")?;
42
43 println!(" - Domain: {:?}", finance_template.domain);
44 println!(" - Problem type: {:?}", finance_template.problem_type);
45 println!(" - Required qubits: {}", finance_template.required_qubits);
46
47 // Create model from template
48 let config = TemplateConfig {
49 num_qubits: 10,
50 input_dim: 20,
51 output_dim: 20,
52 parameters: HashMap::new(),
53 };
54
55 let mut portfolio_model =
56 template_manager.create_model_from_template("Portfolio Optimization", config)?;
57
58 // Step 3: Prepare data using classical ML pipeline
59 println!("\n3. Preparing data with hybrid pipeline...");
60
61 let pipeline_manager = ecosystem.classical_ml_integration();
62 let preprocessing_pipeline =
63 pipeline_manager.create_pipeline("hybrid_classification", PipelineConfig::default())?;
64
65 // Generate financial data
66 let (raw_returns, expected_returns) = generate_financial_data(252, 20)?;
67 println!(
68 " - Generated {} trading days for {} assets",
69 raw_returns.nrows(),
70 raw_returns.ncols()
71 );
72
73 // Preprocess data - convert to dynamic dimensions first
74 let raw_returns_dyn = raw_returns.clone().into_dyn();
75 let processed_data_dyn = preprocessing_pipeline.transform(&raw_returns_dyn)?;
76 let processed_data = processed_data_dyn.into_dimensionality::<scirs2_core::ndarray::Ix2>()?;
77 println!(" - Data preprocessed with hybrid pipeline");
78
79 // Step 4: Train using multiple framework APIs
80 println!("\n4. Training across multiple framework APIs...");
81
82 // PyTorch-style training
83 println!(" a) PyTorch-style training...");
84 let pytorch_model = train_pytorch_style(&processed_data, &expected_returns)?;
85 let pytorch_accuracy =
86 evaluate_pytorch_model(&pytorch_model, &processed_data, &expected_returns)?;
87 println!(" PyTorch API accuracy: {:.3}", pytorch_accuracy);
88
89 // TensorFlow Quantum style training
90 println!(" b) TensorFlow Quantum training...");
91 let tfq_model = train_tensorflow_style(&processed_data, &expected_returns)?;
92 let tfq_accuracy = evaluate_tfq_model(&tfq_model, &processed_data, &expected_returns)?;
93 println!(" TFQ API accuracy: {:.3}", tfq_accuracy);
94
95 // Scikit-learn style training
96 println!(" c) Scikit-learn pipeline training...");
97 let sklearn_model = train_sklearn_style(&processed_data, &expected_returns)?;
98 let sklearn_accuracy =
99 evaluate_sklearn_model(&sklearn_model, &processed_data, &expected_returns)?;
100 println!(" Sklearn API accuracy: {:.3}", sklearn_accuracy);
101
102 // Step 5: Model comparison and selection
103 println!("\n5. Model comparison and selection...");
104
105 let model_comparison = ModelComparison {
106 pytorch_accuracy,
107 tfq_accuracy,
108 sklearn_accuracy,
109 };
110
111 let best_model = select_best_model(&model_comparison)?;
112 println!(" - Best performing API: {}", best_model);
113
114 // Step 6: Distributed training with SciRS2
115 println!("\n6. Distributed training with SciRS2...");
116
117 if ecosystem.distributed_training_available() {
118 let distributed_trainer = ecosystem
119 .scirs2_integration()
120 .create_distributed_trainer(2, "cpu")?;
121
122 let distributed_model = distributed_trainer.wrap_model(pytorch_model)?;
123 let distributed_results = train_distributed_model(
124 Box::new(distributed_model),
125 &processed_data,
126 &expected_returns,
127 &distributed_trainer,
128 )?;
129
130 println!(" - Distributed training completed");
131 println!(
132 " - Final distributed accuracy: {:.3}",
133 distributed_results.accuracy
134 );
135 println!(
136 " - Scaling efficiency: {:.2}%",
137 distributed_results.scaling_efficiency * 100.0
138 );
139 } else {
140 println!(" - Distributed training not available in this environment");
141 }
142
143 // Step 7: Comprehensive benchmarking
144 println!("\n7. Running comprehensive benchmarks...");
145
146 let benchmark_framework = ecosystem.benchmarking();
147 let benchmark_config = BenchmarkConfig {
148 output_directory: "showcase_benchmarks/".to_string(),
149 repetitions: 5,
150 warmup_runs: 2,
151 max_time_per_benchmark: 60.0,
152 profile_memory: true,
153 analyze_convergence: true,
154 confidence_level: 0.95,
155 };
156
157 // Mock comprehensive benchmark results since the actual method is different
158 let benchmark_results = ComprehensiveBenchmarkResults {
159 algorithms_tested: 3,
160 best_algorithm: "QAOA".to_string(),
161 quantum_advantage_detected: true,
162 average_speedup: 2.3,
163 };
164
165 print_benchmark_summary(&benchmark_results);
166
167 // Step 8: Model zoo integration
168 println!("\n8. Model zoo integration...");
169
170 let mut model_zoo = ecosystem.model_zoo();
171
172 // Register our trained model to the zoo
173 model_zoo.register_model(
174 "Portfolio_Optimization_Showcase".to_string(),
175 ModelMetadata {
176 name: "Portfolio_Optimization_Showcase".to_string(),
177 category: ModelCategory::Classification,
178 description: "Portfolio optimization model trained in integration showcase".to_string(),
179 input_shape: vec![20],
180 output_shape: vec![20],
181 num_qubits: 10,
182 num_parameters: 40,
183 dataset: "Financial Returns".to_string(),
184 accuracy: Some(model_comparison.pytorch_accuracy),
185 size_bytes: 2048,
186 created_date: "2024-06-17".to_string(),
187 version: "1.0".to_string(),
188 requirements: ModelRequirements {
189 min_qubits: 10,
190 coherence_time: 100.0,
191 gate_fidelity: 0.99,
192 backends: vec!["statevector".to_string()],
193 },
194 },
195 );
196
197 println!(" - Model saved to zoo");
198 println!(
199 " - Available models in zoo: {}",
200 model_zoo.list_models().len()
201 );
202
203 // Load a pre-existing model for comparison
204 match model_zoo.load_model("portfolio_qaoa") {
205 Ok(existing_model) => {
206 println!(" - Loaded existing QAOA model for comparison");
207 let qaoa_accuracy =
208 evaluate_generic_model(existing_model, &processed_data, &expected_returns)?;
209 println!(" - QAOA model accuracy: {:.3}", qaoa_accuracy);
210 }
211 Err(_) => {
212 println!(" - QAOA model not found in zoo");
213 }
214 }
215
216 // Step 9: Export models in multiple formats
217 println!("\n9. Exporting models in multiple formats...");
218
219 // ONNX export (mocked for demo purposes)
220 let onnx_exporter = ecosystem.onnx_export();
221 // onnx_exporter.export_pytorch_model() would be the actual method
222 println!(" - Model exported to ONNX format");
223
224 // Framework-specific exports
225 ecosystem
226 .pytorch_api()
227 .save_model(&best_model, "portfolio_model_pytorch.pth")?;
228 ecosystem
229 .tensorflow_compatibility()
230 .export_savedmodel(&best_model, "portfolio_model_tf/")?;
231 ecosystem
232 .sklearn_compatibility()
233 .save_model(&best_model, "portfolio_model_sklearn.joblib")?;
234
235 println!(" - Models exported to all framework formats");
236
237 // Step 10: Tutorial generation
238 println!("\n10. Generating interactive tutorials...");
239
240 let tutorial_manager = ecosystem.tutorials();
241 let tutorial_session =
242 tutorial_manager.run_interactive_session("portfolio_optimization_demo")?;
243
244 println!(" - Interactive tutorial session created");
245 println!(
246 " - Tutorial sections: {}",
247 tutorial_session.total_sections()
248 );
249 println!(
250 " - Estimated completion time: {} minutes",
251 tutorial_session.estimated_duration()
252 );
253
254 // Step 11: Industry use case demonstration
255 println!("\n11. Industry use case analysis...");
256
257 let industry_examples = ecosystem.industry_examples();
258 let use_case = industry_examples.get_use_case(Industry::Finance, "Portfolio Optimization")?;
259
260 // Create ROI analysis based on use case ROI estimate
261 let roi_analysis = ROIAnalysis {
262 annual_savings: use_case.roi_estimate.annual_benefit,
263 implementation_cost: use_case.roi_estimate.implementation_cost,
264 payback_months: use_case.roi_estimate.payback_months,
265 risk_adjusted_return: use_case.roi_estimate.npv / use_case.roi_estimate.implementation_cost,
266 };
267 println!(" - ROI Analysis:");
268 println!(
269 " * Expected annual savings: ${:.0}K",
270 roi_analysis.annual_savings / 1000.0
271 );
272 println!(
273 " * Implementation cost: ${:.0}K",
274 roi_analysis.implementation_cost / 1000.0
275 );
276 println!(
277 " * Payback period: {:.1} months",
278 roi_analysis.payback_months
279 );
280 println!(
281 " * Risk-adjusted return: {:.1}%",
282 roi_analysis.risk_adjusted_return * 100.0
283 );
284
285 // Step 12: Performance analytics dashboard
286 println!("\n12. Performance analytics dashboard...");
287
288 let analytics = PerformanceAnalytics::new();
289 analytics.track_model_performance(&best_model, &benchmark_results)?;
290 analytics.track_framework_comparison(&model_comparison)?;
291 analytics.track_resource_utilization(&ecosystem)?;
292
293 let dashboard_url = analytics.generate_dashboard("showcase_dashboard.html")?;
294 println!(" - Performance dashboard generated: {}", dashboard_url);
295
296 // Step 13: Integration health check
297 println!("\n13. Integration health check...");
298
299 let health_check = ecosystem.run_health_check()?;
300 print_health_check_results(&health_check);
301
302 // Step 14: Generate comprehensive report
303 println!("\n14. Generating comprehensive showcase report...");
304
305 let showcase_report = generate_showcase_report(ShowcaseData {
306 ecosystem: &ecosystem,
307 model_comparison: &model_comparison,
308 benchmark_results: &benchmark_results,
309 roi_analysis: &roi_analysis,
310 health_check: &health_check,
311 })?;
312
313 save_report("showcase_report.html", &showcase_report)?;
314 println!(" - Comprehensive report saved: showcase_report.html");
315
316 // Step 15: Future roadmap suggestions
317 println!("\n15. Future integration roadmap...");
318
319 let roadmap = ecosystem.generate_integration_roadmap(&showcase_report)?;
320 print_integration_roadmap(&roadmap);
321
322 println!("\n=== Complete Integration Showcase Finished ===");
323 println!("🚀 QuantRS2-ML ecosystem demonstration complete!");
324 println!("📊 Check the generated reports and dashboards for detailed analysis");
325 println!("🔬 All integration capabilities have been successfully demonstrated");
326
327 Ok(())
328}
pub fn estimated_duration(&self) -> usize
Get estimated duration for this tutorial in minutes
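A small usage sketch that compares elapsed wall-clock time against the estimate; it relies only on the public session_start_time field and the minutes unit stated above.

// Hedged sketch: report elapsed time versus the tutorial's estimated duration.
fn report_progress(session: &TutorialSession) {
    let elapsed_min = session
        .session_start_time
        .elapsed()
        .map(|d| d.as_secs() / 60)
        .unwrap_or(0);
    println!(
        "Elapsed: {} min of an estimated {} min",
        elapsed_min,
        session.estimated_duration()
    );
}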
Examples found in repository:
examples/complete_integration_showcase.rs (line 251); this is the same listing shown above for total_sections.
Trait Implementations
impl Clone for TutorialSession
fn clone(&self) -> TutorialSession
Returns a duplicate of the value.
fn clone_from(&mut self, source: &Self)
Performs copy-assignment from source.
Auto Trait Implementations
impl Freeze for TutorialSession
impl RefUnwindSafe for TutorialSession
impl Send for TutorialSession
impl Sync for TutorialSession
impl Unpin for TutorialSession
impl UnwindSafe for TutorialSession
Blanket Implementations
impl<T> BorrowMut<T> for T
where
    T: ?Sized,
fn borrow_mut(&mut self) -> &mut T
Mutably borrows from an owned value.
impl<T> CloneToUninit for T
where
    T: Clone,
impl<T> IntoEither for T
fn into_either(self, into_left: bool) -> Either<Self, Self>
Converts self into a Left variant of Either<Self, Self> if into_left is true; otherwise converts self into a Right variant.
fn into_either_with<F>(self, into_left: F) -> Either<Self, Self>
Converts self into a Left variant of Either<Self, Self> if into_left(&self) returns true; otherwise converts self into a Right variant.
impl<T> Pointable for T
impl<SS, SP> SupersetOf<SS> for SP
where
    SS: SubsetOf<SP>,
fn to_subset(&self) -> Option<SS>
The inverse inclusion map: attempts to construct self from the equivalent element of its superset.
fn is_in_subset(&self) -> bool
Checks if self is actually part of its subset T (and can be converted to it).
fn to_subset_unchecked(&self) -> SS
Use with care! Same as self.to_subset but without any property checks. Always succeeds.
fn from_subset(element: &SS) -> SP
The inclusion map: converts self to the equivalent element of its superset.