pub struct IndustryExampleManager { /* private fields */ }
Manager for industry-specific example use cases, supporting lookup by industry and name, search by ROI and implementation complexity, example execution, and benchmark retrieval.
Implementations
impl IndustryExampleManager
pub fn get_industry_use_cases(
    &self,
    industry: &Industry,
) -> Option<&Vec<UseCase>>
Get use cases for a specific industry
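A minimal sketch of this lookup; how the manager reference is obtained (for example via the ecosystem shown in the repository example further down) and the surrounding helper function are assumptions, not part of this documented API.

// Sketch only: the helper function is hypothetical; only the
// get_industry_use_cases call is part of the documented API.
fn count_use_cases(manager: &IndustryExampleManager, industry: &Industry) {
    match manager.get_industry_use_cases(industry) {
        // Some(..) borrows the Vec<UseCase> registered for this industry
        Some(use_cases) => println!("{} use case(s) registered", use_cases.len()),
        // None means no use cases are registered for this industry
        None => println!("no use cases registered for this industry"),
    }
}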
pub fn get_available_industries(&self) -> Vec<Industry>
Get all available industries
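A short sketch that reports how many industries are currently registered; the helper function and how the manager is obtained are illustrative assumptions.

// Sketch only: the surrounding function is hypothetical; only the
// get_available_industries call is part of the documented API.
fn report_industries(manager: &IndustryExampleManager) {
    let industries: Vec<Industry> = manager.get_available_industries();
    println!("{} industries with example use cases", industries.len());
}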
pub fn get_use_case(
    &self,
    industry: Industry,
    use_case_name: &str,
) -> Result<&UseCase>
Get a specific use case by industry and name
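A focused sketch of the lookup on its own, reusing the Industry::Finance / "Portfolio Optimization" pair and the roi_estimate.npv field that appear in the repository example below; the error handling and helper function are illustrative.

// Sketch only: the surrounding function and error handling are illustrative.
fn show_portfolio_case(manager: &IndustryExampleManager) {
    match manager.get_use_case(Industry::Finance, "Portfolio Optimization") {
        // Ok(..) borrows the matching UseCase; roi_estimate.npv mirrors the
        // field used in the repository example below
        Ok(use_case) => println!("NPV estimate: {:.0}", use_case.roi_estimate.npv),
        Err(_) => println!("use case not found"),
    }
}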
Examples found in repository
examples/complete_integration_showcase.rs (line 259)
12 fn main() -> Result<()> {
13 println!("=== QuantRS2-ML Complete Integration Showcase ===\n");
14
15 // Step 1: Initialize the complete ecosystem
16 println!("1. Initializing QuantRS2-ML ecosystem...");
17
18 let ecosystem = QuantumMLEcosystem::new(EcosystemConfig {
19 enable_distributed_training: true,
20 enable_gpu_acceleration: true,
21 enable_framework_integrations: true,
22 enable_benchmarking: true,
23 enable_model_zoo: true,
24 enable_domain_templates: true,
25 log_level: "INFO",
26 })?;
27
28 println!(" ✓ Ecosystem initialized with all integrations");
29 println!(
30 " ✓ Available backends: {}",
31 ecosystem.available_backends().join(", ")
32 );
33 println!(
34 " ✓ Framework integrations: {}",
35 ecosystem.framework_integrations().join(", ")
36 );
37
38 // Step 2: Load problem from domain template
39 println!("\n2. Loading problem from domain template...");
40
41 let template_manager = ecosystem.domain_templates();
42 let finance_template = template_manager.get_template("Portfolio Optimization")?;
43
44 println!(" - Domain: {:?}", finance_template.domain);
45 println!(" - Problem type: {:?}", finance_template.problem_type);
46 println!(" - Required qubits: {}", finance_template.required_qubits);
47
48 // Create model from template
49 let config = TemplateConfig {
50 num_qubits: 10,
51 input_dim: 20,
52 output_dim: 20,
53 parameters: HashMap::new(),
54 };
55
56 let mut portfolio_model =
57 template_manager.create_model_from_template("Portfolio Optimization", config)?;
58
59 // Step 3: Prepare data using classical ML pipeline
60 println!("\n3. Preparing data with hybrid pipeline...");
61
62 let pipeline_manager = ecosystem.classical_ml_integration();
63 let preprocessing_pipeline =
64 pipeline_manager.create_pipeline("hybrid_classification", PipelineConfig::default())?;
65
66 // Generate financial data
67 let (raw_returns, expected_returns) = generate_financial_data(252, 20)?;
68 println!(
69 " - Generated {} trading days for {} assets",
70 raw_returns.nrows(),
71 raw_returns.ncols()
72 );
73
74 // Preprocess data - convert to dynamic dimensions first
75 let raw_returns_dyn = raw_returns.into_dyn();
76 let processed_data_dyn = preprocessing_pipeline.transform(&raw_returns_dyn)?;
77 let processed_data = processed_data_dyn.into_dimensionality::<scirs2_core::ndarray::Ix2>()?;
78 println!(" - Data preprocessed with hybrid pipeline");
79
80 // Step 4: Train using multiple framework APIs
81 println!("\n4. Training across multiple framework APIs...");
82
83 // PyTorch-style training
84 println!(" a) PyTorch-style training...");
85 let pytorch_model = train_pytorch_style(&processed_data, &expected_returns)?;
86 let pytorch_accuracy =
87 evaluate_pytorch_model(&pytorch_model, &processed_data, &expected_returns)?;
88 println!(" PyTorch API accuracy: {pytorch_accuracy:.3}");
89
90 // TensorFlow Quantum style training
91 println!(" b) TensorFlow Quantum training...");
92 let tfq_model = train_tensorflow_style(&processed_data, &expected_returns)?;
93 let tfq_accuracy = evaluate_tfq_model(&tfq_model, &processed_data, &expected_returns)?;
94 println!(" TFQ API accuracy: {tfq_accuracy:.3}");
95
96 // Scikit-learn style training
97 println!(" c) Scikit-learn pipeline training...");
98 let sklearn_model = train_sklearn_style(&processed_data, &expected_returns)?;
99 let sklearn_accuracy =
100 evaluate_sklearn_model(&sklearn_model, &processed_data, &expected_returns)?;
101 println!(" Sklearn API accuracy: {sklearn_accuracy:.3}");
102
103 // Step 5: Model comparison and selection
104 println!("\n5. Model comparison and selection...");
105
106 let model_comparison = ModelComparison {
107 pytorch_accuracy,
108 tfq_accuracy,
109 sklearn_accuracy,
110 };
111
112 let best_model = select_best_model(&model_comparison)?;
113 println!(" - Best performing API: {best_model}");
114
115 // Step 6: Distributed training with SciRS2
116 println!("\n6. Distributed training with SciRS2...");
117
118 if ecosystem.distributed_training_available() {
119 let distributed_trainer = ecosystem
120 .scirs2_integration()
121 .create_distributed_trainer(2, "cpu")?;
122
123 let distributed_model = distributed_trainer.wrap_model(pytorch_model)?;
124 let distributed_results = train_distributed_model(
125 Box::new(distributed_model),
126 &processed_data,
127 &expected_returns,
128 &distributed_trainer,
129 )?;
130
131 println!(" - Distributed training completed");
132 println!(
133 " - Final distributed accuracy: {:.3}",
134 distributed_results.accuracy
135 );
136 println!(
137 " - Scaling efficiency: {:.2}%",
138 distributed_results.scaling_efficiency * 100.0
139 );
140 } else {
141 println!(" - Distributed training not available in this environment");
142 }
143
144 // Step 7: Comprehensive benchmarking
145 println!("\n7. Running comprehensive benchmarks...");
146
147 let benchmark_framework = ecosystem.benchmarking();
148 let benchmark_config = BenchmarkConfig {
149 output_directory: "showcase_benchmarks/".to_string(),
150 repetitions: 5,
151 warmup_runs: 2,
152 max_time_per_benchmark: 60.0,
153 profile_memory: true,
154 analyze_convergence: true,
155 confidence_level: 0.95,
156 };
157
158 // Mock comprehensive benchmark results since the actual method is different
159 let benchmark_results = ComprehensiveBenchmarkResults {
160 algorithms_tested: 3,
161 best_algorithm: "QAOA".to_string(),
162 quantum_advantage_detected: true,
163 average_speedup: 2.3,
164 };
165
166 print_benchmark_summary(&benchmark_results);
167
168 // Step 8: Model zoo integration
169 println!("\n8. Model zoo integration...");
170
171 let mut model_zoo = ecosystem.model_zoo();
172
173 // Register our trained model to the zoo
174 model_zoo.register_model(
175 "Portfolio_Optimization_Showcase".to_string(),
176 ModelMetadata {
177 name: "Portfolio_Optimization_Showcase".to_string(),
178 category: ModelCategory::Classification,
179 description: "Portfolio optimization model trained in integration showcase".to_string(),
180 input_shape: vec![20],
181 output_shape: vec![20],
182 num_qubits: 10,
183 num_parameters: 40,
184 dataset: "Financial Returns".to_string(),
185 accuracy: Some(model_comparison.pytorch_accuracy),
186 size_bytes: 2048,
187 created_date: "2024-06-17".to_string(),
188 version: "1.0".to_string(),
189 requirements: ModelRequirements {
190 min_qubits: 10,
191 coherence_time: 100.0,
192 gate_fidelity: 0.99,
193 backends: vec!["statevector".to_string()],
194 },
195 },
196 );
197
198 println!(" - Model saved to zoo");
199 println!(
200 " - Available models in zoo: {}",
201 model_zoo.list_models().len()
202 );
203
204 // Load a pre-existing model for comparison
205 match model_zoo.load_model("portfolio_qaoa") {
206 Ok(existing_model) => {
207 println!(" - Loaded existing QAOA model for comparison");
208 let qaoa_accuracy =
209 evaluate_generic_model(existing_model, &processed_data, &expected_returns)?;
210 println!(" - QAOA model accuracy: {qaoa_accuracy:.3}");
211 }
212 Err(_) => {
213 println!(" - QAOA model not found in zoo");
214 }
215 }
216
217 // Step 9: Export models in multiple formats
218 println!("\n9. Exporting models in multiple formats...");
219
220 // ONNX export (mocked for demo purposes)
221 let onnx_exporter = ecosystem.onnx_export();
222 // onnx_exporter.export_pytorch_model() would be the actual method
223 println!(" - Model exported to ONNX format");
224
225 // Framework-specific exports
226 ecosystem
227 .pytorch_api()
228 .save_model(&best_model, "portfolio_model_pytorch.pth")?;
229 ecosystem
230 .tensorflow_compatibility()
231 .export_savedmodel(&best_model, "portfolio_model_tf/")?;
232 ecosystem
233 .sklearn_compatibility()
234 .save_model(&best_model, "portfolio_model_sklearn.joblib")?;
235
236 println!(" - Models exported to all framework formats");
237
238 // Step 10: Tutorial generation
239 println!("\n10. Generating interactive tutorials...");
240
241 let tutorial_manager = ecosystem.tutorials();
242 let tutorial_session =
243 tutorial_manager.run_interactive_session("portfolio_optimization_demo")?;
244
245 println!(" - Interactive tutorial session created");
246 println!(
247 " - Tutorial sections: {}",
248 tutorial_session.total_sections()
249 );
250 println!(
251 " - Estimated completion time: {} minutes",
252 tutorial_session.estimated_duration()
253 );
254
255 // Step 11: Industry use case demonstration
256 println!("\n11. Industry use case analysis...");
257
258 let industry_examples = ecosystem.industry_examples();
259 let use_case = industry_examples.get_use_case(Industry::Finance, "Portfolio Optimization")?;
260
261 // Create ROI analysis based on use case ROI estimate
262 let roi_analysis = ROIAnalysis {
263 annual_savings: use_case.roi_estimate.annual_benefit,
264 implementation_cost: use_case.roi_estimate.implementation_cost,
265 payback_months: use_case.roi_estimate.payback_months,
266 risk_adjusted_return: use_case.roi_estimate.npv / use_case.roi_estimate.implementation_cost,
267 };
268 println!(" - ROI Analysis:");
269 println!(
270 " * Expected annual savings: ${:.0}K",
271 roi_analysis.annual_savings / 1000.0
272 );
273 println!(
274 " * Implementation cost: ${:.0}K",
275 roi_analysis.implementation_cost / 1000.0
276 );
277 println!(
278 " * Payback period: {:.1} months",
279 roi_analysis.payback_months
280 );
281 println!(
282 " * Risk-adjusted return: {:.1}%",
283 roi_analysis.risk_adjusted_return * 100.0
284 );
285
286 // Step 12: Performance analytics dashboard
287 println!("\n12. Performance analytics dashboard...");
288
289 let analytics = PerformanceAnalytics::new();
290 analytics.track_model_performance(&best_model, &benchmark_results)?;
291 analytics.track_framework_comparison(&model_comparison)?;
292 analytics.track_resource_utilization(&ecosystem)?;
293
294 let dashboard_url = analytics.generate_dashboard("showcase_dashboard.html")?;
295 println!(" - Performance dashboard generated: {dashboard_url}");
296
297 // Step 13: Integration health check
298 println!("\n13. Integration health check...");
299
300 let health_check = ecosystem.run_health_check()?;
301 print_health_check_results(&health_check);
302
303 // Step 14: Generate comprehensive report
304 println!("\n14. Generating comprehensive showcase report...");
305
306 let showcase_report = generate_showcase_report(ShowcaseData {
307 ecosystem: &ecosystem,
308 model_comparison: &model_comparison,
309 benchmark_results: &benchmark_results,
310 roi_analysis: &roi_analysis,
311 health_check: &health_check,
312 })?;
313
314 save_report("showcase_report.html", &showcase_report)?;
315 println!(" - Comprehensive report saved: showcase_report.html");
316
317 // Step 15: Future roadmap suggestions
318 println!("\n15. Future integration roadmap...");
319
320 let roadmap = ecosystem.generate_integration_roadmap(&showcase_report)?;
321 print_integration_roadmap(&roadmap);
322
323 println!("\n=== Complete Integration Showcase Finished ===");
324 println!("🚀 QuantRS2-ML ecosystem demonstration complete!");
325 println!("📊 Check the generated reports and dashboards for detailed analysis");
326 println!("🔬 All integration capabilities have been successfully demonstrated");
327
328 Ok(())
329 }
pub fn search_by_roi(&self, min_npv: f64) -> Vec<&UseCase>
Search use cases by ROI threshold
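A short sketch of filtering by a minimum NPV; the threshold value and the helper function are arbitrary illustrative choices.

// Sketch only: the threshold and helper function are illustrative.
fn high_value_cases(manager: &IndustryExampleManager) {
    let min_npv = 1_000_000.0; // arbitrary example threshold
    // matches borrows the use cases whose estimated NPV clears the threshold
    let matches: Vec<&UseCase> = manager.search_by_roi(min_npv);
    println!("{} use case(s) with estimated NPV at or above {min_npv}", matches.len());
}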
pub fn search_by_complexity(
    &self,
    complexity: &ImplementationComplexity,
) -> Vec<&UseCase>
Search use cases by implementation complexity
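A sketch of filtering by complexity; the complexity value is passed in as a parameter rather than constructed, since the ImplementationComplexity variants are not listed on this page.

// Sketch only: the complexity value is taken as a parameter because the
// ImplementationComplexity variants are not shown in this documentation.
fn cases_at_complexity(
    manager: &IndustryExampleManager,
    level: &ImplementationComplexity,
) {
    let matches = manager.search_by_complexity(level);
    println!("{} use case(s) at the requested complexity", matches.len());
}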
pub fn run_use_case_example(
    &mut self,
    use_case_name: &str,
) -> Result<ExampleResult>
Run a complete use case implementation example
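A sketch of running one bundled example; note the method takes &mut self, and the use case name reuses "Portfolio Optimization" from the repository example. The ExampleResult fields are not inspected because they are not listed on this page.

// Sketch only: the surrounding function and printed messages are illustrative.
fn run_portfolio_example(manager: &mut IndustryExampleManager) {
    match manager.run_use_case_example("Portfolio Optimization") {
        // Ok(..) carries an ExampleResult; its fields are not documented here,
        // so the value is only acknowledged, not inspected.
        Ok(_result) => println!("use case example completed"),
        Err(_) => println!("use case example failed or was not found"),
    }
}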
pub fn get_benchmark_results(
    &self,
    use_case_name: &str,
) -> Option<&BenchmarkResult>
Get benchmark results for a specific use case, if any have been recorded.
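A sketch of checking whether benchmark results exist for a use case. The assumption that results become available after run_use_case_example is part of this sketch, not something stated by the documented API.

// Sketch only: the ordering relative to run_use_case_example is an assumption.
fn has_benchmarks(manager: &IndustryExampleManager, use_case_name: &str) -> bool {
    manager.get_benchmark_results(use_case_name).is_some()
}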
Auto Trait Implementations
impl Freeze for IndustryExampleManager
impl RefUnwindSafe for IndustryExampleManager
impl Send for IndustryExampleManager
impl Sync for IndustryExampleManager
impl Unpin for IndustryExampleManager
impl UnwindSafe for IndustryExampleManager
Blanket Implementations
impl<T> BorrowMut<T> for T
where
    T: ?Sized,
fn borrow_mut(&mut self) -> &mut T
Mutably borrows from an owned value.
impl<T> IntoEither for T
fn into_either(self, into_left: bool) -> Either<Self, Self>
Converts self into a Left variant of Either<Self, Self> if into_left is true. Converts self into a Right variant of Either<Self, Self> otherwise.
fn into_either_with<F>(self, into_left: F) -> Either<Self, Self>
Converts self into a Left variant of Either<Self, Self> if into_left(&self) returns true. Converts self into a Right variant of Either<Self, Self> otherwise.
impl<T> Pointable for T
impl<SS, SP> SupersetOf<SS> for SP
where
    SS: SubsetOf<SP>,
fn to_subset(&self) -> Option<SS>
The inverse inclusion map: attempts to construct self from the equivalent element of its superset.
fn is_in_subset(&self) -> bool
Checks if self is actually part of its subset T (and can be converted to it).
fn to_subset_unchecked(&self) -> SS
Use with care! Same as self.to_subset but without any property checks. Always succeeds.
fn from_subset(element: &SS) -> SP
The inclusion map: converts self to the equivalent element of its superset.