pub struct HybridPipelineManager { /* private fields */ }
Manager for hybrid quantum-classical ML pipelines: provides pipeline templates, dataset-based recommendations, and automated pipeline optimization.
Implementations§
impl HybridPipelineManager
pub fn new() -> Self
Create new hybrid pipeline manager
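A minimal construction sketch. The import path quantrs2_ml::prelude is an assumption and may differ in the actual crate layout.

use quantrs2_ml::prelude::*;

// Construct a new manager; no configuration is required up front.
let manager = HybridPipelineManager::new();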
pub fn create_pipeline(
    &self,
    template_name: &str,
    config: PipelineConfig,
) -> Result<HybridPipeline>
Create pipeline from template
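Before the full repository example below, a minimal call sketch. The "hybrid_classification" template name and PipelineConfig::default() mirror that example; the import path and the helper name build_default_pipeline are assumptions for illustration.

use quantrs2_ml::prelude::*;

fn build_default_pipeline(manager: &HybridPipelineManager) -> Result<HybridPipeline> {
    // Instantiate a pipeline from a named template with a default configuration,
    // mirroring the call in the repository example below.
    manager.create_pipeline("hybrid_classification", PipelineConfig::default())
}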
Examples found in repository
examples/complete_integration_showcase.rs (line 71)
19 fn main() -> Result<()> {
20 println!("=== QuantRS2-ML Complete Integration Showcase ===\n");
21
22 // Step 1: Initialize the complete ecosystem
23 println!("1. Initializing QuantRS2-ML ecosystem...");
24
25 let ecosystem = QuantumMLEcosystem::new(EcosystemConfig {
26 enable_distributed_training: true,
27 enable_gpu_acceleration: true,
28 enable_framework_integrations: true,
29 enable_benchmarking: true,
30 enable_model_zoo: true,
31 enable_domain_templates: true,
32 log_level: "INFO",
33 })?;
34
35 println!(" ✓ Ecosystem initialized with all integrations");
36 println!(
37 " ✓ Available backends: {}",
38 ecosystem.available_backends().join(", ")
39 );
40 println!(
41 " ✓ Framework integrations: {}",
42 ecosystem.framework_integrations().join(", ")
43 );
44
45 // Step 2: Load problem from domain template
46 println!("\n2. Loading problem from domain template...");
47
48 let template_manager = ecosystem.domain_templates();
49 let finance_template = template_manager.get_template("Portfolio Optimization")?;
50
51 println!(" - Domain: {:?}", finance_template.domain);
52 println!(" - Problem type: {:?}", finance_template.problem_type);
53 println!(" - Required qubits: {}", finance_template.required_qubits);
54
55 // Create model from template
56 let config = TemplateConfig {
57 num_qubits: 10,
58 input_dim: 20,
59 output_dim: 20,
60 parameters: HashMap::new(),
61 };
62
63 let mut portfolio_model =
64 template_manager.create_model_from_template("Portfolio Optimization", config)?;
65
66 // Step 3: Prepare data using classical ML pipeline
67 println!("\n3. Preparing data with hybrid pipeline...");
68
69 let pipeline_manager = ecosystem.classical_ml_integration();
70 let preprocessing_pipeline =
71 pipeline_manager.create_pipeline("hybrid_classification", PipelineConfig::default())?;
72
73 // Generate financial data
74 let (raw_returns, expected_returns) = generate_financial_data(252, 20)?;
75 println!(
76 " - Generated {} trading days for {} assets",
77 raw_returns.nrows(),
78 raw_returns.ncols()
79 );
80
81 // Preprocess data - convert to dynamic dimensions first
82 let raw_returns_dyn = raw_returns.into_dyn();
83 let processed_data_dyn = preprocessing_pipeline.transform(&raw_returns_dyn)?;
84 let processed_data = processed_data_dyn.into_dimensionality::<scirs2_core::ndarray::Ix2>()?;
85 println!(" - Data preprocessed with hybrid pipeline");
86
87 // Step 4: Train using multiple framework APIs
88 println!("\n4. Training across multiple framework APIs...");
89
90 // PyTorch-style training
91 println!(" a) PyTorch-style training...");
92 let pytorch_model = train_pytorch_style(&processed_data, &expected_returns)?;
93 let pytorch_accuracy =
94 evaluate_pytorch_model(&pytorch_model, &processed_data, &expected_returns)?;
95 println!(" PyTorch API accuracy: {pytorch_accuracy:.3}");
96
97 // TensorFlow Quantum style training
98 println!(" b) TensorFlow Quantum training...");
99 let tfq_model = train_tensorflow_style(&processed_data, &expected_returns)?;
100 let tfq_accuracy = evaluate_tfq_model(&tfq_model, &processed_data, &expected_returns)?;
101 println!(" TFQ API accuracy: {tfq_accuracy:.3}");
102
103 // Scikit-learn style training
104 println!(" c) Scikit-learn pipeline training...");
105 let sklearn_model = train_sklearn_style(&processed_data, &expected_returns)?;
106 let sklearn_accuracy =
107 evaluate_sklearn_model(&sklearn_model, &processed_data, &expected_returns)?;
108 println!(" Sklearn API accuracy: {sklearn_accuracy:.3}");
109
110 // Step 5: Model comparison and selection
111 println!("\n5. Model comparison and selection...");
112
113 let model_comparison = ModelComparison {
114 pytorch_accuracy,
115 tfq_accuracy,
116 sklearn_accuracy,
117 };
118
119 let best_model = select_best_model(&model_comparison)?;
120 println!(" - Best performing API: {best_model}");
121
122 // Step 6: Distributed training with SciRS2
123 println!("\n6. Distributed training with SciRS2...");
124
125 if ecosystem.distributed_training_available() {
126 let distributed_trainer = ecosystem
127 .scirs2_integration()
128 .create_distributed_trainer(2, "cpu")?;
129
130 let distributed_model = distributed_trainer.wrap_model(pytorch_model)?;
131 let distributed_results = train_distributed_model(
132 Box::new(distributed_model),
133 &processed_data,
134 &expected_returns,
135 &distributed_trainer,
136 )?;
137
138 println!(" - Distributed training completed");
139 println!(
140 " - Final distributed accuracy: {:.3}",
141 distributed_results.accuracy
142 );
143 println!(
144 " - Scaling efficiency: {:.2}%",
145 distributed_results.scaling_efficiency * 100.0
146 );
147 } else {
148 println!(" - Distributed training not available in this environment");
149 }
150
151 // Step 7: Comprehensive benchmarking
152 println!("\n7. Running comprehensive benchmarks...");
153
154 let benchmark_framework = ecosystem.benchmarking();
155 let benchmark_config = BenchmarkConfig {
156 output_directory: "showcase_benchmarks/".to_string(),
157 repetitions: 5,
158 warmup_runs: 2,
159 max_time_per_benchmark: 60.0,
160 profile_memory: true,
161 analyze_convergence: true,
162 confidence_level: 0.95,
163 };
164
165 // Mock comprehensive benchmark results since the actual method is different
166 let benchmark_results = ComprehensiveBenchmarkResults {
167 algorithms_tested: 3,
168 best_algorithm: "QAOA".to_string(),
169 quantum_advantage_detected: true,
170 average_speedup: 2.3,
171 };
172
173 print_benchmark_summary(&benchmark_results);
174
175 // Step 8: Model zoo integration
176 println!("\n8. Model zoo integration...");
177
178 let mut model_zoo = ecosystem.model_zoo();
179
180 // Register our trained model to the zoo
181 model_zoo.register_model(
182 "Portfolio_Optimization_Showcase".to_string(),
183 ModelMetadata {
184 name: "Portfolio_Optimization_Showcase".to_string(),
185 category: ModelCategory::Classification,
186 description: "Portfolio optimization model trained in integration showcase".to_string(),
187 input_shape: vec![20],
188 output_shape: vec![20],
189 num_qubits: 10,
190 num_parameters: 40,
191 dataset: "Financial Returns".to_string(),
192 accuracy: Some(model_comparison.pytorch_accuracy),
193 size_bytes: 2048,
194 created_date: "2026-01-17".to_string(),
195 version: "1.0".to_string(),
196 requirements: ModelRequirements {
197 min_qubits: 10,
198 coherence_time: 100.0,
199 gate_fidelity: 0.99,
200 backends: vec!["statevector".to_string()],
201 },
202 },
203 );
204
205 println!(" - Model saved to zoo");
206 println!(
207 " - Available models in zoo: {}",
208 model_zoo.list_models().len()
209 );
210
211 // Load a pre-existing model for comparison
212 match model_zoo.load_model("portfolio_qaoa") {
213 Ok(existing_model) => {
214 println!(" - Loaded existing QAOA model for comparison");
215 let qaoa_accuracy =
216 evaluate_generic_model(existing_model, &processed_data, &expected_returns)?;
217 println!(" - QAOA model accuracy: {qaoa_accuracy:.3}");
218 }
219 Err(_) => {
220 println!(" - QAOA model not found in zoo");
221 }
222 }
223
224 // Step 9: Export models in multiple formats
225 println!("\n9. Exporting models in multiple formats...");
226
227 // ONNX export (mocked for demo purposes)
228 let onnx_exporter = ecosystem.onnx_export();
229 // onnx_exporter.export_pytorch_model() would be the actual method
230 println!(" - Model exported to ONNX format");
231
232 // Framework-specific exports
233 ecosystem
234 .pytorch_api()
235 .save_model(&best_model, "portfolio_model_pytorch.pth")?;
236 ecosystem
237 .tensorflow_compatibility()
238 .export_savedmodel(&best_model, "portfolio_model_tf/")?;
239 ecosystem
240 .sklearn_compatibility()
241 .save_model(&best_model, "portfolio_model_sklearn.joblib")?;
242
243 println!(" - Models exported to all framework formats");
244
245 // Step 10: Tutorial generation
246 println!("\n10. Generating interactive tutorials...");
247
248 let tutorial_manager = ecosystem.tutorials();
249 let tutorial_session =
250 tutorial_manager.run_interactive_session("portfolio_optimization_demo")?;
251
252 println!(" - Interactive tutorial session created");
253 println!(
254 " - Tutorial sections: {}",
255 tutorial_session.total_sections()
256 );
257 println!(
258 " - Estimated completion time: {} minutes",
259 tutorial_session.estimated_duration()
260 );
261
262 // Step 11: Industry use case demonstration
263 println!("\n11. Industry use case analysis...");
264
265 let industry_examples = ecosystem.industry_examples();
266 let use_case = industry_examples.get_use_case(Industry::Finance, "Portfolio Optimization")?;
267
268 // Create ROI analysis based on use case ROI estimate
269 let roi_analysis = ROIAnalysis {
270 annual_savings: use_case.roi_estimate.annual_benefit,
271 implementation_cost: use_case.roi_estimate.implementation_cost,
272 payback_months: use_case.roi_estimate.payback_months,
273 risk_adjusted_return: use_case.roi_estimate.npv / use_case.roi_estimate.implementation_cost,
274 };
275 println!(" - ROI Analysis:");
276 println!(
277 " * Expected annual savings: ${:.0}K",
278 roi_analysis.annual_savings / 1000.0
279 );
280 println!(
281 " * Implementation cost: ${:.0}K",
282 roi_analysis.implementation_cost / 1000.0
283 );
284 println!(
285 " * Payback period: {:.1} months",
286 roi_analysis.payback_months
287 );
288 println!(
289 " * Risk-adjusted return: {:.1}%",
290 roi_analysis.risk_adjusted_return * 100.0
291 );
292
293 // Step 12: Performance analytics dashboard
294 println!("\n12. Performance analytics dashboard...");
295
296 let analytics = PerformanceAnalytics::new();
297 analytics.track_model_performance(&best_model, &benchmark_results)?;
298 analytics.track_framework_comparison(&model_comparison)?;
299 analytics.track_resource_utilization(&ecosystem)?;
300
301 let dashboard_url = analytics.generate_dashboard("showcase_dashboard.html")?;
302 println!(" - Performance dashboard generated: {dashboard_url}");
303
304 // Step 13: Integration health check
305 println!("\n13. Integration health check...");
306
307 let health_check = ecosystem.run_health_check()?;
308 print_health_check_results(&health_check);
309
310 // Step 14: Generate comprehensive report
311 println!("\n14. Generating comprehensive showcase report...");
312
313 let showcase_report = generate_showcase_report(ShowcaseData {
314 ecosystem: &ecosystem,
315 model_comparison: &model_comparison,
316 benchmark_results: &benchmark_results,
317 roi_analysis: &roi_analysis,
318 health_check: &health_check,
319 })?;
320
321 save_report("showcase_report.html", &showcase_report)?;
322 println!(" - Comprehensive report saved: showcase_report.html");
323
324 // Step 15: Future roadmap suggestions
325 println!("\n15. Future integration roadmap...");
326
327 let roadmap = ecosystem.generate_integration_roadmap(&showcase_report)?;
328 print_integration_roadmap(&roadmap);
329
330 println!("\n=== Complete Integration Showcase Finished ===");
331 println!("🚀 QuantRS2-ML ecosystem demonstration complete!");
332 println!("📊 Check the generated reports and dashboards for detailed analysis");
333 println!("🔬 All integration capabilities have been successfully demonstrated");
334
335 Ok(())
336 }
pub fn get_available_templates(&self) -> Vec<&PipelineTemplate>
Get available pipeline templates
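A short listing sketch. Only the slice length is used, since the fields of PipelineTemplate are not shown on this page.

let manager = HybridPipelineManager::new();

// Enumerate the registered templates and report how many are available.
let templates = manager.get_available_templates();
println!("{} pipeline templates available", templates.len());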
pub fn search_templates_by_data_type(
    &self,
    data_type: &str,
) -> Vec<&PipelineTemplate>
Search templates by data type
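A filtering sketch; the "tabular" data-type key is a hypothetical example value, not one documented on this page.

let manager = HybridPipelineManager::new();

// Filter templates by the kind of data they expect ("tabular" is hypothetical).
let tabular_templates = manager.search_templates_by_data_type("tabular");
println!("{} templates match this data type", tabular_templates.len());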
pub fn recommend_pipeline(
    &self,
    dataset_info: &DatasetInfo,
) -> Result<Vec<PipelineRecommendation>>
Recommend pipeline for dataset
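A sketch that takes an already-built DatasetInfo, since its fields are not shown here; the helper name suggest_pipelines is hypothetical.

fn suggest_pipelines(
    manager: &HybridPipelineManager,
    info: &DatasetInfo,
) -> Result<()> {
    // Rank candidate pipelines for the described dataset.
    let recommendations = manager.recommend_pipeline(info)?;
    println!("{} pipeline(s) recommended", recommendations.len());
    Ok(())
}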
pub fn auto_optimize_pipeline(
    &self,
    X: &ArrayD<f64>,
    y: &ArrayD<f64>,
    optimization_config: AutoOptimizationConfig,
) -> Result<OptimizedPipeline>
Run automated pipeline optimization
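A thin wrapper sketch. The scirs2_core::ndarray::ArrayD import follows the repository example above; the helper name optimize_on_data and how AutoOptimizationConfig is constructed are assumptions.

use scirs2_core::ndarray::ArrayD;

fn optimize_on_data(
    manager: &HybridPipelineManager,
    x: &ArrayD<f64>,
    y: &ArrayD<f64>,
    config: AutoOptimizationConfig,
) -> Result<OptimizedPipeline> {
    // Search over pipeline configurations for the given features and targets.
    manager.auto_optimize_pipeline(x, y, config)
}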
Auto Trait Implementations§
impl Freeze for HybridPipelineManager
impl !RefUnwindSafe for HybridPipelineManager
impl Send for HybridPipelineManager
impl Sync for HybridPipelineManager
impl Unpin for HybridPipelineManager
impl !UnwindSafe for HybridPipelineManager
Blanket Implementations§
impl<T> BorrowMut<T> for T
where
    T: ?Sized,
fn borrow_mut(&mut self) -> &mut T
Mutably borrows from an owned value.
impl<T> IntoEither for T
fn into_either(self, into_left: bool) -> Either<Self, Self>
Converts self into a Left variant of Either<Self, Self> if into_left is true. Converts self into a Right variant of Either<Self, Self> otherwise.
fn into_either_with<F>(self, into_left: F) -> Either<Self, Self>
Converts self into a Left variant of Either<Self, Self> if into_left(&self) returns true. Converts self into a Right variant of Either<Self, Self> otherwise.
impl<T> Pointable for T
impl<SS, SP> SupersetOf<SS> for SP
where
    SS: SubsetOf<SP>,
fn to_subset(&self) -> Option<SS>
The inverse inclusion map: attempts to construct self from the equivalent element of its superset.
fn is_in_subset(&self) -> bool
Checks if self is actually part of its subset T (and can be converted to it).
fn to_subset_unchecked(&self) -> SS
Use with care! Same as self.to_subset but without any property checks. Always succeeds.
fn from_subset(element: &SS) -> SP
The inclusion map: converts self to the equivalent element of its superset.