pub struct SciRS2Array {
pub data: ArrayD<f64>,
pub requires_grad: bool,
pub grad: Option<ArrayD<f64>>,
pub grad_fn: Option<Box<dyn GradFunction>>,
}
SciRS2 array wrapper for quantum ML operations
Fields
data: ArrayD<f64> - Array data
requires_grad: bool - Whether gradients are required
grad: Option<ArrayD<f64>> - Gradient accumulator
grad_fn: Option<Box<dyn GradFunction>> - Operation history for backpropagation
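As a rough illustration of how these fields fit together, here is a minimal sketch. It assumes SciRS2Array and the ndarray re-exports (ArrayD, IxDyn) from scirs2_core are in scope, and it assumes (not stated on this page) that grad and grad_fn start out as None after construction.

use scirs2_core::ndarray::{ArrayD, IxDyn};

fn field_sketch() {
    // `data` holds the values; `requires_grad` opts the array into gradient tracking.
    let arr = SciRS2Array::new(ArrayD::<f64>::zeros(IxDyn(&[4, 6])), true);
    assert!(arr.requires_grad);
    // `grad` accumulates gradients during backpropagation and `grad_fn` records the
    // producing operation; both are assumed to stay empty until a backward pass runs.
    assert!(arr.grad.is_none() && arr.grad_fn.is_none());
}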
Implementations
impl SciRS2Array
pub fn new(data: ArrayD<f64>, requires_grad: bool) -> Self
Create a new SciRS2Array
Examples found in repository?
examples/scirs2_distributed_demo.rs (line 44)
24fn main() -> Result<()> {
25 println!("=== SciRS2 Distributed Training Demo ===\n");
26
27 // Step 1: Initialize SciRS2 distributed environment
28 println!("1. Initializing SciRS2 distributed environment...");
29
30 let distributed_trainer = SciRS2DistributedTrainer::new(
31 4, // world_size
32 0, // rank
33 );
34
35 println!(" - Workers: 4");
36 println!(" - Backend: {}", distributed_trainer.backend);
37 println!(" - World size: {}", distributed_trainer.world_size);
38
39 // Step 2: Create SciRS2 tensors and arrays
40 println!("\n2. Creating SciRS2 tensors and arrays...");
41
42 let data_shape = (1000, 8);
43 let mut scirs2_array =
44 SciRS2Array::new(ArrayD::zeros(IxDyn(&[data_shape.0, data_shape.1])), true);
45 scirs2_array.requires_grad = true;
46
47 // Placeholder for quantum-friendly data initialization
48 // scirs2_array.fill_quantum_data("quantum_normal", 42)?; // would be implemented
49
50 println!(" - Array shape: {:?}", scirs2_array.shape());
51 println!(" - Requires grad: {}", scirs2_array.requires_grad);
52 println!(" - Device: CPU"); // Placeholder
53
54 // Create SciRS2 tensor for quantum parameters
55 let param_data = ArrayD::zeros(IxDyn(&[4, 6])); // 4 qubits, 6 parameters per qubit
56 let mut quantum_params = SciRS2Array::new(param_data, true);
57
58 // Placeholder for quantum parameter initialization
59 // quantum_params.quantum_parameter_init("quantum_aware")?; // would be implemented
60
61 println!(
62 " - Quantum parameters shape: {:?}",
63 quantum_params.data.shape()
64 );
65 println!(
66 " - Parameter range: [{:.4}, {:.4}]",
67 quantum_params
68 .data
69 .iter()
70 .fold(f64::INFINITY, |a, &b| a.min(b)),
71 quantum_params
72 .data
73 .iter()
74 .fold(f64::NEG_INFINITY, |a, &b| a.max(b))
75 );
76
77 // Step 3: Setup distributed quantum model
78 println!("\n3. Setting up distributed quantum model...");
79
80 let quantum_model = create_distributed_quantum_model(&quantum_params)?;
81
82 // Wrap model for distributed training
83 let distributed_model = distributed_trainer.wrap_model(quantum_model)?;
84
85 println!(
86 " - Model parameters: {}",
87 distributed_model.num_parameters()
88 );
89 println!(" - Distributed: {}", distributed_model.is_distributed());
90
91 // Step 4: Create SciRS2 optimizers
92 println!("\n4. Configuring SciRS2 optimizers...");
93
94 let optimizer = SciRS2Optimizer::new("adam");
95
96 // Configure distributed optimizer
97 let mut distributed_optimizer = distributed_trainer.wrap_model(optimizer)?;
98
99 println!(" - Optimizer: Adam with SciRS2 backend");
100 println!(" - Learning rate: 0.001"); // Placeholder
101 println!(" - Distributed synchronization: enabled");
102
103 // Step 5: Distributed data loading
104 println!("\n5. Setting up distributed data loading...");
105
106 let dataset = create_large_quantum_dataset(10000, 8)?;
107 println!(" - Dataset created with {} samples", dataset.size);
108 println!(" - Distributed sampling configured");
109
110 // Create data loader
111 let mut data_loader = SciRS2DataLoader::new(dataset, 64);
112
113 println!(" - Total dataset size: {}", data_loader.dataset.size);
114 println!(" - Local batches per worker: 156"); // placeholder
115 println!(" - Global batch size: 64"); // placeholder
116
117 // Step 6: Distributed training loop
118 println!("\n6. Starting distributed training...");
119
120 let num_epochs = 10;
121 let mut training_metrics = SciRS2TrainingMetrics::new();
122
123 for epoch in 0..num_epochs {
124 // distributed_trainer.barrier()?; // Synchronize all workers - placeholder
125
126 let mut epoch_loss = 0.0;
127 let mut num_batches = 0;
128
129 for (batch_idx, (data, targets)) in data_loader.enumerate() {
130 // Convert to SciRS2 tensors
131 let data_tensor = data.clone();
132 let target_tensor = targets.clone();
133
134 // Zero gradients
135 // distributed_optimizer.zero_grad()?; // placeholder
136
137 // Forward pass
138 let outputs = distributed_model.forward(&data_tensor)?;
139 let loss = compute_quantum_loss(&outputs, &target_tensor)?;
140
141 // Backward pass with automatic differentiation
142 // loss.backward()?; // placeholder
143
144 // Gradient synchronization across workers
145 // distributed_trainer.all_reduce_gradients(&distributed_model)?; // placeholder
146
147 // Optimizer step
148 // distributed_optimizer.step()?; // placeholder
149
150 epoch_loss += loss.data.iter().sum::<f64>();
151 num_batches += 1;
152
153 if batch_idx % 10 == 0 {
154 println!(
155 " Epoch {}, Batch {}: loss = {:.6}",
156 epoch,
157 batch_idx,
158 loss.data.iter().sum::<f64>()
159 );
160 }
161 }
162
163 // Collect metrics across all workers
164 let avg_loss =
165 distributed_trainer.all_reduce_scalar(epoch_loss / f64::from(num_batches))?;
166 training_metrics.record_epoch(epoch, avg_loss);
167
168 println!(" Epoch {epoch} completed: avg_loss = {avg_loss:.6}");
169 }
170
171 // Step 7: Distributed evaluation
172 println!("\n7. Distributed model evaluation...");
173
174 let test_dataset = create_test_quantum_dataset(2000, 8)?;
175 // let test_sampler = distributed_trainer.create_sampler(&test_dataset)?; // placeholder
176 println!(
177 " - Test dataset configured with {} samples",
178 test_dataset.size
179 );
180
181 let evaluation_results = evaluate_distributed_model(
182 &distributed_model,
183 &mut SciRS2DataLoader::new(test_dataset, 64),
184 &distributed_trainer,
185 )?;
186
187 println!(" Distributed Evaluation Results:");
188 println!(" - Test accuracy: {:.4}", evaluation_results.accuracy);
189 println!(" - Test loss: {:.6}", evaluation_results.loss);
190 println!(
191 " - Quantum fidelity: {:.4}",
192 evaluation_results.quantum_fidelity
193 );
194
195 // Step 8: SciRS2 tensor operations
196 println!("\n8. Demonstrating SciRS2 tensor operations...");
197
198 // Advanced tensor operations
199 let tensor_a = SciRS2Array::randn(vec![100, 50], SciRS2Device::CPU)?;
200 let tensor_b = SciRS2Array::randn(vec![50, 25], SciRS2Device::CPU)?;
201
202 // Matrix multiplication with automatic broadcasting
203 let result = tensor_a.matmul(&tensor_b)?;
204 println!(
205 " - Matrix multiplication: {:?} x {:?} = {:?}",
206 tensor_a.shape(),
207 tensor_b.shape(),
208 result.shape()
209 );
210
211 // Quantum-specific operations
212 let quantum_state = SciRS2Array::quantum_observable("pauli_z_all", 4)?;
213 // Placeholder for quantum evolution
214 let evolved_state = quantum_state;
215 let fidelity = 0.95; // Mock fidelity
216
217 println!(" - Quantum state evolution fidelity: {fidelity:.6}");
218
219 // Placeholder for distributed tensor operations
220 let distributed_tensor = tensor_a;
221 let local_computation = distributed_tensor.sum(None)?;
222 let global_result = local_computation;
223
224 println!(
225 " - Distributed computation result shape: {:?}",
226 global_result.shape()
227 );
228
229 // Step 9: Scientific computing features
230 println!("\n9. SciRS2 scientific computing features...");
231
232 // Numerical integration for quantum expectation values
233 let observable = create_quantum_observable(4)?;
234 let expectation_value = 0.5; // Mock expectation value
235 println!(" - Quantum expectation value: {expectation_value:.6}");
236
237 // Optimization with scientific methods
238 let mut optimization_result = OptimizationResult {
239 converged: true,
240 final_value: compute_quantum_energy(&quantum_params)?,
241 num_iterations: 42,
242 };
243
244 println!(
245 " - LBFGS optimization converged: {}",
246 optimization_result.converged
247 );
248 println!(" - Final energy: {:.8}", optimization_result.final_value);
249 println!(" - Iterations: {}", optimization_result.num_iterations);
250
251 // Step 10: Model serialization with SciRS2
252 println!("\n10. SciRS2 model serialization...");
253
254 let serializer = SciRS2Serializer;
255
256 // Save distributed model
257 SciRS2Serializer::save_model(
258 &distributed_model.state_dict(),
259 "distributed_quantum_model.h5",
260 )?;
261 println!(" - Model saved with SciRS2 serializer");
262
263 // Save training state for checkpointing
264 let checkpoint = SciRS2Checkpoint {
265 model_state: distributed_model.state_dict(),
266 optimizer_state: HashMap::new(), // Placeholder for optimizer state
267 epoch: num_epochs,
268 metrics: training_metrics.clone(),
269 };
270
271 SciRS2Serializer::save_checkpoint(
272 &checkpoint.model_state,
273 &SciRS2Optimizer::new("adam"),
274 checkpoint.epoch,
275 "training_checkpoint.h5",
276 )?;
277 println!(" - Training checkpoint saved");
278
279 // Load and verify
280 let _loaded_model = SciRS2Serializer::load_model("distributed_quantum_model.h5")?;
281 println!(" - Model loaded successfully");
282
283 // Step 11: Performance analysis
284 println!("\n11. Distributed training performance analysis...");
285
286 let performance_metrics = PerformanceMetrics {
287 communication_overhead: 0.15,
288 scaling_efficiency: 0.85,
289 memory_usage_gb: 2.5,
290 avg_batch_time: 0.042,
291 };
292
293 println!(" Performance Metrics:");
294 println!(
295 " - Communication overhead: {:.2}%",
296 performance_metrics.communication_overhead * 100.0
297 );
298 println!(
299 " - Scaling efficiency: {:.2}%",
300 performance_metrics.scaling_efficiency * 100.0
301 );
302 println!(
303 " - Memory usage per worker: {:.1} GB",
304 performance_metrics.memory_usage_gb
305 );
306 println!(
307 " - Average batch processing time: {:.3}s",
308 performance_metrics.avg_batch_time
309 );
310
311 // Step 12: Cleanup distributed environment
312 println!("\n12. Cleaning up distributed environment...");
313
314 // distributed_trainer.cleanup()?; // Placeholder
315 println!(" - Distributed training environment cleaned up");
316
317 println!("\n=== SciRS2 Distributed Training Demo Complete ===");
318
319 Ok(())
320}
321
322fn create_distributed_quantum_model(params: &dyn SciRS2Tensor) -> Result<DistributedQuantumModel> {
323 DistributedQuantumModel::new(
324 4, // num_qubits
325 3, // num_layers
326 "hardware_efficient", // ansatz_type
327 params.to_scirs2()?, // parameters
328 "expectation_value", // measurement_type
329 )
330}
331
332fn create_large_quantum_dataset(num_samples: usize, num_features: usize) -> Result<SciRS2Dataset> {
333 let data = SciRS2Array::randn(vec![num_samples, num_features], SciRS2Device::CPU)?.data;
334 let labels = SciRS2Array::randint(0, 2, vec![num_samples], SciRS2Device::CPU)?.data;
335
336 SciRS2Dataset::new(data, labels)
337}
338
339fn create_test_quantum_dataset(num_samples: usize, num_features: usize) -> Result<SciRS2Dataset> {
340 create_large_quantum_dataset(num_samples, num_features)
341}
342
343fn compute_quantum_loss(
344 outputs: &dyn SciRS2Tensor,
345 targets: &dyn SciRS2Tensor,
346) -> Result<SciRS2Array> {
347 // Quantum-aware loss function (placeholder implementation)
348 let outputs_array = outputs.to_scirs2()?;
349 let targets_array = targets.to_scirs2()?;
350 let diff = &outputs_array.data - &targets_array.data;
351 let mse_data = &diff * &diff;
352 let mse_loss = SciRS2Array::new(
353 mse_data
354 .mean_axis(scirs2_core::ndarray::Axis(0))
355 .unwrap()
356 .into_dyn(),
357 false,
358 );
359 Ok(mse_loss)
360}
361
362fn evaluate_distributed_model(
363 model: &DistributedQuantumModel,
364 test_loader: &mut SciRS2DataLoader,
365 trainer: &SciRS2DistributedTrainer,
366) -> Result<EvaluationResults> {
367 let mut total_loss = 0.0;
368 let mut total_accuracy = 0.0;
369 let mut total_fidelity = 0.0;
370 let mut num_batches = 0;
371
372 for _batch_idx in 0..10 {
373 // Mock evaluation loop
374 let data = SciRS2Array::randn(vec![32, 8], SciRS2Device::CPU)?;
375 let targets = SciRS2Array::randn(vec![32], SciRS2Device::CPU)?;
376 let outputs = model.forward(&data)?;
377 let loss = compute_quantum_loss(&outputs, &targets)?;
378
379 let batch_accuracy = compute_accuracy(&outputs, &targets)?;
380 let batch_fidelity = compute_quantum_fidelity(&outputs)?;
381
382 total_loss += loss.data.iter().sum::<f64>();
383 total_accuracy += batch_accuracy;
384 total_fidelity += batch_fidelity;
385 num_batches += 1;
386 }
387
388 // Average across all workers
389 let avg_loss = trainer.all_reduce_scalar(total_loss / f64::from(num_batches))?;
390 let avg_accuracy = trainer.all_reduce_scalar(total_accuracy / f64::from(num_batches))?;
391 let avg_fidelity = trainer.all_reduce_scalar(total_fidelity / f64::from(num_batches))?;
392
393 Ok(EvaluationResults {
394 loss: avg_loss,
395 accuracy: avg_accuracy,
396 quantum_fidelity: avg_fidelity,
397 })
398}
399
400fn create_quantum_observable(num_qubits: usize) -> Result<SciRS2Array> {
401 // Create Pauli-Z observable for all qubits
402 SciRS2Array::quantum_observable("pauli_z_all", num_qubits)
403}
404
405fn compute_quantum_energy(params: &dyn SciRS2Tensor) -> Result<f64> {
406 // Mock quantum energy computation
407 let params_array = params.to_scirs2()?;
408 let norm_squared = params_array.data.iter().map(|x| x * x).sum::<f64>();
409 let sum_abs = params_array.data.iter().sum::<f64>().abs();
410 let energy = 0.5f64.mul_add(sum_abs, norm_squared);
411 Ok(energy)
412}
413
414fn compute_quantum_gradient(params: &dyn SciRS2Tensor) -> Result<SciRS2Array> {
415 // Mock gradient computation using parameter shift rule
416 // Mock gradient computation using parameter shift rule
417 let params_array = params.to_scirs2()?;
418     let gradient_data = &params_array.data * 2.0 + 0.5;
419 let gradient = SciRS2Array::new(gradient_data, false);
420 Ok(gradient)
421}
422
423fn compute_accuracy(outputs: &dyn SciRS2Tensor, targets: &dyn SciRS2Tensor) -> Result<f64> {
424     // Mock accuracy computation
425     let outputs_array = outputs.to_scirs2()?;
426     let targets_array = targets.to_scirs2()?;
427     // Simplified mock accuracy
428     let correct = 0.85; // Mock accuracy value
429     Ok(correct)
430 }
431
432fn compute_quantum_fidelity(outputs: &dyn SciRS2Tensor) -> Result<f64> {
433     // Mock quantum fidelity computation
434     let outputs_array = outputs.to_scirs2()?;
435     let norm = outputs_array.data.iter().map(|x| x * x).sum::<f64>().sqrt();
436     let fidelity = norm / (outputs_array.shape()[0] as f64).sqrt();
437     Ok(fidelity.min(1.0))
438 }
439
440// Supporting structures for the demo
441
442#[derive(Clone)]
443struct SciRS2TrainingMetrics {
444     losses: Vec<f64>,
445     epochs: Vec<usize>,
446}
447
448impl SciRS2TrainingMetrics {
449     const fn new() -> Self {
450         Self {
451             losses: Vec::new(),
452             epochs: Vec::new(),
453         }
454     }
455
456     fn record_epoch(&mut self, epoch: usize, loss: f64) {
457         self.epochs.push(epoch);
458         self.losses.push(loss);
459     }
460}
461
462struct EvaluationResults {
463     loss: f64,
464     accuracy: f64,
465     quantum_fidelity: f64,
466}
467
468struct DistributedQuantumModel {
469     num_qubits: usize,
470     parameters: SciRS2Array,
471}
472
473impl DistributedQuantumModel {
474     const fn new(
475         num_qubits: usize,
476         num_layers: usize,
477         ansatz_type: &str,
478         parameters: SciRS2Array,
479         measurement_type: &str,
480     ) -> Result<Self> {
481         Ok(Self {
482             num_qubits,
483             parameters,
484         })
485     }
486
487     fn forward(&self, input: &dyn SciRS2Tensor) -> Result<SciRS2Array> {
488         // Mock forward pass
489         let batch_size = input.shape()[0];
490         SciRS2Array::randn(vec![batch_size, 2], SciRS2Device::CPU)
491     }
pub fn from_array<D: Dimension>(arr: Array<f64, D>) -> Self
Create from ndarray
Examples found in repository?
examples/pytorch_integration_demo.rs (line 182)
144fn create_quantum_datasets() -> Result<(MemoryDataLoader, MemoryDataLoader)> {
145 // Create synthetic quantum-friendly dataset
146 let num_train = 800;
147 let num_test = 200;
148 let num_features = 4;
149
150 // Training data with quantum entanglement patterns
151 let train_data = Array2::from_shape_fn((num_train, num_features), |(i, j)| {
152 let phase = (i as f64).mul_add(0.1, j as f64 * 0.2);
153 (phase.sin() + (phase * 2.0).cos()) * 0.5
154 });
155
156 let train_labels = Array1::from_shape_fn(num_train, |i| {
157 // Create labels based on quantum-like correlations
158 let sum = (0..num_features).map(|j| train_data[[i, j]]).sum::<f64>();
159 if sum > 0.0 {
160 1.0
161 } else {
162 0.0
163 }
164 });
165
166 // Test data
167 let test_data = Array2::from_shape_fn((num_test, num_features), |(i, j)| {
168 let phase = (i as f64).mul_add(0.15, j as f64 * 0.25);
169 (phase.sin() + (phase * 2.0).cos()) * 0.5
170 });
171
172 let test_labels = Array1::from_shape_fn(num_test, |i| {
173 let sum = (0..num_features).map(|j| test_data[[i, j]]).sum::<f64>();
174 if sum > 0.0 {
175 1.0
176 } else {
177 0.0
178 }
179 });
180
181 let train_loader = MemoryDataLoader::new(
182 SciRS2Array::from_array(train_data.into_dyn()),
183 SciRS2Array::from_array(train_labels.into_dyn()),
184 32,
185 true,
186 )?;
187 let test_loader = MemoryDataLoader::new(
188 SciRS2Array::from_array(test_data.into_dyn()),
189 SciRS2Array::from_array(test_labels.into_dyn()),
190 32,
191 false,
192 )?;
193
194 Ok((train_loader, test_loader))
195}
pub fn matmul(&self, other: &SciRS2Array) -> Result<SciRS2Array>
Matrix multiplication using SciRS2 backend
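A condensed sketch of a matmul call, adapted from the repository example below; it assumes the crate's Result alias and the types on this page are in scope.

fn matmul_sketch() -> Result<()> {
    let a = SciRS2Array::randn(vec![100, 50], SciRS2Device::CPU)?;
    let b = SciRS2Array::randn(vec![50, 25], SciRS2Device::CPU)?;
    // (100 x 50) x (50 x 25) -> (100 x 25)
    let c = a.matmul(&b)?;
    println!("result shape: {:?}", c.shape());
    Ok(())
}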
Examples found in repository?
examples/scirs2_distributed_demo.rs (line 203)
pub fn add(&self, other: &SciRS2Array) -> Result<SciRS2Array>
Element-wise addition
pub fn mul(&self, other: &SciRS2Array) -> Result<SciRS2Array>
Element-wise multiplication
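A minimal sketch covering both element-wise operations (add and mul). It assumes matching shapes and the scirs2_core ndarray re-exports; broadcasting behaviour is not documented on this page.

use scirs2_core::ndarray::{ArrayD, IxDyn};

fn elementwise_sketch() -> Result<()> {
    let x = SciRS2Array::new(ArrayD::from_elem(IxDyn(&[2, 3]), 1.5), false);
    let y = SciRS2Array::new(ArrayD::from_elem(IxDyn(&[2, 3]), 2.0), false);
    let s = x.add(&y)?; // element-wise addition: 1.5 + 2.0 = 3.5 everywhere
    let p = x.mul(&y)?; // element-wise multiplication: 1.5 * 2.0 = 3.0 everywhere
    println!("s[[0, 0]] = {}, p[[0, 0]] = {}", s.data[[0, 0]], p.data[[0, 0]]);
    Ok(())
}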
pub fn sum(&self, axis: Option<usize>) -> Result<SciRS2Array>
Reduction sum
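A short sketch of the reduction. Passing None sums over all elements, as in the sum(None) call in the repository example below; that Some(axis) reduces along the given axis is an assumption.

fn sum_sketch() -> Result<()> {
    let t = SciRS2Array::randn(vec![100, 50], SciRS2Device::CPU)?;
    let total = t.sum(None)?;     // reduce over all elements
    let along0 = t.sum(Some(0))?; // assumed: reduce along axis 0
    println!("total: {:?}, along axis 0: {:?}", total.shape(), along0.shape());
    Ok(())
}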
Examples found in repository?
examples/scirs2_distributed_demo.rs (line 221)
impl SciRS2Array
Additional SciRS2Array methods for compatibility
pub fn randn(shape: Vec<usize>, device: SciRS2Device) -> Result<Self>
Create an array of random values on the specified device
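A minimal sketch, assuming the crate's Result alias is in scope; the exact distribution of the generated values is not documented on this page.

fn randn_sketch() -> Result<()> {
    // 4 x 6 array of random values on the CPU device, matching the demo's parameter shape.
    let params = SciRS2Array::randn(vec![4, 6], SciRS2Device::CPU)?;
    println!("shape: {:?}", params.shape());
    Ok(())
}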
Examples found in repository?
examples/scirs2_distributed_demo.rs (line 199)
pub fn randint(low: i32, high: i32, shape: Vec<usize>, device: SciRS2Device) -> Result<Self>
Create random integers
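A small sketch mirroring the label generation in create_large_quantum_dataset from the repository example above; whether high is exclusive is an assumption.

fn randint_sketch() -> Result<()> {
    // Binary 0/1 labels for 1000 samples, as used for the demo dataset.
    let labels = SciRS2Array::randint(0, 2, vec![1000], SciRS2Device::CPU)?;
    println!("labels shape: {:?}", labels.shape());
    Ok(())
}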
pub fn quantum_observable(name: &str, num_qubits: usize) -> Result<Self>
Create quantum observable
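A minimal sketch using the only observable name that appears in the repository example ("pauli_z_all"); other supported names are not documented on this page.

fn observable_sketch() -> Result<()> {
    // Pauli-Z on all qubits of a 4-qubit register, as in the distributed demo.
    let obs = SciRS2Array::quantum_observable("pauli_z_all", 4)?;
    println!("observable shape: {:?}", obs.shape());
    Ok(())
}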
Examples found in repository?
examples/scirs2_distributed_demo.rs (line 212)
278
279 // Load and verify
280 let _loaded_model = SciRS2Serializer::load_model("distributed_quantum_model.h5")?;
281 println!(" - Model loaded successfully");
282
283 // Step 11: Performance analysis
284 println!("\n11. Distributed training performance analysis...");
285
286 let performance_metrics = PerformanceMetrics {
287 communication_overhead: 0.15,
288 scaling_efficiency: 0.85,
289 memory_usage_gb: 2.5,
290 avg_batch_time: 0.042,
291 };
292
293 println!(" Performance Metrics:");
294 println!(
295 " - Communication overhead: {:.2}%",
296 performance_metrics.communication_overhead * 100.0
297 );
298 println!(
299 " - Scaling efficiency: {:.2}%",
300 performance_metrics.scaling_efficiency * 100.0
301 );
302 println!(
303 " - Memory usage per worker: {:.1} GB",
304 performance_metrics.memory_usage_gb
305 );
306 println!(
307 " - Average batch processing time: {:.3}s",
308 performance_metrics.avg_batch_time
309 );
310
311 // Step 12: Cleanup distributed environment
312 println!("\n12. Cleaning up distributed environment...");
313
314 // distributed_trainer.cleanup()?; // Placeholder
315 println!(" - Distributed training environment cleaned up");
316
317 println!("\n=== SciRS2 Distributed Training Demo Complete ===");
318
319 Ok(())
320}
321
322fn create_distributed_quantum_model(params: &dyn SciRS2Tensor) -> Result<DistributedQuantumModel> {
323 DistributedQuantumModel::new(
324 4, // num_qubits
325 3, // num_layers
326 "hardware_efficient", // ansatz_type
327 params.to_scirs2()?, // parameters
328 "expectation_value", // measurement_type
329 )
330}
331
332fn create_large_quantum_dataset(num_samples: usize, num_features: usize) -> Result<SciRS2Dataset> {
333 let data = SciRS2Array::randn(vec![num_samples, num_features], SciRS2Device::CPU)?.data;
334 let labels = SciRS2Array::randint(0, 2, vec![num_samples], SciRS2Device::CPU)?.data;
335
336 SciRS2Dataset::new(data, labels)
337}
338
339fn create_test_quantum_dataset(num_samples: usize, num_features: usize) -> Result<SciRS2Dataset> {
340 create_large_quantum_dataset(num_samples, num_features)
341}
342
343fn compute_quantum_loss(
344 outputs: &dyn SciRS2Tensor,
345 targets: &dyn SciRS2Tensor,
346) -> Result<SciRS2Array> {
347 // Quantum-aware loss function (placeholder implementation)
348 let outputs_array = outputs.to_scirs2()?;
349 let targets_array = targets.to_scirs2()?;
350 let diff = &outputs_array.data - &targets_array.data;
351 let mse_data = &diff * &diff;
352 let mse_loss = SciRS2Array::new(
353 mse_data
354 .mean_axis(scirs2_core::ndarray::Axis(0))
355 .unwrap()
356 .into_dyn(),
357 false,
358 );
359 Ok(mse_loss)
360}
361
362fn evaluate_distributed_model(
363 model: &DistributedQuantumModel,
364 test_loader: &mut SciRS2DataLoader,
365 trainer: &SciRS2DistributedTrainer,
366) -> Result<EvaluationResults> {
367 let mut total_loss = 0.0;
368 let mut total_accuracy = 0.0;
369 let mut total_fidelity = 0.0;
370 let mut num_batches = 0;
371
372 for _batch_idx in 0..10 {
373 // Mock evaluation loop
374 let data = SciRS2Array::randn(vec![32, 8], SciRS2Device::CPU)?;
375 let targets = SciRS2Array::randn(vec![32], SciRS2Device::CPU)?;
376 let outputs = model.forward(&data)?;
377 let loss = compute_quantum_loss(&outputs, &targets)?;
378
379 let batch_accuracy = compute_accuracy(&outputs, &targets)?;
380 let batch_fidelity = compute_quantum_fidelity(&outputs)?;
381
382 total_loss += loss.data.iter().sum::<f64>();
383 total_accuracy += batch_accuracy;
384 total_fidelity += batch_fidelity;
385 num_batches += 1;
386 }
387
388 // Average across all workers
389 let avg_loss = trainer.all_reduce_scalar(total_loss / f64::from(num_batches))?;
390 let avg_accuracy = trainer.all_reduce_scalar(total_accuracy / f64::from(num_batches))?;
391 let avg_fidelity = trainer.all_reduce_scalar(total_fidelity / f64::from(num_batches))?;
392
393 Ok(EvaluationResults {
394 loss: avg_loss,
395 accuracy: avg_accuracy,
396 quantum_fidelity: avg_fidelity,
397 })
398}
399
400fn create_quantum_observable(num_qubits: usize) -> Result<SciRS2Array> {
401 // Create Pauli-Z observable for all qubits
402 SciRS2Array::quantum_observable("pauli_z_all", num_qubits)
403}
Trait Implementations§
impl Clone for SciRS2Array
impl Debug for SciRS2Array
impl SciRS2Tensor for SciRS2Array
fn view(&self) -> ArrayViewD<'_, f64>
Get tensor data as ArrayViewD
fn to_scirs2(&self) -> Result<SciRS2Array>
Convert to SciRS2 format (placeholder)
fn matmul(&self, other: &dyn SciRS2Tensor) -> Result<SciRS2Array>
Perform tensor operations using SciRS2 backend
fn add(&self, other: &dyn SciRS2Tensor) -> Result<SciRS2Array>
Element-wise operations
fn mul(&self, other: &dyn SciRS2Tensor) -> Result<SciRS2Array>
fn sub(&self, other: &dyn SciRS2Tensor) -> Result<SciRS2Array>
fn mean(&self, axis: Option<usize>) -> Result<SciRS2Array>
fn max(&self, axis: Option<usize>) -> Result<SciRS2Array>
fn min(&self, axis: Option<usize>) -> Result<SciRS2Array>
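A minimal sketch of exercising these SciRS2Tensor methods on small SciRS2Array values. The scirs2_core::ndarray import path mirrors the Axis usage in the example above, the Result alias is assumed to be the same one the example uses, and the reduction semantics of mean (axis index, None meaning all elements) are inferred from the signatures rather than from documented behavior:

use scirs2_core::ndarray::{ArrayD, IxDyn};

fn tensor_ops_sketch() -> Result<()> {
    // Two small matrices with gradient tracking disabled.
    let a = SciRS2Array::new(ArrayD::zeros(IxDyn(&[4, 3])), false);
    let b = SciRS2Array::new(ArrayD::zeros(IxDyn(&[3, 2])), false);

    // Matrix product via the SciRS2 backend: (4, 3) x (3, 2) -> (4, 2).
    let product = a.matmul(&b)?;

    // Element-wise operations take any other SciRS2Tensor of compatible shape.
    let doubled = product.add(&product)?;

    // Reductions accept an optional axis; None is assumed to reduce over all elements.
    let column_means = doubled.mean(Some(0))?;
    let overall_mean = doubled.mean(None)?;

    println!("{:?} -> {:?}", column_means.data.shape(), overall_mean.data.shape());
    Ok(())
}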
Auto Trait Implementations§
impl Freeze for SciRS2Array
impl !RefUnwindSafe for SciRS2Array
impl Send for SciRS2Array
impl Sync for SciRS2Array
impl Unpin for SciRS2Array
impl !UnwindSafe for SciRS2Array
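Because SciRS2Array is Send and Sync, a constructed array can be shared across worker threads, for example behind an Arc. A minimal sketch; shared_across_threads is a hypothetical helper, and the thread count and summation workload are purely illustrative:

use scirs2_core::ndarray::{ArrayD, IxDyn};
use std::sync::Arc;
use std::thread;

fn shared_across_threads() {
    let shared = Arc::new(SciRS2Array::new(ArrayD::zeros(IxDyn(&[8, 8])), false));

    let handles: Vec<_> = (0..2)
        .map(|_| {
            let local = Arc::clone(&shared);
            // Moving the Arc into the closure is only possible because SciRS2Array: Send + Sync.
            thread::spawn(move || local.data.iter().sum::<f64>())
        })
        .collect();

    for handle in handles {
        let partial: f64 = handle.join().expect("worker thread panicked");
        println!("partial sum = {partial}");
    }
}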
Blanket Implementations§
impl<T> BorrowMut<T> for T where T: ?Sized
fn borrow_mut(&mut self) -> &mut T
Mutably borrows from an owned value.
impl<T> CloneToUninit for T where T: Clone
impl<T> IntoEither for T
fn into_either(self, into_left: bool) -> Either<Self, Self>
Converts self into a Left variant of Either<Self, Self> if into_left is true; otherwise converts self into a Right variant of Either<Self, Self>.
fn into_either_with<F>(self, into_left: F) -> Either<Self, Self>
Converts self into a Left variant of Either<Self, Self> if into_left(&self) returns true; otherwise converts self into a Right variant of Either<Self, Self>.
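As a small illustration of the IntoEither blanket impl above (assuming it is the either crate's trait, which is where this signature originates), any sized value can be routed into a Left or Right variant; into_either_demo is a hypothetical function name:

use either::{Either, IntoEither};

fn into_either_demo() {
    // into_either picks the side from a boolean flag.
    let left: Either<i32, i32> = 5.into_either(true);
    let right: Either<i32, i32> = 5.into_either(false);
    assert_eq!(left, Either::Left(5));
    assert_eq!(right, Either::Right(5));

    // into_either_with picks the side from a predicate over the value itself.
    let routed: Either<i32, i32> = 7.into_either_with(|x| x % 2 == 1);
    assert_eq!(routed, Either::Left(7));
}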
impl<T> Pointable for T
impl<SS, SP> SupersetOf<SS> for SP where SS: SubsetOf<SP>
fn to_subset(&self) -> Option<SS>
The inverse inclusion map: attempts to construct self from the equivalent element of its superset.
fn is_in_subset(&self) -> bool
Checks if self is actually part of its subset T (and can be converted to it).
fn to_subset_unchecked(&self) -> SS
Use with care! Same as self.to_subset but without any property checks. Always succeeds.
fn from_subset(element: &SS) -> SP
The inclusion map: converts self to the equivalent element of its superset.