pub struct SciRS2Array {
pub data: ArrayD<f64>,
pub requires_grad: bool,
pub grad: Option<ArrayD<f64>>,
pub grad_fn: Option<Box<dyn GradFunction>>,
}
SciRS2 array wrapper for quantum ML operations
Fields

data: ArrayD<f64> - Array data
requires_grad: bool - Whether gradients are required
grad: Option<ArrayD<f64>> - Gradient accumulator
grad_fn: Option<Box<dyn GradFunction>> - Operation history for backpropagation
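A minimal construction sketch (hedged: it assumes ArrayD and IxDyn are in scope as in the repository examples below, and that a freshly built array has no accumulated gradient; that default is not documented here):

// Build a 2x3 zero array that participates in autodiff.
let data = ArrayD::<f64>::zeros(IxDyn(&[2, 3]));
let arr = SciRS2Array::new(data, true);

println!("requires_grad: {}", arr.requires_grad);
println!("grad present:  {}", arr.grad.is_some()); // expected: false for a fresh leaf node
println!("shape:         {:?}", arr.data.shape());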
Implementations

impl SciRS2Array

pub fn new(data: ArrayD<f64>, requires_grad: bool) -> Self

Create a new SciRS2Array

Examples found in repository:
examples/scirs2_distributed_demo.rs (line 35)
15fn main() -> Result<()> {
16 println!("=== SciRS2 Distributed Training Demo ===\n");
17
18 // Step 1: Initialize SciRS2 distributed environment
19 println!("1. Initializing SciRS2 distributed environment...");
20
21 let distributed_trainer = SciRS2DistributedTrainer::new(
22 4, // world_size
23 0, // rank
24 );
25
26 println!(" - Workers: 4");
27 println!(" - Backend: {}", distributed_trainer.backend);
28 println!(" - World size: {}", distributed_trainer.world_size);
29
30 // Step 2: Create SciRS2 tensors and arrays
31 println!("\n2. Creating SciRS2 tensors and arrays...");
32
33 let data_shape = (1000, 8);
34 let mut scirs2_array =
35 SciRS2Array::new(ArrayD::zeros(IxDyn(&[data_shape.0, data_shape.1])), true);
36 scirs2_array.requires_grad = true;
37
38 // Placeholder for quantum-friendly data initialization
39 // scirs2_array.fill_quantum_data("quantum_normal", 42)?; // would be implemented
40
41 println!(" - Array shape: {:?}", scirs2_array.shape());
42 println!(" - Requires grad: {}", scirs2_array.requires_grad);
43 println!(" - Device: CPU"); // Placeholder
44
45 // Create SciRS2 tensor for quantum parameters
46 let param_data = ArrayD::zeros(IxDyn(&[4, 6])); // 4 qubits, 6 parameters per qubit
47 let mut quantum_params = SciRS2Array::new(param_data, true);
48
49 // Placeholder for quantum parameter initialization
50 // quantum_params.quantum_parameter_init("quantum_aware")?; // would be implemented
51
52 println!(
53 " - Quantum parameters shape: {:?}",
54 quantum_params.data.shape()
55 );
56 println!(
57 " - Parameter range: [{:.4}, {:.4}]",
58 quantum_params
59 .data
60 .iter()
61 .fold(f64::INFINITY, |a, &b| a.min(b)),
62 quantum_params
63 .data
64 .iter()
65 .fold(f64::NEG_INFINITY, |a, &b| a.max(b))
66 );
67
68 // Step 3: Setup distributed quantum model
69 println!("\n3. Setting up distributed quantum model...");
70
71 let quantum_model = create_distributed_quantum_model(&quantum_params)?;
72
73 // Wrap model for distributed training
74 let distributed_model = distributed_trainer.wrap_model(quantum_model)?;
75
76 println!(
77 " - Model parameters: {}",
78 distributed_model.num_parameters()
79 );
80 println!(" - Distributed: {}", distributed_model.is_distributed());
81
82 // Step 4: Create SciRS2 optimizers
83 println!("\n4. Configuring SciRS2 optimizers...");
84
85 let optimizer = SciRS2Optimizer::new("adam");
86
87 // Configure distributed optimizer
88 let mut distributed_optimizer = distributed_trainer.wrap_model(optimizer)?;
89
90 println!(" - Optimizer: Adam with SciRS2 backend");
91 println!(" - Learning rate: 0.001"); // Placeholder
92 println!(" - Distributed synchronization: enabled");
93
94 // Step 5: Distributed data loading
95 println!("\n5. Setting up distributed data loading...");
96
97 let dataset = create_large_quantum_dataset(10000, 8)?;
98 println!(" - Dataset created with {} samples", dataset.size);
99 println!(" - Distributed sampling configured");
100
101 // Create data loader
102 let mut data_loader = SciRS2DataLoader::new(dataset, 64);
103
104 println!(" - Total dataset size: {}", data_loader.dataset.size);
105 println!(" - Local batches per worker: 156"); // placeholder
106 println!(" - Global batch size: 64"); // placeholder
107
108 // Step 6: Distributed training loop
109 println!("\n6. Starting distributed training...");
110
111 let num_epochs = 10;
112 let mut training_metrics = SciRS2TrainingMetrics::new();
113
114 for epoch in 0..num_epochs {
115 // distributed_trainer.barrier()?; // Synchronize all workers - placeholder
116
117 let mut epoch_loss = 0.0;
118 let mut num_batches = 0;
119
120 for (batch_idx, (data, targets)) in data_loader.enumerate() {
121 // Convert to SciRS2 tensors
122 let data_tensor = data.clone();
123 let target_tensor = targets.clone();
124
125 // Zero gradients
126 // distributed_optimizer.zero_grad()?; // placeholder
127
128 // Forward pass
129 let outputs = distributed_model.forward(&data_tensor)?;
130 let loss = compute_quantum_loss(&outputs, &target_tensor)?;
131
132 // Backward pass with automatic differentiation
133 // loss.backward()?; // placeholder
134
135 // Gradient synchronization across workers
136 // distributed_trainer.all_reduce_gradients(&distributed_model)?; // placeholder
137
138 // Optimizer step
139 // distributed_optimizer.step()?; // placeholder
140
141 epoch_loss += loss.data.iter().sum::<f64>();
142 num_batches += 1;
143
144 if batch_idx % 10 == 0 {
145 println!(
146 " Epoch {}, Batch {}: loss = {:.6}",
147 epoch,
148 batch_idx,
149 loss.data.iter().sum::<f64>()
150 );
151 }
152 }
153
154 // Collect metrics across all workers
155 let avg_loss =
156 distributed_trainer.all_reduce_scalar(epoch_loss / f64::from(num_batches))?;
157 training_metrics.record_epoch(epoch, avg_loss);
158
159 println!(" Epoch {epoch} completed: avg_loss = {avg_loss:.6}");
160 }
161
162 // Step 7: Distributed evaluation
163 println!("\n7. Distributed model evaluation...");
164
165 let test_dataset = create_test_quantum_dataset(2000, 8)?;
166 // let test_sampler = distributed_trainer.create_sampler(&test_dataset)?; // placeholder
167 println!(
168 " - Test dataset configured with {} samples",
169 test_dataset.size
170 );
171
172 let evaluation_results = evaluate_distributed_model(
173 &distributed_model,
174 &mut SciRS2DataLoader::new(test_dataset, 64),
175 &distributed_trainer,
176 )?;
177
178 println!(" Distributed Evaluation Results:");
179 println!(" - Test accuracy: {:.4}", evaluation_results.accuracy);
180 println!(" - Test loss: {:.6}", evaluation_results.loss);
181 println!(
182 " - Quantum fidelity: {:.4}",
183 evaluation_results.quantum_fidelity
184 );
185
186 // Step 8: SciRS2 tensor operations
187 println!("\n8. Demonstrating SciRS2 tensor operations...");
188
189 // Advanced tensor operations
190 let tensor_a = SciRS2Array::randn(vec![100, 50], SciRS2Device::CPU)?;
191 let tensor_b = SciRS2Array::randn(vec![50, 25], SciRS2Device::CPU)?;
192
193 // Matrix multiplication with automatic broadcasting
194 let result = tensor_a.matmul(&tensor_b)?;
195 println!(
196 " - Matrix multiplication: {:?} x {:?} = {:?}",
197 tensor_a.shape(),
198 tensor_b.shape(),
199 result.shape()
200 );
201
202 // Quantum-specific operations
203 let quantum_state = SciRS2Array::quantum_observable("pauli_z_all", 4)?;
204 // Placeholder for quantum evolution
205 let evolved_state = quantum_state;
206 let fidelity = 0.95; // Mock fidelity
207
208 println!(" - Quantum state evolution fidelity: {fidelity:.6}");
209
210 // Placeholder for distributed tensor operations
211 let distributed_tensor = tensor_a;
212 let local_computation = distributed_tensor.sum(None)?;
213 let global_result = local_computation;
214
215 println!(
216 " - Distributed computation result shape: {:?}",
217 global_result.shape()
218 );
219
220 // Step 9: Scientific computing features
221 println!("\n9. SciRS2 scientific computing features...");
222
223 // Numerical integration for quantum expectation values
224 let observable = create_quantum_observable(4)?;
225 let expectation_value = 0.5; // Mock expectation value
226 println!(" - Quantum expectation value: {expectation_value:.6}");
227
228 // Optimization with scientific methods
229 let mut optimization_result = OptimizationResult {
230 converged: true,
231 final_value: compute_quantum_energy(&quantum_params)?,
232 num_iterations: 42,
233 };
234
235 println!(
236 " - LBFGS optimization converged: {}",
237 optimization_result.converged
238 );
239 println!(" - Final energy: {:.8}", optimization_result.final_value);
240 println!(" - Iterations: {}", optimization_result.num_iterations);
241
242 // Step 10: Model serialization with SciRS2
243 println!("\n10. SciRS2 model serialization...");
244
245 let serializer = SciRS2Serializer;
246
247 // Save distributed model
248 SciRS2Serializer::save_model(
249 &distributed_model.state_dict(),
250 "distributed_quantum_model.h5",
251 )?;
252 println!(" - Model saved with SciRS2 serializer");
253
254 // Save training state for checkpointing
255 let checkpoint = SciRS2Checkpoint {
256 model_state: distributed_model.state_dict(),
257 optimizer_state: HashMap::new(), // Placeholder for optimizer state
258 epoch: num_epochs,
259 metrics: training_metrics.clone(),
260 };
261
262 SciRS2Serializer::save_checkpoint(
263 &checkpoint.model_state,
264 &SciRS2Optimizer::new("adam"),
265 checkpoint.epoch,
266 "training_checkpoint.h5",
267 )?;
268 println!(" - Training checkpoint saved");
269
270 // Load and verify
271 let _loaded_model = SciRS2Serializer::load_model("distributed_quantum_model.h5")?;
272 println!(" - Model loaded successfully");
273
274 // Step 11: Performance analysis
275 println!("\n11. Distributed training performance analysis...");
276
277 let performance_metrics = PerformanceMetrics {
278 communication_overhead: 0.15,
279 scaling_efficiency: 0.85,
280 memory_usage_gb: 2.5,
281 avg_batch_time: 0.042,
282 };
283
284 println!(" Performance Metrics:");
285 println!(
286 " - Communication overhead: {:.2}%",
287 performance_metrics.communication_overhead * 100.0
288 );
289 println!(
290 " - Scaling efficiency: {:.2}%",
291 performance_metrics.scaling_efficiency * 100.0
292 );
293 println!(
294 " - Memory usage per worker: {:.1} GB",
295 performance_metrics.memory_usage_gb
296 );
297 println!(
298 " - Average batch processing time: {:.3}s",
299 performance_metrics.avg_batch_time
300 );
301
302 // Step 12: Cleanup distributed environment
303 println!("\n12. Cleaning up distributed environment...");
304
305 // distributed_trainer.cleanup()?; // Placeholder
306 println!(" - Distributed training environment cleaned up");
307
308 println!("\n=== SciRS2 Distributed Training Demo Complete ===");
309
310 Ok(())
311}
312
313fn create_distributed_quantum_model(params: &dyn SciRS2Tensor) -> Result<DistributedQuantumModel> {
314 DistributedQuantumModel::new(
315 4, // num_qubits
316 3, // num_layers
317 "hardware_efficient", // ansatz_type
318 params.to_scirs2()?, // parameters
319 "expectation_value", // measurement_type
320 )
321}
322
323fn create_large_quantum_dataset(num_samples: usize, num_features: usize) -> Result<SciRS2Dataset> {
324 let data = SciRS2Array::randn(vec![num_samples, num_features], SciRS2Device::CPU)?.data;
325 let labels = SciRS2Array::randint(0, 2, vec![num_samples], SciRS2Device::CPU)?.data;
326
327 SciRS2Dataset::new(data, labels)
328}
329
330fn create_test_quantum_dataset(num_samples: usize, num_features: usize) -> Result<SciRS2Dataset> {
331 create_large_quantum_dataset(num_samples, num_features)
332}
333
334fn compute_quantum_loss(
335 outputs: &dyn SciRS2Tensor,
336 targets: &dyn SciRS2Tensor,
337) -> Result<SciRS2Array> {
338 // Quantum-aware loss function (placeholder implementation)
339 let outputs_array = outputs.to_scirs2()?;
340 let targets_array = targets.to_scirs2()?;
341 let diff = &outputs_array.data - &targets_array.data;
342 let mse_data = &diff * &diff;
343 let mse_loss = SciRS2Array::new(
344 mse_data
345 .mean_axis(scirs2_core::ndarray::Axis(0))
346 .unwrap()
347 .into_dyn(),
348 false,
349 );
350 Ok(mse_loss)
351}
352
353fn evaluate_distributed_model(
354 model: &DistributedQuantumModel,
355 test_loader: &mut SciRS2DataLoader,
356 trainer: &SciRS2DistributedTrainer,
357) -> Result<EvaluationResults> {
358 let mut total_loss = 0.0;
359 let mut total_accuracy = 0.0;
360 let mut total_fidelity = 0.0;
361 let mut num_batches = 0;
362
363 for _batch_idx in 0..10 {
364 // Mock evaluation loop
365 let data = SciRS2Array::randn(vec![32, 8], SciRS2Device::CPU)?;
366 let targets = SciRS2Array::randn(vec![32], SciRS2Device::CPU)?;
367 let outputs = model.forward(&data)?;
368 let loss = compute_quantum_loss(&outputs, &targets)?;
369
370 let batch_accuracy = compute_accuracy(&outputs, &targets)?;
371 let batch_fidelity = compute_quantum_fidelity(&outputs)?;
372
373 total_loss += loss.data.iter().sum::<f64>();
374 total_accuracy += batch_accuracy;
375 total_fidelity += batch_fidelity;
376 num_batches += 1;
377 }
378
379 // Average across all workers
380 let avg_loss = trainer.all_reduce_scalar(total_loss / f64::from(num_batches))?;
381 let avg_accuracy = trainer.all_reduce_scalar(total_accuracy / f64::from(num_batches))?;
382 let avg_fidelity = trainer.all_reduce_scalar(total_fidelity / f64::from(num_batches))?;
383
384 Ok(EvaluationResults {
385 loss: avg_loss,
386 accuracy: avg_accuracy,
387 quantum_fidelity: avg_fidelity,
388 })
389}
390
391fn create_quantum_observable(num_qubits: usize) -> Result<SciRS2Array> {
392 // Create Pauli-Z observable for all qubits
393 SciRS2Array::quantum_observable("pauli_z_all", num_qubits)
394}
395
396fn compute_quantum_energy(params: &dyn SciRS2Tensor) -> Result<f64> {
397 // Mock quantum energy computation
398 let params_array = params.to_scirs2()?;
399 let norm_squared = params_array.data.iter().map(|x| x * x).sum::<f64>();
400 let sum_abs = params_array.data.iter().sum::<f64>().abs();
401 let energy = 0.5f64.mul_add(sum_abs, norm_squared);
402 Ok(energy)
403}
404
405fn compute_quantum_gradient(params: &dyn SciRS2Tensor) -> Result<SciRS2Array> {
406 // Mock gradient computation using parameter shift rule
407 // Mock gradient computation using parameter shift rule
408 let params_array = params.to_scirs2()?;
409        let gradient_data = &params_array.data * 2.0 + 0.5;
410 let gradient = SciRS2Array::new(gradient_data, false);
411 Ok(gradient)
412}

pub fn from_array<D: Dimension>(arr: Array<f64, D>) -> Self

Create from ndarray

Examples found in repository:
examples/pytorch_integration_demo.rs (line 175)
137fn create_quantum_datasets() -> Result<(MemoryDataLoader, MemoryDataLoader)> {
138 // Create synthetic quantum-friendly dataset
139 let num_train = 800;
140 let num_test = 200;
141 let num_features = 4;
142
143 // Training data with quantum entanglement patterns
144 let train_data = Array2::from_shape_fn((num_train, num_features), |(i, j)| {
145 let phase = (i as f64).mul_add(0.1, j as f64 * 0.2);
146 (phase.sin() + (phase * 2.0).cos()) * 0.5
147 });
148
149 let train_labels = Array1::from_shape_fn(num_train, |i| {
150 // Create labels based on quantum-like correlations
151 let sum = (0..num_features).map(|j| train_data[[i, j]]).sum::<f64>();
152 if sum > 0.0 {
153 1.0
154 } else {
155 0.0
156 }
157 });
158
159 // Test data
160 let test_data = Array2::from_shape_fn((num_test, num_features), |(i, j)| {
161 let phase = (i as f64).mul_add(0.15, j as f64 * 0.25);
162 (phase.sin() + (phase * 2.0).cos()) * 0.5
163 });
164
165 let test_labels = Array1::from_shape_fn(num_test, |i| {
166 let sum = (0..num_features).map(|j| test_data[[i, j]]).sum::<f64>();
167 if sum > 0.0 {
168 1.0
169 } else {
170 0.0
171 }
172 });
173
174 let train_loader = MemoryDataLoader::new(
175 SciRS2Array::from_array(train_data.into_dyn()),
176 SciRS2Array::from_array(train_labels.into_dyn()),
177 32,
178 true,
179 )?;
180 let test_loader = MemoryDataLoader::new(
181 SciRS2Array::from_array(test_data.into_dyn()),
182 SciRS2Array::from_array(test_labels.into_dyn()),
183 32,
184 false,
185 )?;
186
187 Ok((train_loader, test_loader))
188}

pub fn matmul(&self, other: &SciRS2Array) -> Result<SciRS2Array>

Matrix multiplication using SciRS2 backend
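Before the repository listing, a minimal sketch (hedged: it reuses randn and SciRS2Device::CPU from the examples and assumes it runs inside a function returning Result<()>, like the demo's main):

// (4 x 3) x (3 x 2) -> (4 x 2)
let a = SciRS2Array::randn(vec![4, 3], SciRS2Device::CPU)?;
let b = SciRS2Array::randn(vec![3, 2], SciRS2Device::CPU)?;
let c = a.matmul(&b)?;
println!("{:?} x {:?} = {:?}", a.shape(), b.shape(), c.shape()); // [4, 3] x [3, 2] = [4, 2]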
Examples found in repository:
examples/scirs2_distributed_demo.rs (line 194) - the same listing shown in full under new above; the matmul call is at its line 194.

pub fn add(&self, other: &SciRS2Array) -> Result<SciRS2Array>
Element-wise addition
pub fn mul(&self, other: &SciRS2Array) -> Result<SciRS2Array>

Element-wise multiplication
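Neither add nor mul has a repository example. A hedged sketch of both element-wise operations (assumptions: operands of identical shape, execution inside a function returning Result<()>):

let x = SciRS2Array::new(ArrayD::from_elem(IxDyn(&[2, 2]), 3.0), false);
let y = SciRS2Array::new(ArrayD::from_elem(IxDyn(&[2, 2]), 2.0), false);

let s = x.add(&y)?; // each element: 3.0 + 2.0 = 5.0
let p = x.mul(&y)?; // each element: 3.0 * 2.0 = 6.0
println!("{:?}", s.data);
println!("{:?}", p.data);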
pub fn sum(&self, axis: Option<usize>) -> Result<SciRS2Array>

Reduction sum
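A short sketch of both reduction modes, assuming None reduces over all elements and Some(axis) reduces over a single axis (inferred from the Option<usize> signature; not stated elsewhere in this page):

let m = SciRS2Array::new(ArrayD::from_elem(IxDyn(&[2, 3]), 1.0), false);

let total = m.sum(None)?;         // all 2 * 3 = 6 elements -> 6.0
let per_column = m.sum(Some(0))?; // reduce over axis 0 -> length-3 result (assumed semantics)
println!("{:?}", total.data);
println!("{:?}", per_column.data);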
Examples found in repository:
examples/scirs2_distributed_demo.rs (line 212) - the same listing shown in full under new above; the sum call is at its line 212.

impl SciRS2Array

Additional SciRS2Array methods for compatibility
pub fn randn(shape: Vec<usize>, device: SciRS2Device) -> Result<Self>

Create array with specified device
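A one-line sketch (hedged: entries are assumed to be standard-normal, as the name suggests; only SciRS2Device::CPU appears in the repository examples):

// 128 samples of 16 features on the CPU device.
let noise = SciRS2Array::randn(vec![128, 16], SciRS2Device::CPU)?;
println!("{:?}", noise.shape()); // [128, 16]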
Examples found in repository:
examples/scirs2_distributed_demo.rs (line 190) - lines 15-412 are the same listing shown in full under new above; the randn calls are at its lines 190-191. The remaining support code from that file (lines 413-482) follows:
413
414fn compute_accuracy(outputs: &dyn SciRS2Tensor, targets: &dyn SciRS2Tensor) -> Result<f64> {
415 // Mock accuracy computation
416 let outputs_array = outputs.to_scirs2()?;
417 let targets_array = targets.to_scirs2()?;
418 // Simplified mock accuracy
419 let correct = 0.85; // Mock accuracy value
420 Ok(correct)
421}
422
423fn compute_quantum_fidelity(outputs: &dyn SciRS2Tensor) -> Result<f64> {
424 // Mock quantum fidelity computation
425 let outputs_array = outputs.to_scirs2()?;
426 let norm = outputs_array.data.iter().map(|x| x * x).sum::<f64>().sqrt();
427 let fidelity = norm / (outputs_array.shape()[0] as f64).sqrt();
428 Ok(fidelity.min(1.0))
429}
430
431// Supporting structures for the demo
432
433#[derive(Clone)]
434struct SciRS2TrainingMetrics {
435 losses: Vec<f64>,
436 epochs: Vec<usize>,
437}
438
439impl SciRS2TrainingMetrics {
440 const fn new() -> Self {
441 Self {
442 losses: Vec::new(),
443 epochs: Vec::new(),
444 }
445 }
446
447 fn record_epoch(&mut self, epoch: usize, loss: f64) {
448 self.epochs.push(epoch);
449 self.losses.push(loss);
450 }
451}
452
453struct EvaluationResults {
454 loss: f64,
455 accuracy: f64,
456 quantum_fidelity: f64,
457}
458
459struct DistributedQuantumModel {
460 num_qubits: usize,
461 parameters: SciRS2Array,
462}
463
464impl DistributedQuantumModel {
465 const fn new(
466 num_qubits: usize,
467 num_layers: usize,
468 ansatz_type: &str,
469 parameters: SciRS2Array,
470 measurement_type: &str,
471 ) -> Result<Self> {
472 Ok(Self {
473 num_qubits,
474 parameters,
475 })
476 }
477
478 fn forward(&self, input: &dyn SciRS2Tensor) -> Result<SciRS2Array> {
479 // Mock forward pass
480 let batch_size = input.shape()[0];
481 SciRS2Array::randn(vec![batch_size, 2], SciRS2Device::CPU)
482     }

pub fn randint(low: i32, high: i32, shape: Vec<usize>, device: SciRS2Device) -> Result<Self>
Create random integers
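There is no repository example for this constructor. A hedged sketch, mirroring the create_large_quantum_dataset helper above (assumptions: high is exclusive, as in most randint-style APIs; values are stored as f64 because data is ArrayD<f64>):

// Binary labels in {0, 1} for 1000 samples.
let labels = SciRS2Array::randint(0, 2, vec![1000], SciRS2Device::CPU)?;
println!("{:?}", labels.shape()); // [1000]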
pub fn quantum_observable(name: &str, num_qubits: usize) -> Result<Self>

Create quantum observable
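A short sketch using the only observable name that appears in the repository example ("pauli_z_all"); other accepted names are not documented on this page:

// Pauli-Z observable over all qubits of a 4-qubit register.
let obs = SciRS2Array::quantum_observable("pauli_z_all", 4)?;
println!("{:?}", obs.shape()); // representation-dependent shape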
Examples found in repository?
examples/scirs2_distributed_demo.rs (line 203)
15fn main() -> Result<()> {
16 println!("=== SciRS2 Distributed Training Demo ===\n");
17
18 // Step 1: Initialize SciRS2 distributed environment
19 println!("1. Initializing SciRS2 distributed environment...");
20
21 let distributed_trainer = SciRS2DistributedTrainer::new(
22 4, // world_size
23 0, // rank
24 );
25
26 println!(" - Workers: 4");
27 println!(" - Backend: {}", distributed_trainer.backend);
28 println!(" - World size: {}", distributed_trainer.world_size);
29
30 // Step 2: Create SciRS2 tensors and arrays
31 println!("\n2. Creating SciRS2 tensors and arrays...");
32
33 let data_shape = (1000, 8);
34 let mut scirs2_array =
35 SciRS2Array::new(ArrayD::zeros(IxDyn(&[data_shape.0, data_shape.1])), true);
36 scirs2_array.requires_grad = true;
37
38 // Placeholder for quantum-friendly data initialization
39 // scirs2_array.fill_quantum_data("quantum_normal", 42)?; // would be implemented
40
41 println!(" - Array shape: {:?}", scirs2_array.shape());
42 println!(" - Requires grad: {}", scirs2_array.requires_grad);
43 println!(" - Device: CPU"); // Placeholder
44
45 // Create SciRS2 tensor for quantum parameters
46 let param_data = ArrayD::zeros(IxDyn(&[4, 6])); // 4 qubits, 6 parameters per qubit
47 let mut quantum_params = SciRS2Array::new(param_data, true);
48
49 // Placeholder for quantum parameter initialization
50 // quantum_params.quantum_parameter_init("quantum_aware")?; // would be implemented
51
52 println!(
53 " - Quantum parameters shape: {:?}",
54 quantum_params.data.shape()
55 );
56 println!(
57 " - Parameter range: [{:.4}, {:.4}]",
58 quantum_params
59 .data
60 .iter()
61 .fold(f64::INFINITY, |a, &b| a.min(b)),
62 quantum_params
63 .data
64 .iter()
65 .fold(f64::NEG_INFINITY, |a, &b| a.max(b))
66 );
67
68 // Step 3: Setup distributed quantum model
69 println!("\n3. Setting up distributed quantum model...");
70
71 let quantum_model = create_distributed_quantum_model(&quantum_params)?;
72
73 // Wrap model for distributed training
74 let distributed_model = distributed_trainer.wrap_model(quantum_model)?;
75
76 println!(
77 " - Model parameters: {}",
78 distributed_model.num_parameters()
79 );
80 println!(" - Distributed: {}", distributed_model.is_distributed());
81
82 // Step 4: Create SciRS2 optimizers
83 println!("\n4. Configuring SciRS2 optimizers...");
84
85 let optimizer = SciRS2Optimizer::new("adam");
86
87 // Configure distributed optimizer
88 let mut distributed_optimizer = distributed_trainer.wrap_model(optimizer)?;
89
90 println!(" - Optimizer: Adam with SciRS2 backend");
91 println!(" - Learning rate: 0.001"); // Placeholder
92 println!(" - Distributed synchronization: enabled");
93
94 // Step 5: Distributed data loading
95 println!("\n5. Setting up distributed data loading...");
96
97 let dataset = create_large_quantum_dataset(10000, 8)?;
98 println!(" - Dataset created with {} samples", dataset.size);
99 println!(" - Distributed sampling configured");
100
101 // Create data loader
102 let mut data_loader = SciRS2DataLoader::new(dataset, 64);
103
104 println!(" - Total dataset size: {}", data_loader.dataset.size);
105 println!(" - Local batches per worker: 156"); // placeholder
106 println!(" - Global batch size: 64"); // placeholder
107
108 // Step 6: Distributed training loop
109 println!("\n6. Starting distributed training...");
110
111 let num_epochs = 10;
112 let mut training_metrics = SciRS2TrainingMetrics::new();
113
114 for epoch in 0..num_epochs {
115 // distributed_trainer.barrier()?; // Synchronize all workers - placeholder
116
117 let mut epoch_loss = 0.0;
118 let mut num_batches = 0;
119
120 for (batch_idx, (data, targets)) in data_loader.enumerate() {
121 // Convert to SciRS2 tensors
122 let data_tensor = data.clone();
123 let target_tensor = targets.clone();
124
125 // Zero gradients
126 // distributed_optimizer.zero_grad()?; // placeholder
127
128 // Forward pass
129 let outputs = distributed_model.forward(&data_tensor)?;
130 let loss = compute_quantum_loss(&outputs, &target_tensor)?;
131
132 // Backward pass with automatic differentiation
133 // loss.backward()?; // placeholder
134
135 // Gradient synchronization across workers
136 // distributed_trainer.all_reduce_gradients(&distributed_model)?; // placeholder
137
138 // Optimizer step
139 // distributed_optimizer.step()?; // placeholder
140
141 epoch_loss += loss.data.iter().sum::<f64>();
142 num_batches += 1;
143
144 if batch_idx % 10 == 0 {
145 println!(
146 " Epoch {}, Batch {}: loss = {:.6}",
147 epoch,
148 batch_idx,
149 loss.data.iter().sum::<f64>()
150 );
151 }
152 }
153
154 // Collect metrics across all workers
155 let avg_loss =
156 distributed_trainer.all_reduce_scalar(epoch_loss / f64::from(num_batches))?;
157 training_metrics.record_epoch(epoch, avg_loss);
158
159 println!(" Epoch {epoch} completed: avg_loss = {avg_loss:.6}");
160 }
161
162 // Step 7: Distributed evaluation
163 println!("\n7. Distributed model evaluation...");
164
165 let test_dataset = create_test_quantum_dataset(2000, 8)?;
166 // let test_sampler = distributed_trainer.create_sampler(&test_dataset)?; // placeholder
167 println!(
168 " - Test dataset configured with {} samples",
169 test_dataset.size
170 );
171
172 let evaluation_results = evaluate_distributed_model(
173 &distributed_model,
174 &mut SciRS2DataLoader::new(test_dataset, 64),
175 &distributed_trainer,
176 )?;
177
178 println!(" Distributed Evaluation Results:");
179 println!(" - Test accuracy: {:.4}", evaluation_results.accuracy);
180 println!(" - Test loss: {:.6}", evaluation_results.loss);
181 println!(
182 " - Quantum fidelity: {:.4}",
183 evaluation_results.quantum_fidelity
184 );
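// The fidelity reported here comes from compute_quantum_fidelity, which is
// defined outside this excerpt; for pure states such a score is conventionally
// F = |<psi|phi>|^2 between the produced and reference states.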
185
186 // Step 8: SciRS2 tensor operations
187 println!("\n8. Demonstrating SciRS2 tensor operations...");
188
189 // Advanced tensor operations
190 let tensor_a = SciRS2Array::randn(vec![100, 50], SciRS2Device::CPU)?;
191 let tensor_b = SciRS2Array::randn(vec![50, 25], SciRS2Device::CPU)?;
192
193 // Matrix multiplication (inner dimensions must agree: (100, 50) x (50, 25))
194 let result = tensor_a.matmul(&tensor_b)?;
195 println!(
196 " - Matrix multiplication: {:?} x {:?} = {:?}",
197 tensor_a.shape(),
198 tensor_b.shape(),
199 result.shape()
200 );
201
202 // Quantum-specific operations
203 let quantum_state = SciRS2Array::quantum_observable("pauli_z_all", 4)?;
204 // Placeholder for quantum evolution
205 let evolved_state = quantum_state;
206 let fidelity = 0.95; // Mock fidelity
207
208 println!(" - Quantum state evolution fidelity: {fidelity:.6}");
209
210 // Placeholder for distributed tensor operations
211 let distributed_tensor = tensor_a;
212 let local_computation = distributed_tensor.sum(None)?;
213 let global_result = local_computation;
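// In a multi-worker run, each rank would compute its local sum and an all-reduce
// would combine them (global = sum over workers of local_w); here the local
// result is reused directly as a stand-in.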
214
215 println!(
216 " - Distributed computation result shape: {:?}",
217 global_result.shape()
218 );
219
220 // Step 9: Scientific computing features
221 println!("\n9. SciRS2 scientific computing features...");
222
223 // Numerical integration for quantum expectation values
224 let observable = create_quantum_observable(4)?;
225 let expectation_value = 0.5; // Mock expectation value
226 println!(" - Quantum expectation value: {expectation_value:.6}");
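// The mocked value stands in for the usual expectation of an observable O
// against the prepared state |psi>: <O> = <psi|O|psi>, which would be estimated
// from repeated measurements on hardware or computed exactly in simulation.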
227
228 // Optimization with scientific methods
229 let optimization_result = OptimizationResult {
230 converged: true,
231 final_value: compute_quantum_energy(&quantum_params)?,
232 num_iterations: 42,
233 };
234
235 println!(
236 " - LBFGS optimization converged: {}",
237 optimization_result.converged
238 );
239 println!(" - Final energy: {:.8}", optimization_result.final_value);
240 println!(" - Iterations: {}", optimization_result.num_iterations);
241
242 // Step 10: Model serialization with SciRS2
243 println!("\n10. SciRS2 model serialization...");
244
245 let _serializer = SciRS2Serializer; // unit struct kept for illustration; save/load below use associated functions
246
247 // Save distributed model
248 SciRS2Serializer::save_model(
249 &distributed_model.state_dict(),
250 "distributed_quantum_model.h5",
251 )?;
252 println!(" - Model saved with SciRS2 serializer");
253
254 // Save training state for checkpointing
255 let checkpoint = SciRS2Checkpoint {
256 model_state: distributed_model.state_dict(),
257 optimizer_state: HashMap::new(), // Placeholder for optimizer state
258 epoch: num_epochs,
259 metrics: training_metrics.clone(),
260 };
261
262 SciRS2Serializer::save_checkpoint(
263 &checkpoint.model_state,
264 &SciRS2Optimizer::new("adam"),
265 checkpoint.epoch,
266 "training_checkpoint.h5",
267 )?;
268 println!(" - Training checkpoint saved");
269
270 // Load and verify
271 let _loaded_model = SciRS2Serializer::load_model("distributed_quantum_model.h5")?;
272 println!(" - Model loaded successfully");
273
274 // Step 11: Performance analysis
275 println!("\n11. Distributed training performance analysis...");
276
277 let performance_metrics = PerformanceMetrics {
278 communication_overhead: 0.15,
279 scaling_efficiency: 0.85,
280 memory_usage_gb: 2.5,
281 avg_batch_time: 0.042,
282 };
283
284 println!(" Performance Metrics:");
285 println!(
286 " - Communication overhead: {:.2}%",
287 performance_metrics.communication_overhead * 100.0
288 );
289 println!(
290 " - Scaling efficiency: {:.2}%",
291 performance_metrics.scaling_efficiency * 100.0
292 );
293 println!(
294 " - Memory usage per worker: {:.1} GB",
295 performance_metrics.memory_usage_gb
296 );
297 println!(
298 " - Average batch processing time: {:.3}s",
299 performance_metrics.avg_batch_time
300 );
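// These metrics are illustrative. Scaling efficiency is conventionally
// T_1 / (W * T_W) for W workers, and communication overhead is the share of
// each step spent synchronizing gradients rather than computing.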
301
302 // Step 12: Cleanup distributed environment
303 println!("\n12. Cleaning up distributed environment...");
304
305 // distributed_trainer.cleanup()?; // Placeholder
306 println!(" - Distributed training environment cleaned up");
307
308 println!("\n=== SciRS2 Distributed Training Demo Complete ===");
309
310 Ok(())
311}
312
313fn create_distributed_quantum_model(params: &dyn SciRS2Tensor) -> Result<DistributedQuantumModel> {
314 DistributedQuantumModel::new(
315 4, // num_qubits
316 3, // num_layers
317 "hardware_efficient", // ansatz_type
318 params.to_scirs2()?, // parameters
319 "expectation_value", // measurement_type
320 )
321}
322
323fn create_large_quantum_dataset(num_samples: usize, num_features: usize) -> Result<SciRS2Dataset> {
324 let data = SciRS2Array::randn(vec![num_samples, num_features], SciRS2Device::CPU)?.data;
325 let labels = SciRS2Array::randint(0, 2, vec![num_samples], SciRS2Device::CPU)?.data;
326
327 SciRS2Dataset::new(data, labels)
328}
329
330fn create_test_quantum_dataset(num_samples: usize, num_features: usize) -> Result<SciRS2Dataset> {
331 create_large_quantum_dataset(num_samples, num_features)
332}
333
334fn compute_quantum_loss(
335 outputs: &dyn SciRS2Tensor,
336 targets: &dyn SciRS2Tensor,
337) -> Result<SciRS2Array> {
338 // Quantum-aware loss function (placeholder implementation)
339 let outputs_array = outputs.to_scirs2()?;
340 let targets_array = targets.to_scirs2()?;
341 let diff = &outputs_array.data - &targets_array.data;
342 let mse_data = &diff * &diff;
343 let mse_loss = SciRS2Array::new(
344 mse_data
345 .mean_axis(scirs2_core::ndarray::Axis(0))
346 .unwrap()
347 .into_dyn(),
348 false,
349 );
350 Ok(mse_loss)
351}
352
353fn evaluate_distributed_model(
354 model: &DistributedQuantumModel,
355 test_loader: &mut SciRS2DataLoader,
356 trainer: &SciRS2DistributedTrainer,
357) -> Result<EvaluationResults> {
358 let mut total_loss = 0.0;
359 let mut total_accuracy = 0.0;
360 let mut total_fidelity = 0.0;
361 let mut num_batches = 0;
362
363 for _batch_idx in 0..10 {
364 // Mock evaluation loop
365 let data = SciRS2Array::randn(vec![32, 8], SciRS2Device::CPU)?;
366 let targets = SciRS2Array::randn(vec![32], SciRS2Device::CPU)?;
367 let outputs = model.forward(&data)?;
368 let loss = compute_quantum_loss(&outputs, &targets)?;
369
370 let batch_accuracy = compute_accuracy(&outputs, &targets)?;
371 let batch_fidelity = compute_quantum_fidelity(&outputs)?;
372
373 total_loss += loss.data.iter().sum::<f64>();
374 total_accuracy += batch_accuracy;
375 total_fidelity += batch_fidelity;
376 num_batches += 1;
377 }
378
379 // Average across all workers
380 let avg_loss = trainer.all_reduce_scalar(total_loss / f64::from(num_batches))?;
381 let avg_accuracy = trainer.all_reduce_scalar(total_accuracy / f64::from(num_batches))?;
382 let avg_fidelity = trainer.all_reduce_scalar(total_fidelity / f64::from(num_batches))?;
383
384 Ok(EvaluationResults {
385 loss: avg_loss,
386 accuracy: avg_accuracy,
387 quantum_fidelity: avg_fidelity,
388 })
389}
390
391fn create_quantum_observable(num_qubits: usize) -> Result<SciRS2Array> {
392 // Create Pauli-Z observable for all qubits
393 SciRS2Array::quantum_observable("pauli_z_all", num_qubits)
394}
Trait Implementations§
impl Clone for SciRS2Array
impl Debug for SciRS2Array
impl SciRS2Tensor for SciRS2Array
fn view(&self) -> ArrayViewD<'_, f64>
Get tensor data as ArrayViewD
fn to_scirs2(&self) -> Result<SciRS2Array>
Convert to SciRS2 format (placeholder)
fn matmul(&self, other: &dyn SciRS2Tensor) -> Result<SciRS2Array>
Perform tensor operations using the SciRS2 backend
fn add(&self, other: &dyn SciRS2Tensor) -> Result<SciRS2Array>
Element-wise operations
fn mul(&self, other: &dyn SciRS2Tensor) -> Result<SciRS2Array>
fn sub(&self, other: &dyn SciRS2Tensor) -> Result<SciRS2Array>
fn mean(&self, axis: Option<usize>) -> Result<SciRS2Array>
fn max(&self, axis: Option<usize>) -> Result<SciRS2Array>
fn min(&self, axis: Option<usize>) -> Result<SciRS2Array>
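A minimal usage sketch of these methods (a hypothetical helper, not from the crate docs; it assumes the SciRS2Array::randn constructor and SciRS2Device::CPU used in the example above, the crate's Result alias, and that Some(axis) selects a single-axis reduction while None reduces over all elements):

fn tensor_ops_sketch() -> Result<()> {
    // Two small random tensors, as in the repository example above
    let a = SciRS2Array::randn(vec![4, 3], SciRS2Device::CPU)?;
    let b = SciRS2Array::randn(vec![3, 2], SciRS2Device::CPU)?;

    // Matrix product through the SciRS2 backend: (4, 3) x (3, 2) -> (4, 2)
    let c = a.matmul(&b)?;

    // Element-wise arithmetic between tensors of matching shape
    let doubled = c.add(&c)?;

    // Reductions: per-axis with Some(axis), whole-tensor with None (assumed)
    let column_means = doubled.mean(Some(0))?;
    let overall_max = doubled.max(None)?;

    // The raw ndarray data stays reachable through view() or the data field
    println!("{:?}", column_means.view().shape());
    println!("{}", overall_max.data.iter().sum::<f64>());
    Ok(())
}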
Auto Trait Implementations§
impl Freeze for SciRS2Array
impl !RefUnwindSafe for SciRS2Array
impl Send for SciRS2Array
impl Sync for SciRS2Array
impl Unpin for SciRS2Array
impl !UnwindSafe for SciRS2Array
Blanket Implementations§
impl<T> BorrowMut<T> for T
where
    T: ?Sized,
fn borrow_mut(&mut self) -> &mut T
Mutably borrows from an owned value. Read more
impl<T> CloneToUninit for T
where
    T: Clone,
impl<T> IntoEither for T
fn into_either(self, into_left: bool) -> Either<Self, Self>
Converts self into a Left variant of Either<Self, Self> if into_left is true. Converts self into a Right variant of Either<Self, Self> otherwise. Read more
fn into_either_with<F>(self, into_left: F) -> Either<Self, Self>
Converts self into a Left variant of Either<Self, Self> if into_left(&self) returns true. Converts self into a Right variant of Either<Self, Self> otherwise. Read more
impl<T> Pointable for T
impl<SS, SP> SupersetOf<SS> for SP
where
    SS: SubsetOf<SP>,
fn to_subset(&self) -> Option<SS>
The inverse inclusion map: attempts to construct self from the equivalent element of its superset. Read more
fn is_in_subset(&self) -> bool
Checks if self is actually part of its subset T (and can be converted to it).
fn to_subset_unchecked(&self) -> SS
Use with care! Same as self.to_subset but without any property checks. Always succeeds.
fn from_subset(element: &SS) -> SP
The inclusion map: converts self to the equivalent element of its superset.