Struct SciRS2Array

Source
pub struct SciRS2Array {
    pub data: ArrayD<f64>,
    pub requires_grad: bool,
    pub grad: Option<ArrayD<f64>>,
    pub grad_fn: Option<Box<dyn GradFunction>>,
}

SciRS2 array wrapper for quantum ML operations

Fields§

§data: ArrayD<f64>

Array data

§requires_grad: bool

Whether gradients are required

§grad: Option<ArrayD<f64>>

Gradient accumulator

§grad_fn: Option<Box<dyn GradFunction>>

Operation history for backpropagation
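
A minimal sketch of how these fields fit together, assuming the crate re-exports ndarray as scirs2_core::ndarray (as the example code below does) and that a freshly constructed array carries no gradient or operation history:

use scirs2_core::ndarray::{ArrayD, IxDyn};

// Construct a 2x3 array with gradient tracking enabled.
let arr = SciRS2Array::new(ArrayD::zeros(IxDyn(&[2, 3])), true);
assert!(arr.requires_grad);
assert!(arr.grad.is_none());    // populated by backward()
assert!(arr.grad_fn.is_none()); // recorded as operations are applied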

Implementations§

Source§

impl SciRS2Array

Source

pub fn new(data: ArrayD<f64>, requires_grad: bool) -> Self

Create a new SciRS2Array

Examples found in repository
examples/scirs2_distributed_demo.rs (line 34)
14fn main() -> Result<()> {
15    println!("=== SciRS2 Distributed Training Demo ===\n");
16
17    // Step 1: Initialize SciRS2 distributed environment
18    println!("1. Initializing SciRS2 distributed environment...");
19
20    let distributed_trainer = SciRS2DistributedTrainer::new(
21        4, // world_size
22        0, // rank
23    );
24
25    println!("   - Workers: 4");
26    println!("   - Backend: {}", distributed_trainer.backend);
27    println!("   - World size: {}", distributed_trainer.world_size);
28
29    // Step 2: Create SciRS2 tensors and arrays
30    println!("\n2. Creating SciRS2 tensors and arrays...");
31
32    let data_shape = (1000, 8);
33    let mut scirs2_array =
34        SciRS2Array::new(ArrayD::zeros(IxDyn(&[data_shape.0, data_shape.1])), true);
35    scirs2_array.requires_grad = true;
36
37    // Placeholder for quantum-friendly data initialization
38    // scirs2_array.fill_quantum_data("quantum_normal", 42)?; // would be implemented
39
40    println!("   - Array shape: {:?}", scirs2_array.shape());
41    println!("   - Requires grad: {}", scirs2_array.requires_grad);
42    println!("   - Device: CPU"); // Placeholder
43
44    // Create SciRS2 tensor for quantum parameters
45    let param_data = ArrayD::zeros(IxDyn(&[4, 6])); // 4 qubits, 6 parameters per qubit
46    let mut quantum_params = SciRS2Array::new(param_data, true);
47
48    // Placeholder for quantum parameter initialization
49    // quantum_params.quantum_parameter_init("quantum_aware")?; // would be implemented
50
51    println!(
52        "   - Quantum parameters shape: {:?}",
53        quantum_params.data.shape()
54    );
55    println!(
56        "   - Parameter range: [{:.4}, {:.4}]",
57        quantum_params
58            .data
59            .iter()
60            .fold(f64::INFINITY, |a, &b| a.min(b)),
61        quantum_params
62            .data
63            .iter()
64            .fold(f64::NEG_INFINITY, |a, &b| a.max(b))
65    );
66
67    // Step 3: Setup distributed quantum model
68    println!("\n3. Setting up distributed quantum model...");
69
70    let quantum_model = create_distributed_quantum_model(&quantum_params)?;
71
72    // Wrap model for distributed training
73    let distributed_model = distributed_trainer.wrap_model(quantum_model)?;
74
75    println!(
76        "   - Model parameters: {}",
77        distributed_model.num_parameters()
78    );
79    println!("   - Distributed: {}", distributed_model.is_distributed());
80
81    // Step 4: Create SciRS2 optimizers
82    println!("\n4. Configuring SciRS2 optimizers...");
83
84    let optimizer = SciRS2Optimizer::new("adam");
85
86    // Configure distributed optimizer
87    let mut distributed_optimizer = distributed_trainer.wrap_model(optimizer)?;
88
89    println!("   - Optimizer: Adam with SciRS2 backend");
90    println!("   - Learning rate: 0.001"); // Placeholder
91    println!("   - Distributed synchronization: enabled");
92
93    // Step 5: Distributed data loading
94    println!("\n5. Setting up distributed data loading...");
95
96    let dataset = create_large_quantum_dataset(10000, 8)?;
97    println!("   - Dataset created with {} samples", dataset.size);
98    println!("   - Distributed sampling configured");
99
100    // Create data loader
101    let mut data_loader = SciRS2DataLoader::new(dataset, 64);
102
103    println!("   - Total dataset size: {}", data_loader.dataset.size);
104    println!("   - Local batches per worker: 156"); // placeholder
105    println!("   - Global batch size: 64"); // placeholder
106
107    // Step 6: Distributed training loop
108    println!("\n6. Starting distributed training...");
109
110    let num_epochs = 10;
111    let mut training_metrics = SciRS2TrainingMetrics::new();
112
113    for epoch in 0..num_epochs {
114        // distributed_trainer.barrier()?; // Synchronize all workers - placeholder
115
116        let mut epoch_loss = 0.0;
117        let mut num_batches = 0;
118
119        for (batch_idx, (data, targets)) in data_loader.enumerate() {
120            // Convert to SciRS2 tensors
121            let data_tensor = data.clone();
122            let target_tensor = targets.clone();
123
124            // Zero gradients
125            // distributed_optimizer.zero_grad()?; // placeholder
126
127            // Forward pass
128            let outputs = distributed_model.forward(&data_tensor)?;
129            let loss = compute_quantum_loss(&outputs, &target_tensor)?;
130
131            // Backward pass with automatic differentiation
132            // loss.backward()?; // placeholder
133
134            // Gradient synchronization across workers
135            // distributed_trainer.all_reduce_gradients(&distributed_model)?; // placeholder
136
137            // Optimizer step
138            // distributed_optimizer.step()?; // placeholder
139
140            epoch_loss += loss.data.iter().sum::<f64>();
141            num_batches += 1;
142
143            if batch_idx % 10 == 0 {
144                println!(
145                    "   Epoch {}, Batch {}: loss = {:.6}",
146                    epoch,
147                    batch_idx,
148                    loss.data.iter().sum::<f64>()
149                );
150            }
151        }
152
153        // Collect metrics across all workers
154        let avg_loss = distributed_trainer.all_reduce_scalar(epoch_loss / num_batches as f64)?;
155        training_metrics.record_epoch(epoch, avg_loss);
156
157        println!("   Epoch {} completed: avg_loss = {:.6}", epoch, avg_loss);
158    }
159
160    // Step 7: Distributed evaluation
161    println!("\n7. Distributed model evaluation...");
162
163    let test_dataset = create_test_quantum_dataset(2000, 8)?;
164    // let test_sampler = distributed_trainer.create_sampler(&test_dataset)?; // placeholder
165    println!(
166        "   - Test dataset configured with {} samples",
167        test_dataset.size
168    );
169
170    let evaluation_results = evaluate_distributed_model(
171        &distributed_model,
172        &mut SciRS2DataLoader::new(test_dataset, 64),
173        &distributed_trainer,
174    )?;
175
176    println!("   Distributed Evaluation Results:");
177    println!("   - Test accuracy: {:.4}", evaluation_results.accuracy);
178    println!("   - Test loss: {:.6}", evaluation_results.loss);
179    println!(
180        "   - Quantum fidelity: {:.4}",
181        evaluation_results.quantum_fidelity
182    );
183
184    // Step 8: SciRS2 tensor operations
185    println!("\n8. Demonstrating SciRS2 tensor operations...");
186
187    // Advanced tensor operations
188    let tensor_a = SciRS2Array::randn(vec![100, 50], SciRS2Device::CPU)?;
189    let tensor_b = SciRS2Array::randn(vec![50, 25], SciRS2Device::CPU)?;
190
191    // Matrix multiplication with automatic broadcasting
192    let result = tensor_a.matmul(&tensor_b)?;
193    println!(
194        "   - Matrix multiplication: {:?} x {:?} = {:?}",
195        tensor_a.shape(),
196        tensor_b.shape(),
197        result.shape()
198    );
199
200    // Quantum-specific operations
201    let quantum_state = SciRS2Array::quantum_observable("pauli_z_all", 4)?;
202    // Placeholder for quantum evolution
203    let evolved_state = quantum_state.clone();
204    let fidelity = 0.95; // Mock fidelity
205
206    println!("   - Quantum state evolution fidelity: {:.6}", fidelity);
207
208    // Placeholder for distributed tensor operations
209    let distributed_tensor = tensor_a.clone();
210    let local_computation = distributed_tensor.sum(None)?;
211    let global_result = local_computation.clone();
212
213    println!(
214        "   - Distributed computation result shape: {:?}",
215        global_result.shape()
216    );
217
218    // Step 9: Scientific computing features
219    println!("\n9. SciRS2 scientific computing features...");
220
221    // Numerical integration for quantum expectation values
222    let observable = create_quantum_observable(4)?;
223    let expectation_value = 0.5; // Mock expectation value
224    println!("   - Quantum expectation value: {:.6}", expectation_value);
225
226    // Optimization with scientific methods
227    let mut optimization_result = OptimizationResult {
228        converged: true,
229        final_value: compute_quantum_energy(&quantum_params)?,
230        num_iterations: 42,
231    };
232
233    println!(
234        "   - LBFGS optimization converged: {}",
235        optimization_result.converged
236    );
237    println!("   - Final energy: {:.8}", optimization_result.final_value);
238    println!("   - Iterations: {}", optimization_result.num_iterations);
239
240    // Step 10: Model serialization with SciRS2
241    println!("\n10. SciRS2 model serialization...");
242
243    let serializer = SciRS2Serializer;
244
245    // Save distributed model
246    SciRS2Serializer::save_model(
247        &distributed_model.state_dict(),
248        "distributed_quantum_model.h5",
249    )?;
250    println!("    - Model saved with SciRS2 serializer");
251
252    // Save training state for checkpointing
253    let checkpoint = SciRS2Checkpoint {
254        model_state: distributed_model.state_dict(),
255        optimizer_state: HashMap::new(), // Placeholder for optimizer state
256        epoch: num_epochs,
257        metrics: training_metrics.clone(),
258    };
259
260    SciRS2Serializer::save_checkpoint(
261        &checkpoint.model_state,
262        &SciRS2Optimizer::new("adam"),
263        checkpoint.epoch,
264        "training_checkpoint.h5",
265    )?;
266    println!("    - Training checkpoint saved");
267
268    // Load and verify
269    let _loaded_model = SciRS2Serializer::load_model("distributed_quantum_model.h5")?;
270    println!("    - Model loaded successfully");
271
272    // Step 11: Performance analysis
273    println!("\n11. Distributed training performance analysis...");
274
275    let performance_metrics = PerformanceMetrics {
276        communication_overhead: 0.15,
277        scaling_efficiency: 0.85,
278        memory_usage_gb: 2.5,
279        avg_batch_time: 0.042,
280    };
281
282    println!("    Performance Metrics:");
283    println!(
284        "    - Communication overhead: {:.2}%",
285        performance_metrics.communication_overhead * 100.0
286    );
287    println!(
288        "    - Scaling efficiency: {:.2}%",
289        performance_metrics.scaling_efficiency * 100.0
290    );
291    println!(
292        "    - Memory usage per worker: {:.1} GB",
293        performance_metrics.memory_usage_gb
294    );
295    println!(
296        "    - Average batch processing time: {:.3}s",
297        performance_metrics.avg_batch_time
298    );
299
300    // Step 12: Cleanup distributed environment
301    println!("\n12. Cleaning up distributed environment...");
302
303    // distributed_trainer.cleanup()?; // Placeholder
304    println!("    - Distributed training environment cleaned up");
305
306    println!("\n=== SciRS2 Distributed Training Demo Complete ===");
307
308    Ok(())
309}
310
311fn create_distributed_quantum_model(params: &dyn SciRS2Tensor) -> Result<DistributedQuantumModel> {
312    DistributedQuantumModel::new(
313        4,                    // num_qubits
314        3,                    // num_layers
315        "hardware_efficient", // ansatz_type
316        params.to_scirs2()?,  // parameters
317        "expectation_value",  // measurement_type
318    )
319}
320
321fn create_large_quantum_dataset(num_samples: usize, num_features: usize) -> Result<SciRS2Dataset> {
322    let data = SciRS2Array::randn(vec![num_samples, num_features], SciRS2Device::CPU)?.data;
323    let labels = SciRS2Array::randint(0, 2, vec![num_samples], SciRS2Device::CPU)?.data;
324
325    SciRS2Dataset::new(data, labels)
326}
327
328fn create_test_quantum_dataset(num_samples: usize, num_features: usize) -> Result<SciRS2Dataset> {
329    create_large_quantum_dataset(num_samples, num_features)
330}
331
332fn compute_quantum_loss(
333    outputs: &dyn SciRS2Tensor,
334    targets: &dyn SciRS2Tensor,
335) -> Result<SciRS2Array> {
336    // Quantum-aware loss function (placeholder implementation)
337    let outputs_array = outputs.to_scirs2()?;
338    let targets_array = targets.to_scirs2()?;
339    let diff = &outputs_array.data - &targets_array.data;
340    let mse_data = &diff * &diff;
341    let mse_loss = SciRS2Array::new(
342        mse_data.mean_axis(scirs2_core::ndarray::Axis(0)).unwrap().into_dyn(),
343        false,
344    );
345    Ok(mse_loss)
346}
347
348fn evaluate_distributed_model(
349    model: &DistributedQuantumModel,
350    test_loader: &mut SciRS2DataLoader,
351    trainer: &SciRS2DistributedTrainer,
352) -> Result<EvaluationResults> {
353    let mut total_loss = 0.0;
354    let mut total_accuracy = 0.0;
355    let mut total_fidelity = 0.0;
356    let mut num_batches = 0;
357
358    for _batch_idx in 0..10 {
359        // Mock evaluation loop
360        let data = SciRS2Array::randn(vec![32, 8], SciRS2Device::CPU)?;
361        let targets = SciRS2Array::randn(vec![32], SciRS2Device::CPU)?;
362        let outputs = model.forward(&data)?;
363        let loss = compute_quantum_loss(&outputs, &targets)?;
364
365        let batch_accuracy = compute_accuracy(&outputs, &targets)?;
366        let batch_fidelity = compute_quantum_fidelity(&outputs)?;
367
368        total_loss += loss.data.iter().sum::<f64>();
369        total_accuracy += batch_accuracy;
370        total_fidelity += batch_fidelity;
371        num_batches += 1;
372    }
373
374    // Average across all workers
375    let avg_loss = trainer.all_reduce_scalar(total_loss / num_batches as f64)?;
376    let avg_accuracy = trainer.all_reduce_scalar(total_accuracy / num_batches as f64)?;
377    let avg_fidelity = trainer.all_reduce_scalar(total_fidelity / num_batches as f64)?;
378
379    Ok(EvaluationResults {
380        loss: avg_loss,
381        accuracy: avg_accuracy,
382        quantum_fidelity: avg_fidelity,
383    })
384}
385
386fn create_quantum_observable(num_qubits: usize) -> Result<SciRS2Array> {
387    // Create Pauli-Z observable for all qubits
388    Ok(SciRS2Array::quantum_observable("pauli_z_all", num_qubits)?)
389}
390
391fn compute_quantum_energy(params: &dyn SciRS2Tensor) -> Result<f64> {
392    // Mock quantum energy computation
393    let params_array = params.to_scirs2()?;
394    let norm_squared = params_array.data.iter().map(|x| x * x).sum::<f64>();
395    let sum_abs = params_array.data.iter().sum::<f64>().abs();
396    let energy = norm_squared + 0.5 * sum_abs;
397    Ok(energy)
398}
399
400fn compute_quantum_gradient(params: &dyn SciRS2Tensor) -> Result<SciRS2Array> {
401    // Mock gradient computation using parameter shift rule
402    // Mock gradient computation using parameter shift rule
403    let params_array = params.to_scirs2()?;
404    let gradient_data = &params_array.data * 2.0 + 0.5;
405    let gradient = SciRS2Array::new(gradient_data, false);
406    Ok(gradient)
407}
Source

pub fn from_array<D: Dimension>(arr: Array<f64, D>) -> Self

Create from an ndarray array (stored internally as ArrayD)

Examples found in repository
examples/pytorch_integration_demo.rs (line 174)
136fn create_quantum_datasets() -> Result<(MemoryDataLoader, MemoryDataLoader)> {
137    // Create synthetic quantum-friendly dataset
138    let num_train = 800;
139    let num_test = 200;
140    let num_features = 4;
141
142    // Training data with quantum entanglement patterns
143    let train_data = Array2::from_shape_fn((num_train, num_features), |(i, j)| {
144        let phase = (i as f64 * 0.1) + (j as f64 * 0.2);
145        (phase.sin() + (phase * 2.0).cos()) * 0.5
146    });
147
148    let train_labels = Array1::from_shape_fn(num_train, |i| {
149        // Create labels based on quantum-like correlations
150        let sum = (0..num_features).map(|j| train_data[[i, j]]).sum::<f64>();
151        if sum > 0.0 {
152            1.0
153        } else {
154            0.0
155        }
156    });
157
158    // Test data
159    let test_data = Array2::from_shape_fn((num_test, num_features), |(i, j)| {
160        let phase = (i as f64 * 0.15) + (j as f64 * 0.25);
161        (phase.sin() + (phase * 2.0).cos()) * 0.5
162    });
163
164    let test_labels = Array1::from_shape_fn(num_test, |i| {
165        let sum = (0..num_features).map(|j| test_data[[i, j]]).sum::<f64>();
166        if sum > 0.0 {
167            1.0
168        } else {
169            0.0
170        }
171    });
172
173    let train_loader = MemoryDataLoader::new(
174        SciRS2Array::from_array(train_data.into_dyn()),
175        SciRS2Array::from_array(train_labels.into_dyn()),
176        32,
177        true,
178    )?;
179    let test_loader = MemoryDataLoader::new(
180        SciRS2Array::from_array(test_data.into_dyn()),
181        SciRS2Array::from_array(test_labels.into_dyn()),
182        32,
183        false,
184    )?;
185
186    Ok((train_loader, test_loader))
187}
Source

pub fn with_grad<D: Dimension>(arr: Array<f64, D>) -> Self

Create with gradient tracking
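
A short sketch, assuming with_grad behaves like from_array with requires_grad enabled:

use scirs2_core::ndarray::Array2;

let params = Array2::<f64>::zeros((4, 6)); // e.g. 4 qubits, 6 parameters each
let tracked = SciRS2Array::with_grad(params);
assert!(tracked.requires_grad);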

Source

pub fn zero_grad(&mut self)

Reset the gradient accumulator to zero

Source

pub fn backward(&mut self) -> Result<()>

Run the backward pass, propagating gradients through the recorded operation history
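
A hedged sketch of one autodiff step combining zero_grad and backward; whether gradients accumulate on the original inputs or on intermediate clones depends on the GradFunction implementations, and the ? operator assumes a surrounding fn returning Result<()>:

use scirs2_core::ndarray::{ArrayD, IxDyn};

let mut x = SciRS2Array::with_grad(ArrayD::from_elem(IxDyn(&[3]), 0.5));
x.zero_grad();                        // clear any stale gradient
let mut loss = x.mul(&x)?.sum(None)?; // scalar loss = sum(x * x)
loss.backward()?;                     // propagate through recorded grad_fns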

Source

pub fn matmul(&self, other: &SciRS2Array) -> Result<SciRS2Array>

Matrix multiplication using the SciRS2 backend

Examples found in repository
examples/scirs2_distributed_demo.rs (line 192)
187    // Advanced tensor operations
188    let tensor_a = SciRS2Array::randn(vec![100, 50], SciRS2Device::CPU)?;
189    let tensor_b = SciRS2Array::randn(vec![50, 25], SciRS2Device::CPU)?;
190
191    // Matrix multiplication with automatic broadcasting
192    let result = tensor_a.matmul(&tensor_b)?;
193    println!(
194        "   - Matrix multiplication: {:?} x {:?} = {:?}",
195        tensor_a.shape(),
196        tensor_b.shape(),
197        result.shape()
198    );
Source

pub fn add(&self, other: &SciRS2Array) -> Result<SciRS2Array>

Element-wise addition

Source

pub fn mul(&self, other: &SciRS2Array) -> Result<SciRS2Array>

Element-wise multiplication
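
A brief sketch combining add and mul (element-wise, so the shapes must match; the ? operator assumes a Result-returning context):

let a = SciRS2Array::randn(vec![2, 2], SciRS2Device::CPU)?;
let b = a.ones_like()?;
let s = a.add(&b)?; // a + 1.0, element-wise
let p = a.mul(&b)?; // a * 1.0, element-wise
assert_eq!(s.shape(), p.shape());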

Source

pub fn sum(&self, axis: Option<usize>) -> Result<SciRS2Array>

Sum reduction over an optional axis (None sums all elements)

Examples found in repository
examples/scirs2_distributed_demo.rs (line 210)
208    // Placeholder for distributed tensor operations
209    let distributed_tensor = tensor_a.clone();
210    let local_computation = distributed_tensor.sum(None)?;
211    let global_result = local_computation.clone();
Source§

impl SciRS2Array

Additional SciRS2Array methods for compatibility

Source

pub fn randn(shape: Vec<usize>, device: SciRS2Device) -> Result<Self>

Create an array of standard normal samples on the specified device

Examples found in repository?
examples/scirs2_distributed_demo.rs (line 188)
14fn main() -> Result<()> {
15    println!("=== SciRS2 Distributed Training Demo ===\n");
16
17    // Step 1: Initialize SciRS2 distributed environment
18    println!("1. Initializing SciRS2 distributed environment...");
19
20    let distributed_trainer = SciRS2DistributedTrainer::new(
21        4, // world_size
22        0, // rank
23    );
24
25    println!("   - Workers: 4");
26    println!("   - Backend: {}", distributed_trainer.backend);
27    println!("   - World size: {}", distributed_trainer.world_size);
28
29    // Step 2: Create SciRS2 tensors and arrays
30    println!("\n2. Creating SciRS2 tensors and arrays...");
31
32    let data_shape = (1000, 8);
33    let mut scirs2_array =
34        SciRS2Array::new(ArrayD::zeros(IxDyn(&[data_shape.0, data_shape.1])), true);
35    scirs2_array.requires_grad = true;
36
37    // Placeholder for quantum-friendly data initialization
38    // scirs2_array.fill_quantum_data("quantum_normal", 42)?; // would be implemented
39
40    println!("   - Array shape: {:?}", scirs2_array.shape());
41    println!("   - Requires grad: {}", scirs2_array.requires_grad);
42    println!("   - Device: CPU"); // Placeholder
43
44    // Create SciRS2 tensor for quantum parameters
45    let param_data = ArrayD::zeros(IxDyn(&[4, 6])); // 4 qubits, 6 parameters per qubit
46    let mut quantum_params = SciRS2Array::new(param_data, true);
47
48    // Placeholder for quantum parameter initialization
49    // quantum_params.quantum_parameter_init("quantum_aware")?; // would be implemented
50
51    println!(
52        "   - Quantum parameters shape: {:?}",
53        quantum_params.data.shape()
54    );
55    println!(
56        "   - Parameter range: [{:.4}, {:.4}]",
57        quantum_params
58            .data
59            .iter()
60            .fold(f64::INFINITY, |a, &b| a.min(b)),
61        quantum_params
62            .data
63            .iter()
64            .fold(f64::NEG_INFINITY, |a, &b| a.max(b))
65    );
66
67    // Step 3: Setup distributed quantum model
68    println!("\n3. Setting up distributed quantum model...");
69
70    let quantum_model = create_distributed_quantum_model(&quantum_params)?;
71
72    // Wrap model for distributed training
73    let distributed_model = distributed_trainer.wrap_model(quantum_model)?;
74
75    println!(
76        "   - Model parameters: {}",
77        distributed_model.num_parameters()
78    );
79    println!("   - Distributed: {}", distributed_model.is_distributed());
80
81    // Step 4: Create SciRS2 optimizers
82    println!("\n4. Configuring SciRS2 optimizers...");
83
84    let optimizer = SciRS2Optimizer::new("adam");
85
86    // Configure distributed optimizer
87    let mut distributed_optimizer = distributed_trainer.wrap_model(optimizer)?;
88
89    println!("   - Optimizer: Adam with SciRS2 backend");
90    println!("   - Learning rate: 0.001"); // Placeholder
91    println!("   - Distributed synchronization: enabled");
92
93    // Step 5: Distributed data loading
94    println!("\n5. Setting up distributed data loading...");
95
96    let dataset = create_large_quantum_dataset(10000, 8)?;
97    println!("   - Dataset created with {} samples", dataset.size);
98    println!("   - Distributed sampling configured");
99
100    // Create data loader
101    let mut data_loader = SciRS2DataLoader::new(dataset, 64);
102
103    println!("   - Total dataset size: {}", data_loader.dataset.size);
104    println!("   - Local batches per worker: 156"); // placeholder
105    println!("   - Global batch size: 64"); // placeholder
106
107    // Step 6: Distributed training loop
108    println!("\n6. Starting distributed training...");
109
110    let num_epochs = 10;
111    let mut training_metrics = SciRS2TrainingMetrics::new();
112
113    for epoch in 0..num_epochs {
114        // distributed_trainer.barrier()?; // Synchronize all workers - placeholder
115
116        let mut epoch_loss = 0.0;
117        let mut num_batches = 0;
118
119        for (batch_idx, (data, targets)) in data_loader.enumerate() {
120            // Convert to SciRS2 tensors
121            let data_tensor = data.clone();
122            let target_tensor = targets.clone();
123
124            // Zero gradients
125            // distributed_optimizer.zero_grad()?; // placeholder
126
127            // Forward pass
128            let outputs = distributed_model.forward(&data_tensor)?;
129            let loss = compute_quantum_loss(&outputs, &target_tensor)?;
130
131            // Backward pass with automatic differentiation
132            // loss.backward()?; // placeholder
133
134            // Gradient synchronization across workers
135            // distributed_trainer.all_reduce_gradients(&distributed_model)?; // placeholder
136
137            // Optimizer step
138            // distributed_optimizer.step()?; // placeholder
139
140            epoch_loss += loss.data.iter().sum::<f64>();
141            num_batches += 1;
142
143            if batch_idx % 10 == 0 {
144                println!(
145                    "   Epoch {}, Batch {}: loss = {:.6}",
146                    epoch,
147                    batch_idx,
148                    loss.data.iter().sum::<f64>()
149                );
150            }
151        }
152
153        // Collect metrics across all workers
154        let avg_loss = distributed_trainer.all_reduce_scalar(epoch_loss / num_batches as f64)?;
155        training_metrics.record_epoch(epoch, avg_loss);
156
157        println!("   Epoch {} completed: avg_loss = {:.6}", epoch, avg_loss);
158    }
159
160    // Step 7: Distributed evaluation
161    println!("\n7. Distributed model evaluation...");
162
163    let test_dataset = create_test_quantum_dataset(2000, 8)?;
164    // let test_sampler = distributed_trainer.create_sampler(&test_dataset)?; // placeholder
165    println!(
166        "   - Test dataset configured with {} samples",
167        test_dataset.size
168    );
169
170    let evaluation_results = evaluate_distributed_model(
171        &distributed_model,
172        &mut SciRS2DataLoader::new(test_dataset, 64),
173        &distributed_trainer,
174    )?;
175
176    println!("   Distributed Evaluation Results:");
177    println!("   - Test accuracy: {:.4}", evaluation_results.accuracy);
178    println!("   - Test loss: {:.6}", evaluation_results.loss);
179    println!(
180        "   - Quantum fidelity: {:.4}",
181        evaluation_results.quantum_fidelity
182    );
183
184    // Step 8: SciRS2 tensor operations
185    println!("\n8. Demonstrating SciRS2 tensor operations...");
186
187    // Advanced tensor operations
188    let tensor_a = SciRS2Array::randn(vec![100, 50], SciRS2Device::CPU)?;
189    let tensor_b = SciRS2Array::randn(vec![50, 25], SciRS2Device::CPU)?;
190
191    // Matrix multiplication with automatic broadcasting
192    let result = tensor_a.matmul(&tensor_b)?;
193    println!(
194        "   - Matrix multiplication: {:?} x {:?} = {:?}",
195        tensor_a.shape(),
196        tensor_b.shape(),
197        result.shape()
198    );
199
200    // Quantum-specific operations
201    let quantum_state = SciRS2Array::quantum_observable("pauli_z_all", 4)?;
202    // Placeholder for quantum evolution
203    let evolved_state = quantum_state.clone();
204    let fidelity = 0.95; // Mock fidelity
205
206    println!("   - Quantum state evolution fidelity: {:.6}", fidelity);
207
208    // Placeholder for distributed tensor operations
209    let distributed_tensor = tensor_a.clone();
210    let local_computation = distributed_tensor.sum(None)?;
211    let global_result = local_computation.clone();
212
213    println!(
214        "   - Distributed computation result shape: {:?}",
215        global_result.shape()
216    );
217
218    // Step 9: Scientific computing features
219    println!("\n9. SciRS2 scientific computing features...");
220
221    // Numerical integration for quantum expectation values
222    let observable = create_quantum_observable(4)?;
223    let expectation_value = 0.5; // Mock expectation value
224    println!("   - Quantum expectation value: {:.6}", expectation_value);
225
226    // Optimization with scientific methods
227    let mut optimization_result = OptimizationResult {
228        converged: true,
229        final_value: compute_quantum_energy(&quantum_params)?,
230        num_iterations: 42,
231    };
232
233    println!(
234        "   - LBFGS optimization converged: {}",
235        optimization_result.converged
236    );
237    println!("   - Final energy: {:.8}", optimization_result.final_value);
238    println!("   - Iterations: {}", optimization_result.num_iterations);
239
240    // Step 10: Model serialization with SciRS2
241    println!("\n10. SciRS2 model serialization...");
242
243    let serializer = SciRS2Serializer;
244
245    // Save distributed model
246    SciRS2Serializer::save_model(
247        &distributed_model.state_dict(),
248        "distributed_quantum_model.h5",
249    )?;
250    println!("    - Model saved with SciRS2 serializer");
251
252    // Save training state for checkpointing
253    let checkpoint = SciRS2Checkpoint {
254        model_state: distributed_model.state_dict(),
255        optimizer_state: HashMap::new(), // Placeholder for optimizer state
256        epoch: num_epochs,
257        metrics: training_metrics.clone(),
258    };
259
260    SciRS2Serializer::save_checkpoint(
261        &checkpoint.model_state,
262        &SciRS2Optimizer::new("adam"),
263        checkpoint.epoch,
264        "training_checkpoint.h5",
265    )?;
266    println!("    - Training checkpoint saved");
267
268    // Load and verify
269    let _loaded_model = SciRS2Serializer::load_model("distributed_quantum_model.h5")?;
270    println!("    - Model loaded successfully");
271
272    // Step 11: Performance analysis
273    println!("\n11. Distributed training performance analysis...");
274
275    let performance_metrics = PerformanceMetrics {
276        communication_overhead: 0.15,
277        scaling_efficiency: 0.85,
278        memory_usage_gb: 2.5,
279        avg_batch_time: 0.042,
280    };
281
282    println!("    Performance Metrics:");
283    println!(
284        "    - Communication overhead: {:.2}%",
285        performance_metrics.communication_overhead * 100.0
286    );
287    println!(
288        "    - Scaling efficiency: {:.2}%",
289        performance_metrics.scaling_efficiency * 100.0
290    );
291    println!(
292        "    - Memory usage per worker: {:.1} GB",
293        performance_metrics.memory_usage_gb
294    );
295    println!(
296        "    - Average batch processing time: {:.3}s",
297        performance_metrics.avg_batch_time
298    );
299
300    // Step 12: Cleanup distributed environment
301    println!("\n12. Cleaning up distributed environment...");
302
303    // distributed_trainer.cleanup()?; // Placeholder
304    println!("    - Distributed training environment cleaned up");
305
306    println!("\n=== SciRS2 Distributed Training Demo Complete ===");
307
308    Ok(())
309}
310
311fn create_distributed_quantum_model(params: &dyn SciRS2Tensor) -> Result<DistributedQuantumModel> {
312    DistributedQuantumModel::new(
313        4,                    // num_qubits
314        3,                    // num_layers
315        "hardware_efficient", // ansatz_type
316        params.to_scirs2()?,  // parameters
317        "expectation_value",  // measurement_type
318    )
319}
320
321fn create_large_quantum_dataset(num_samples: usize, num_features: usize) -> Result<SciRS2Dataset> {
322    let data = SciRS2Array::randn(vec![num_samples, num_features], SciRS2Device::CPU)?.data;
323    let labels = SciRS2Array::randint(0, 2, vec![num_samples], SciRS2Device::CPU)?.data;
324
325    SciRS2Dataset::new(data, labels)
326}
327
328fn create_test_quantum_dataset(num_samples: usize, num_features: usize) -> Result<SciRS2Dataset> {
329    create_large_quantum_dataset(num_samples, num_features)
330}
331
332fn compute_quantum_loss(
333    outputs: &dyn SciRS2Tensor,
334    targets: &dyn SciRS2Tensor,
335) -> Result<SciRS2Array> {
336    // Quantum-aware loss function (placeholder implementation)
337    let outputs_array = outputs.to_scirs2()?;
338    let targets_array = targets.to_scirs2()?;
339    let diff = &outputs_array.data - &targets_array.data;
340    let mse_data = &diff * &diff;
341    let mse_loss = SciRS2Array::new(
342        mse_data.mean_axis(scirs2_core::ndarray::Axis(0)).unwrap().into_dyn(),
343        false,
344    );
345    Ok(mse_loss)
346}
347
348fn evaluate_distributed_model(
349    model: &DistributedQuantumModel,
350    test_loader: &mut SciRS2DataLoader,
351    trainer: &SciRS2DistributedTrainer,
352) -> Result<EvaluationResults> {
353    let mut total_loss = 0.0;
354    let mut total_accuracy = 0.0;
355    let mut total_fidelity = 0.0;
356    let mut num_batches = 0;
357
358    for _batch_idx in 0..10 {
359        // Mock evaluation loop
360        let data = SciRS2Array::randn(vec![32, 8], SciRS2Device::CPU)?;
361        let targets = SciRS2Array::randn(vec![32], SciRS2Device::CPU)?;
362        let outputs = model.forward(&data)?;
363        let loss = compute_quantum_loss(&outputs, &targets)?;
364
365        let batch_accuracy = compute_accuracy(&outputs, &targets)?;
366        let batch_fidelity = compute_quantum_fidelity(&outputs)?;
367
368        total_loss += loss.data.iter().sum::<f64>();
369        total_accuracy += batch_accuracy;
370        total_fidelity += batch_fidelity;
371        num_batches += 1;
372    }
373
374    // Average across all workers
375    let avg_loss = trainer.all_reduce_scalar(total_loss / num_batches as f64)?;
376    let avg_accuracy = trainer.all_reduce_scalar(total_accuracy / num_batches as f64)?;
377    let avg_fidelity = trainer.all_reduce_scalar(total_fidelity / num_batches as f64)?;
378
379    Ok(EvaluationResults {
380        loss: avg_loss,
381        accuracy: avg_accuracy,
382        quantum_fidelity: avg_fidelity,
383    })
384}
385
386fn create_quantum_observable(num_qubits: usize) -> Result<SciRS2Array> {
387    // Create Pauli-Z observable for all qubits
388    Ok(SciRS2Array::quantum_observable("pauli_z_all", num_qubits)?)
389}
390
391fn compute_quantum_energy(params: &dyn SciRS2Tensor) -> Result<f64> {
392    // Mock quantum energy computation
393    let params_array = params.to_scirs2()?;
394    let norm_squared = params_array.data.iter().map(|x| x * x).sum::<f64>();
395    let sum_abs = params_array.data.iter().sum::<f64>().abs();
396    let energy = norm_squared + 0.5 * sum_abs;
397    Ok(energy)
398}
399
400fn compute_quantum_gradient(params: &dyn SciRS2Tensor) -> Result<SciRS2Array> {
401    // Mock gradient computation using parameter shift rule
402    // Mock gradient computation using parameter shift rule
403    let params_array = params.to_scirs2()?;
404    let gradient_data = &params_array.data * 2.0 + 0.5;
405    let gradient = SciRS2Array::new(gradient_data, false);
406    Ok(gradient)
407}
408
409fn compute_accuracy(outputs: &dyn SciRS2Tensor, targets: &dyn SciRS2Tensor) -> Result<f64> {
410    // Mock accuracy computation
411    let outputs_array = outputs.to_scirs2()?;
412    let targets_array = targets.to_scirs2()?;
413    // Simplified mock accuracy
414    let correct = 0.85; // Mock accuracy value
415    Ok(correct)
416}
417
418fn compute_quantum_fidelity(outputs: &dyn SciRS2Tensor) -> Result<f64> {
419    // Mock quantum fidelity computation
420    let outputs_array = outputs.to_scirs2()?;
421    let norm = outputs_array.data.iter().map(|x| x * x).sum::<f64>().sqrt();
422    let fidelity = norm / (outputs_array.shape()[0] as f64).sqrt();
423    Ok(fidelity.min(1.0))
424}
425
426// Supporting structures for the demo
427
428#[derive(Clone)]
429struct SciRS2TrainingMetrics {
430    losses: Vec<f64>,
431    epochs: Vec<usize>,
432}
433
434impl SciRS2TrainingMetrics {
435    fn new() -> Self {
436        Self {
437            losses: Vec::new(),
438            epochs: Vec::new(),
439        }
440    }
441
442    fn record_epoch(&mut self, epoch: usize, loss: f64) {
443        self.epochs.push(epoch);
444        self.losses.push(loss);
445    }
446}
447
448struct EvaluationResults {
449    loss: f64,
450    accuracy: f64,
451    quantum_fidelity: f64,
452}
453
454struct DistributedQuantumModel {
455    num_qubits: usize,
456    parameters: SciRS2Array,
457}
458
459impl DistributedQuantumModel {
460    fn new(
461        num_qubits: usize,
462        num_layers: usize,
463        ansatz_type: &str,
464        parameters: SciRS2Array,
465        measurement_type: &str,
466    ) -> Result<Self> {
467        Ok(Self {
468            num_qubits,
469            parameters,
470        })
471    }
472
473    fn forward(&self, input: &dyn SciRS2Tensor) -> Result<SciRS2Array> {
474        // Mock forward pass
475        let batch_size = input.shape()[0];
476        SciRS2Array::randn(vec![batch_size, 2], SciRS2Device::CPU)
477    }
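
For context, the parameter-shift rule that compute_quantum_gradient mocks does not differentiate the circuit analytically; it evaluates the energy at shifted parameter values. A minimal sketch, assuming a caller-supplied energy_at closure as a hypothetical stand-in for a real circuit evaluation:

use std::f64::consts::FRAC_PI_2;

// Parameter-shift rule: dE/dθ_i = [E(θ + (π/2)·e_i) − E(θ − (π/2)·e_i)] / 2
fn parameter_shift_gradient(params: &[f64], energy_at: impl Fn(&[f64]) -> f64) -> Vec<f64> {
    (0..params.len())
        .map(|i| {
            let (mut plus, mut minus) = (params.to_vec(), params.to_vec());
            plus[i] += FRAC_PI_2;
            minus[i] -= FRAC_PI_2;
            (energy_at(&plus) - energy_at(&minus)) / 2.0
        })
        .collect()
}
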
Source

pub fn ones_like(&self) -> Result<Self>

Create an array of ones with the same shape as this array
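
A minimal usage sketch, assuming ones_like follows the usual NumPy-style semantics (an array of ones sharing this array's shape) and the imports used by the examples on this page:

fn ones_like_sketch() -> Result<()> {
    let x = SciRS2Array::randn(vec![4, 4], SciRS2Device::CPU)?;
    let ones = x.ones_like()?; // same shape as x, filled with 1.0 (assumed)
    assert_eq!(ones.shape(), x.shape());
    Ok(())
}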

Source

pub fn randint(low: i32, high: i32, shape: Vec<usize>, device: SciRS2Device) -> Result<Self>

Create an array of random integers between low and high

Examples found in repository?
examples/scirs2_distributed_demo.rs (line 323)
321fn create_large_quantum_dataset(num_samples: usize, num_features: usize) -> Result<SciRS2Dataset> {
322    let data = SciRS2Array::randn(vec![num_samples, num_features], SciRS2Device::CPU)?.data;
323    let labels = SciRS2Array::randint(0, 2, vec![num_samples], SciRS2Device::CPU)?.data;
324
325    SciRS2Dataset::new(data, labels)
326}
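
Here low = 0 and high = 2 produce the binary labels the demo's classifier expects; assuming the conventional exclusive upper bound, every label is 0 or 1.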
Source

pub fn quantum_observable(name: &str, num_qubits: usize) -> Result<Self>

Create a named quantum observable (e.g. "pauli_z_all") for the given number of qubits

Examples found in repository?
examples/scirs2_distributed_demo.rs (line 201)
184    // Step 8: SciRS2 tensor operations
185    println!("\n8. Demonstrating SciRS2 tensor operations...");
186
187    // Advanced tensor operations
188    let tensor_a = SciRS2Array::randn(vec![100, 50], SciRS2Device::CPU)?;
189    let tensor_b = SciRS2Array::randn(vec![50, 25], SciRS2Device::CPU)?;
190
191    // Matrix multiplication with automatic broadcasting
192    let result = tensor_a.matmul(&tensor_b)?;
193    println!(
194        "   - Matrix multiplication: {:?} x {:?} = {:?}",
195        tensor_a.shape(),
196        tensor_b.shape(),
197        result.shape()
198    );
199
200    // Quantum-specific operations
201    let quantum_state = SciRS2Array::quantum_observable("pauli_z_all", 4)?;
202    // Placeholder for quantum evolution
203    let evolved_state = quantum_state.clone();
204    let fidelity = 0.95; // Mock fidelity
205
206    println!("   - Quantum state evolution fidelity: {:.6}", fidelity);
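
A hedged sketch of how such an observable might combine with the SciRS2Tensor ops documented below to form an expectation value ⟨ψ|O|ψ⟩. The 2^n × 2^n observable shape and real amplitudes are assumptions, and the random state is left unnormalized for brevity:

fn expectation_sketch() -> Result<f64> {
    let obs = SciRS2Array::quantum_observable("pauli_z_all", 2)?;   // assumed 4 x 4
    let state = SciRS2Array::randn(vec![4, 1], SciRS2Device::CPU)?; // unnormalized |ψ⟩
    let applied = obs.matmul(&state)?;                              // O|ψ⟩
    let overlap = state.mul(&applied)?.sum(None)?;                  // Σ_i ψ_i · (Oψ)_i
    Ok(overlap.data.iter().sum::<f64>())
}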

Trait Implementations§

Source§

impl Clone for SciRS2Array

Source§

fn clone(&self) -> Self

Returns a duplicate of the value.
1.0.0 · Source§

fn clone_from(&mut self, source: &Self)

Performs copy-assignment from source.
Source§

impl Debug for SciRS2Array

Source§

fn fmt(&self, f: &mut Formatter<'_>) -> Result

Formats the value using the given formatter.
Source§

impl SciRS2Tensor for SciRS2Array

Source§

fn shape(&self) -> &[usize]

Get tensor shape
Source§

fn view(&self) -> ArrayViewD<'_, f64>

Get tensor data as ArrayViewD
Source§

fn to_scirs2(&self) -> Result<SciRS2Array>

Convert to SciRS2 format (placeholder)
Source§

fn matmul(&self, other: &dyn SciRS2Tensor) -> Result<SciRS2Array>

Matrix multiplication using the SciRS2 backend
Source§

fn add(&self, other: &dyn SciRS2Tensor) -> Result<SciRS2Array>

Element-wise operations
Source§

fn mul(&self, other: &dyn SciRS2Tensor) -> Result<SciRS2Array>

Source§

fn sub(&self, other: &dyn SciRS2Tensor) -> Result<SciRS2Array>

Source§

fn sum(&self, axis: Option<usize>) -> Result<SciRS2Array>

Reduction operations
Source§

fn mean(&self, axis: Option<usize>) -> Result<SciRS2Array>

Source§

fn max(&self, axis: Option<usize>) -> Result<SciRS2Array>

Source§

fn min(&self, axis: Option<usize>) -> Result<SciRS2Array>
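
Taken together, these methods support a small NumPy-like workflow. A minimal sketch using only the signatures listed above (shapes are illustrative, and the crate's Result alias and CPU device are assumed):

fn tensor_ops_sketch() -> Result<()> {
    let a = SciRS2Array::randn(vec![3, 4], SciRS2Device::CPU)?;
    let b = SciRS2Array::randn(vec![4, 2], SciRS2Device::CPU)?;
    let c = a.matmul(&b)?;                 // (3, 4) x (4, 2) -> (3, 2)
    let shifted = c.add(&c.ones_like()?)?; // element-wise +1
    let col_mean = shifted.mean(Some(0))?; // reduce over axis 0
    let total = shifted.sum(None)?;        // full reduction
    println!("{:?} {:?}", col_mean.shape(), total.data);
    Ok(())
}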

Auto Trait Implementations§

Blanket Implementations§
