Struct SciRS2Optimizer

pub struct SciRS2Optimizer {
    pub optimizer_type: String,
    pub config: HashMap<String, f64>,
    pub state: HashMap<String, ArrayD<f64>>,
}

SciRS2 optimization interface

Fields

optimizer_type: String

Optimizer type

config: HashMap<String, f64>

Configuration parameters

state: HashMap<String, ArrayD<f64>>

Parameter state (for stateful optimizers like Adam)
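
For illustration, a minimal sketch of how the three fields relate for an Adam-like optimizer. The struct is filled in by hand here purely for readability; in practice new/with_config and step populate these fields. The key names (learning_rate, beta1, the per-parameter theta_m moment buffer) and the ndarray import path are assumptions for the example, not part of the documented API.

use std::collections::HashMap;
use ndarray::{ArrayD, IxDyn};

// Hypothetical contents; normally constructed via SciRS2Optimizer::new(...)
// and updated by step(), not written by hand.
let optimizer = SciRS2Optimizer {
    optimizer_type: "adam".to_string(),
    config: HashMap::from([
        ("learning_rate".to_string(), 0.001),
        ("beta1".to_string(), 0.9),
    ]),
    // Stateful optimizers keep per-parameter buffers, e.g. Adam's first moment.
    state: HashMap::from([("theta_m".to_string(), ArrayD::zeros(IxDyn(&[4, 6])))]),
};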

Implementations

impl SciRS2Optimizer

pub fn new(optimizer_type: impl Into<String>) -> Self

Create a new SciRS2 optimizer

Examples found in repository:
examples/pytorch_integration_demo.rs (line 39)
11fn main() -> Result<()> {
12    println!("=== PyTorch-Style Quantum ML Demo ===\n");
13
14    // Step 1: Create quantum datasets using PyTorch-style DataLoader
15    println!("1. Creating PyTorch-style quantum datasets...");
16
17    let (mut train_loader, mut test_loader) = create_quantum_datasets()?;
18    println!("   - Training data prepared");
19    println!("   - Test data prepared");
20    println!("   - Batch size: {}", train_loader.batch_size());
21
22    // Step 2: Build quantum model using PyTorch-style Sequential API
23    println!("\n2. Building quantum model with PyTorch-style API...");
24
25    let mut model = QuantumSequential::new()
26        .add(Box::new(QuantumLinear::new(4, 8)?))
27        .add(Box::new(QuantumActivation::new(ActivationType::QTanh)))
28        .add(Box::new(QuantumLinear::new(8, 4)?))
29        .add(Box::new(QuantumActivation::new(ActivationType::QSigmoid)))
30        .add(Box::new(QuantumLinear::new(4, 2)?));
31
32    println!("   Model architecture:");
33    println!("   Layers: {}", model.len());
34
35    // Step 3: Set up PyTorch-style loss function and optimizer
36    println!("\n3. Configuring PyTorch-style training setup...");
37
38    let criterion = QuantumCrossEntropyLoss;
39    let optimizer = SciRS2Optimizer::new("adam");
40    let mut trainer = QuantumTrainer::new(Box::new(model), optimizer, Box::new(criterion));
41
42    println!("   - Loss function: Cross Entropy");
43    println!("   - Optimizer: Adam (lr=0.001)");
44    println!("   - Parameters: {} total", trainer.history().losses.len()); // Placeholder
45
46    // Step 4: Training loop with PyTorch-style API
47    println!("\n4. Training with PyTorch-style training loop...");
48
49    let num_epochs = 10;
50    let mut training_history = TrainingHistory::new();
51
52    for epoch in 0..num_epochs {
53        let mut epoch_loss = 0.0;
54        let mut correct_predictions = 0;
55        let mut total_samples = 0;
56
57        // Training phase
58        let epoch_train_loss = trainer.train_epoch(&mut train_loader)?;
59        epoch_loss += epoch_train_loss;
60
61        // Simplified metrics (placeholder)
62        let batch_accuracy = 0.8; // Placeholder accuracy
63        correct_predictions += 100; // Placeholder
64        total_samples += 128; // Placeholder batch samples
65
66        // Validation phase
67        let val_loss = trainer.evaluate(&mut test_loader)?;
68        let val_accuracy = 0.75; // Placeholder
69
70        // Record metrics
71        let train_accuracy = correct_predictions as f64 / total_samples as f64;
72        training_history.add_training(epoch_loss, Some(train_accuracy));
73        training_history.add_validation(val_loss, Some(val_accuracy));
74
75        println!(
76            "   Epoch {}/{}: train_loss={:.4}, train_acc={:.3}, val_loss={:.4}, val_acc={:.3}",
77            epoch + 1,
78            num_epochs,
79            epoch_loss,
80            train_accuracy,
81            val_loss,
82            val_accuracy
83        );
84    }
85
86    // Step 5: Model evaluation and analysis
87    println!("\n5. Model evaluation and analysis...");
88
89    let final_test_loss = trainer.evaluate(&mut test_loader)?;
90    let final_test_accuracy = 0.82; // Placeholder
91    println!("   Final test accuracy: {:.3}", final_test_accuracy);
92    println!("   Final test loss: {:.4}", final_test_loss);
93
94    // Step 6: Parameter analysis (placeholder)
95    println!("\n6. Quantum parameter analysis...");
96    println!("   - Total parameters: {}", 1000); // Placeholder
97    println!("   - Parameter range: [{:.3}, {:.3}]", -0.5, 0.5); // Placeholder
98
99    // Step 7: Model saving (placeholder)
100    println!("\n7. Saving model PyTorch-style...");
101    println!("   Model saved to: quantum_model_pytorch_style.qml");
102
103    // Step 8: Demonstrate quantum-specific features (placeholder)
104    println!("\n8. Quantum-specific features:");
105
106    // Circuit visualization (placeholder values)
107    println!("   - Circuit depth: {}", 15); // Placeholder
108    println!("   - Gate count: {}", 42); // Placeholder
109    println!("   - Qubit count: {}", 8); // Placeholder
110
111    // Quantum gradients (placeholder)
112    println!("   - Quantum gradient norm: {:.6}", 0.123456); // Placeholder
113
114    // Step 9: Compare with classical equivalent
115    println!("\n9. Comparison with classical PyTorch equivalent...");
116
117    let classical_accuracy = 0.78; // Placeholder classical model accuracy
118
119    println!("   - Quantum model accuracy: {:.3}", final_test_accuracy);
120    println!("   - Classical model accuracy: {:.3}", classical_accuracy);
121    println!(
122        "   - Quantum advantage: {:.3}",
123        final_test_accuracy - classical_accuracy
124    );
125
126    // Step 10: Training analytics (placeholder)
127    println!("\n10. Training analytics:");
128    println!("   - Training completed successfully");
129    println!("   - {} epochs completed", num_epochs);
130
131    println!("\n=== PyTorch Integration Demo Complete ===");
132
133    Ok(())
134}
More examples
examples/scirs2_distributed_demo.rs (line 84)
14fn main() -> Result<()> {
15    println!("=== SciRS2 Distributed Training Demo ===\n");
16
17    // Step 1: Initialize SciRS2 distributed environment
18    println!("1. Initializing SciRS2 distributed environment...");
19
20    let distributed_trainer = SciRS2DistributedTrainer::new(
21        4, // world_size
22        0, // rank
23    );
24
25    println!("   - Workers: 4");
26    println!("   - Backend: {}", distributed_trainer.backend);
27    println!("   - World size: {}", distributed_trainer.world_size);
28
29    // Step 2: Create SciRS2 tensors and arrays
30    println!("\n2. Creating SciRS2 tensors and arrays...");
31
32    let data_shape = (1000, 8);
33    let mut scirs2_array =
34        SciRS2Array::new(ArrayD::zeros(IxDyn(&[data_shape.0, data_shape.1])), true);
35    scirs2_array.requires_grad = true;
36
37    // Placeholder for quantum-friendly data initialization
38    // scirs2_array.fill_quantum_data("quantum_normal", 42)?; // would be implemented
39
40    println!("   - Array shape: {:?}", scirs2_array.shape());
41    println!("   - Requires grad: {}", scirs2_array.requires_grad);
42    println!("   - Device: CPU"); // Placeholder
43
44    // Create SciRS2 tensor for quantum parameters
45    let param_data = ArrayD::zeros(IxDyn(&[4, 6])); // 4 qubits, 6 parameters per qubit
46    let mut quantum_params = SciRS2Array::new(param_data, true);
47
48    // Placeholder for quantum parameter initialization
49    // quantum_params.quantum_parameter_init("quantum_aware")?; // would be implemented
50
51    println!(
52        "   - Quantum parameters shape: {:?}",
53        quantum_params.data.shape()
54    );
55    println!(
56        "   - Parameter range: [{:.4}, {:.4}]",
57        quantum_params
58            .data
59            .iter()
60            .fold(f64::INFINITY, |a, &b| a.min(b)),
61        quantum_params
62            .data
63            .iter()
64            .fold(f64::NEG_INFINITY, |a, &b| a.max(b))
65    );
66
67    // Step 3: Setup distributed quantum model
68    println!("\n3. Setting up distributed quantum model...");
69
70    let quantum_model = create_distributed_quantum_model(&quantum_params)?;
71
72    // Wrap model for distributed training
73    let distributed_model = distributed_trainer.wrap_model(quantum_model)?;
74
75    println!(
76        "   - Model parameters: {}",
77        distributed_model.num_parameters()
78    );
79    println!("   - Distributed: {}", distributed_model.is_distributed());
80
81    // Step 4: Create SciRS2 optimizers
82    println!("\n4. Configuring SciRS2 optimizers...");
83
84    let optimizer = SciRS2Optimizer::new("adam");
85
86    // Configure distributed optimizer
87    let mut distributed_optimizer = distributed_trainer.wrap_model(optimizer)?;
88
89    println!("   - Optimizer: Adam with SciRS2 backend");
90    println!("   - Learning rate: 0.001"); // Placeholder
91    println!("   - Distributed synchronization: enabled");
92
93    // Step 5: Distributed data loading
94    println!("\n5. Setting up distributed data loading...");
95
96    let dataset = create_large_quantum_dataset(10000, 8)?;
97    println!("   - Dataset created with {} samples", dataset.size);
98    println!("   - Distributed sampling configured");
99
100    // Create data loader
101    let mut data_loader = SciRS2DataLoader::new(dataset, 64);
102
103    println!("   - Total dataset size: {}", data_loader.dataset.size);
104    println!("   - Local batches per worker: 156"); // placeholder
105    println!("   - Global batch size: 64"); // placeholder
106
107    // Step 6: Distributed training loop
108    println!("\n6. Starting distributed training...");
109
110    let num_epochs = 10;
111    let mut training_metrics = SciRS2TrainingMetrics::new();
112
113    for epoch in 0..num_epochs {
114        // distributed_trainer.barrier()?; // Synchronize all workers - placeholder
115
116        let mut epoch_loss = 0.0;
117        let mut num_batches = 0;
118
119        for (batch_idx, (data, targets)) in data_loader.enumerate() {
120            // Convert to SciRS2 tensors
121            let data_tensor = data.clone();
122            let target_tensor = targets.clone();
123
124            // Zero gradients
125            // distributed_optimizer.zero_grad()?; // placeholder
126
127            // Forward pass
128            let outputs = distributed_model.forward(&data_tensor)?;
129            let loss = compute_quantum_loss(&outputs, &target_tensor)?;
130
131            // Backward pass with automatic differentiation
132            // loss.backward()?; // placeholder
133
134            // Gradient synchronization across workers
135            // distributed_trainer.all_reduce_gradients(&distributed_model)?; // placeholder
136
137            // Optimizer step
138            // distributed_optimizer.step()?; // placeholder
139
140            epoch_loss += loss.data.iter().sum::<f64>();
141            num_batches += 1;
142
143            if batch_idx % 10 == 0 {
144                println!(
145                    "   Epoch {}, Batch {}: loss = {:.6}",
146                    epoch,
147                    batch_idx,
148                    loss.data.iter().sum::<f64>()
149                );
150            }
151        }
152
153        // Collect metrics across all workers
154        let avg_loss = distributed_trainer.all_reduce_scalar(epoch_loss / num_batches as f64)?;
155        training_metrics.record_epoch(epoch, avg_loss);
156
157        println!("   Epoch {} completed: avg_loss = {:.6}", epoch, avg_loss);
158    }
159
160    // Step 7: Distributed evaluation
161    println!("\n7. Distributed model evaluation...");
162
163    let test_dataset = create_test_quantum_dataset(2000, 8)?;
164    // let test_sampler = distributed_trainer.create_sampler(&test_dataset)?; // placeholder
165    println!(
166        "   - Test dataset configured with {} samples",
167        test_dataset.size
168    );
169
170    let evaluation_results = evaluate_distributed_model(
171        &distributed_model,
172        &mut SciRS2DataLoader::new(test_dataset, 64),
173        &distributed_trainer,
174    )?;
175
176    println!("   Distributed Evaluation Results:");
177    println!("   - Test accuracy: {:.4}", evaluation_results.accuracy);
178    println!("   - Test loss: {:.6}", evaluation_results.loss);
179    println!(
180        "   - Quantum fidelity: {:.4}",
181        evaluation_results.quantum_fidelity
182    );
183
184    // Step 8: SciRS2 tensor operations
185    println!("\n8. Demonstrating SciRS2 tensor operations...");
186
187    // Advanced tensor operations
188    let tensor_a = SciRS2Array::randn(vec![100, 50], SciRS2Device::CPU)?;
189    let tensor_b = SciRS2Array::randn(vec![50, 25], SciRS2Device::CPU)?;
190
191    // Matrix multiplication with automatic broadcasting
192    let result = tensor_a.matmul(&tensor_b)?;
193    println!(
194        "   - Matrix multiplication: {:?} x {:?} = {:?}",
195        tensor_a.shape(),
196        tensor_b.shape(),
197        result.shape()
198    );
199
200    // Quantum-specific operations
201    let quantum_state = SciRS2Array::quantum_observable("pauli_z_all", 4)?;
202    // Placeholder for quantum evolution
203    let evolved_state = quantum_state.clone();
204    let fidelity = 0.95; // Mock fidelity
205
206    println!("   - Quantum state evolution fidelity: {:.6}", fidelity);
207
208    // Placeholder for distributed tensor operations
209    let distributed_tensor = tensor_a.clone();
210    let local_computation = distributed_tensor.sum(None)?;
211    let global_result = local_computation.clone();
212
213    println!(
214        "   - Distributed computation result shape: {:?}",
215        global_result.shape()
216    );
217
218    // Step 9: Scientific computing features
219    println!("\n9. SciRS2 scientific computing features...");
220
221    // Numerical integration for quantum expectation values
222    let observable = create_quantum_observable(4)?;
223    let expectation_value = 0.5; // Mock expectation value
224    println!("   - Quantum expectation value: {:.6}", expectation_value);
225
226    // Optimization with scientific methods
227    let mut optimization_result = OptimizationResult {
228        converged: true,
229        final_value: compute_quantum_energy(&quantum_params)?,
230        num_iterations: 42,
231    };
232
233    println!(
234        "   - LBFGS optimization converged: {}",
235        optimization_result.converged
236    );
237    println!("   - Final energy: {:.8}", optimization_result.final_value);
238    println!("   - Iterations: {}", optimization_result.num_iterations);
239
240    // Step 10: Model serialization with SciRS2
241    println!("\n10. SciRS2 model serialization...");
242
243    let serializer = SciRS2Serializer;
244
245    // Save distributed model
246    SciRS2Serializer::save_model(
247        &distributed_model.state_dict(),
248        "distributed_quantum_model.h5",
249    )?;
250    println!("    - Model saved with SciRS2 serializer");
251
252    // Save training state for checkpointing
253    let checkpoint = SciRS2Checkpoint {
254        model_state: distributed_model.state_dict(),
255        optimizer_state: HashMap::new(), // Placeholder for optimizer state
256        epoch: num_epochs,
257        metrics: training_metrics.clone(),
258    };
259
260    SciRS2Serializer::save_checkpoint(
261        &checkpoint.model_state,
262        &SciRS2Optimizer::new("adam"),
263        checkpoint.epoch,
264        "training_checkpoint.h5",
265    )?;
266    println!("    - Training checkpoint saved");
267
268    // Load and verify
269    let _loaded_model = SciRS2Serializer::load_model("distributed_quantum_model.h5")?;
270    println!("    - Model loaded successfully");
271
272    // Step 11: Performance analysis
273    println!("\n11. Distributed training performance analysis...");
274
275    let performance_metrics = PerformanceMetrics {
276        communication_overhead: 0.15,
277        scaling_efficiency: 0.85,
278        memory_usage_gb: 2.5,
279        avg_batch_time: 0.042,
280    };
281
282    println!("    Performance Metrics:");
283    println!(
284        "    - Communication overhead: {:.2}%",
285        performance_metrics.communication_overhead * 100.0
286    );
287    println!(
288        "    - Scaling efficiency: {:.2}%",
289        performance_metrics.scaling_efficiency * 100.0
290    );
291    println!(
292        "    - Memory usage per worker: {:.1} GB",
293        performance_metrics.memory_usage_gb
294    );
295    println!(
296        "    - Average batch processing time: {:.3}s",
297        performance_metrics.avg_batch_time
298    );
299
300    // Step 12: Cleanup distributed environment
301    println!("\n12. Cleaning up distributed environment...");
302
303    // distributed_trainer.cleanup()?; // Placeholder
304    println!("    - Distributed training environment cleaned up");
305
306    println!("\n=== SciRS2 Distributed Training Demo Complete ===");
307
308    Ok(())
309}

pub fn with_config(self, key: impl Into<String>, value: f64) -> Self

Set optimizer configuration
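
A builder-style configuration sketch. The key names below (learning_rate, beta1, beta2) are illustrative assumptions: the struct stores arbitrary String -> f64 entries, and the keys actually honored depend on the chosen optimizer type.

// Hypothetical hyperparameter keys for an "adam" optimizer.
let optimizer = SciRS2Optimizer::new("adam")
    .with_config("learning_rate", 0.001)
    .with_config("beta1", 0.9)
    .with_config("beta2", 0.999);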


pub fn step(&mut self, params: &mut HashMap<String, SciRS2Array>) -> Result<()>

Update parameters using computed gradients
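
A minimal sketch of a manual training step, assuming parameters live in a HashMap<String, SciRS2Array> whose gradients have already been filled in by a backward pass. The parameter name "theta", its shape, the learning_rate key, and the crate-level Result alias are assumptions following the repository examples above.

use std::collections::HashMap;
use ndarray::{ArrayD, IxDyn};

fn train_step() -> Result<()> {
    let mut optimizer = SciRS2Optimizer::new("adam").with_config("learning_rate", 0.001);

    // One named parameter tensor that tracks gradients (same constructor as in
    // the scirs2_distributed_demo example above).
    let mut params: HashMap<String, SciRS2Array> = HashMap::new();
    params.insert(
        "theta".to_string(),
        SciRS2Array::new(ArrayD::zeros(IxDyn(&[4, 6])), true),
    );

    // ... forward pass, loss computation, and backward pass would go here ...

    // Apply one optimization step using the gradients associated with `params`.
    optimizer.step(&mut params)?;
    Ok(())
}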

Auto Trait Implementations

Blanket Implementations

impl<T> Any for T
where T: 'static + ?Sized,

fn type_id(&self) -> TypeId

Gets the TypeId of self.

impl<T> Borrow<T> for T
where T: ?Sized,

fn borrow(&self) -> &T

Immutably borrows from an owned value.

impl<T> BorrowMut<T> for T
where T: ?Sized,

fn borrow_mut(&mut self) -> &mut T

Mutably borrows from an owned value.

impl<T> From<T> for T

fn from(t: T) -> T

Returns the argument unchanged.

impl<T, U> Into<U> for T
where U: From<T>,

fn into(self) -> U

Calls U::from(self). That is, this conversion is whatever the implementation of From<T> for U chooses to do.

impl<T> IntoEither for T

fn into_either(self, into_left: bool) -> Either<Self, Self>

Converts self into a Left variant of Either<Self, Self> if into_left is true; otherwise converts self into a Right variant.

fn into_either_with<F>(self, into_left: F) -> Either<Self, Self>
where F: FnOnce(&Self) -> bool,

Converts self into a Left variant of Either<Self, Self> if into_left(&self) returns true; otherwise converts self into a Right variant.

impl<T> Pointable for T

const ALIGN: usize

The alignment of the pointer.

type Init = T

The type for initializers.

unsafe fn init(init: <T as Pointable>::Init) -> usize

Initializes a value with the given initializer.

unsafe fn deref<'a>(ptr: usize) -> &'a T

Dereferences the given pointer.

unsafe fn deref_mut<'a>(ptr: usize) -> &'a mut T

Mutably dereferences the given pointer.

unsafe fn drop(ptr: usize)

Drops the object pointed to by the given pointer.

impl<T> Same for T

type Output = T

Should always be Self.

impl<SS, SP> SupersetOf<SS> for SP
where SS: SubsetOf<SP>,

fn to_subset(&self) -> Option<SS>

The inverse inclusion map: attempts to construct self from the equivalent element of its superset.

fn is_in_subset(&self) -> bool

Checks if self is actually part of its subset T (and can be converted to it).

fn to_subset_unchecked(&self) -> SS

Use with care! Same as self.to_subset, but without any property checks. Always succeeds.

fn from_subset(element: &SS) -> SP

The inclusion map: converts self to the equivalent element of its superset.

impl<T, U> TryFrom<U> for T
where U: Into<T>,

type Error = Infallible

The type returned in the event of a conversion error.

fn try_from(value: U) -> Result<T, <T as TryFrom<U>>::Error>

Performs the conversion.

impl<T, U> TryInto<U> for T
where U: TryFrom<T>,

type Error = <U as TryFrom<T>>::Error

The type returned in the event of a conversion error.

fn try_into(self) -> Result<U, <U as TryFrom<T>>::Error>

Performs the conversion.

impl<V, T> VZip<V> for T
where V: MultiLane<T>,

fn vzip(self) -> V

impl<T> Ungil for T
where T: Send,