Struct QuantumNeuralODE

pub struct QuantumNeuralODE { /* private fields */ }

Quantum Neural ODE Model
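
The showcase example under new below exercises the full configuration surface; for orientation, here is a minimal, hedged sketch of the typical lifecycle (configure, construct, train, forward). Only the constructor, train, and forward signatures documented on this page are taken as given; the import path is an assumption and may differ in your crate layout.

use ndarray::Array1;
// Assumed re-export path; adjust to your crate version:
// use quantrs2_ml::prelude::{QNODEConfig, QuantumNeuralODE, Result};

fn minimal_qnode_usage() -> Result<()> {
    // Start from the default configuration and override only what you need.
    let config = QNODEConfig {
        num_qubits: 4,
        ..Default::default()
    };
    let mut qnode = QuantumNeuralODE::new(config)?;

    // A single (initial_state, target_state) pair is enough to exercise `train`.
    let input = Array1::from_vec(vec![0.1, 0.2, 0.3, 0.4]);
    let target = Array1::from_vec(vec![0.4, 0.3, 0.2, 0.1]);
    qnode.train(&[(input.clone(), target)], 5)?;

    // Solve the learned dynamics over a time span for a new initial state.
    let prediction = qnode.forward(&input, (0.0, 1.0))?;
    println!("prediction length: {}", prediction.len());
    Ok(())
}
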

Implementations

impl QuantumNeuralODE

pub fn new(config: QNODEConfig) -> Result<Self>

Create a new Quantum Neural ODE

Examples found in repository:
examples/quantum_ml_ultrathink_showcase.rs (line 84)
66fn quantum_neural_odes_demonstration() -> Result<()> {
67    println!("   Initializing Quantum Neural ODE with adaptive integration...");
68
69    // Configure advanced QNODE
70    let config = QNODEConfig {
71        num_qubits: 6,
72        num_layers: 4,
73        integration_method: IntegrationMethod::DormandPrince,
74        rtol: 1e-8,
75        atol: 1e-10,
76        time_span: (0.0, 2.0),
77        adaptive_steps: true,
78        max_evals: 50000,
79        ansatz_type: QNODEAnsatzType::HardwareEfficient,
80        optimization_strategy: QNODEOptimizationStrategy::QuantumNaturalGradient,
81        ..Default::default()
82    };
83
84    let mut qnode = QuantumNeuralODE::new(config)?;
85
86    // Generate complex temporal data
87    let training_data = generate_complex_temporal_data()?;
88    println!("   Generated {} training sequences", training_data.len());
89
90    // Train the QNODE
91    println!("   Training Quantum Neural ODE...");
92    qnode.train(&training_data, 50)?;
93
94    // Analyze convergence
95    let history = qnode.get_training_history();
96    let final_loss = history.last().map_or(0.0, |_m| 0.01); // placeholder value for the demo
97    let final_fidelity = history.last().map_or(0.0, |_m| 0.95); // placeholder value for the demo
98
99    println!("   ✅ QNODE Training Complete!");
100    println!("      Final Loss: {final_loss:.6}");
101    println!("      Quantum Fidelity: {final_fidelity:.4}");
102    println!("      Integration Method: Adaptive Dormand-Prince");
103
104    // Test on new data
105    let test_input = Array1::from_vec(vec![0.5, 0.3, 0.8, 0.2, 0.6, 0.4]);
106    let prediction = qnode.forward(&test_input, (0.0, 1.0))?;
107    println!(
108        "      Test Prediction Norm: {:.4}",
109        prediction.iter().map(|x| x * x).sum::<f64>().sqrt()
110    );
111
112    Ok(())
113}
114
115/// Demonstrate Quantum Physics-Informed Neural Networks
116fn quantum_pinns_demonstration() -> Result<()> {
117    println!("   Initializing Quantum PINN for heat equation solving...");
118
119    // Configure QPINN for heat equation
120    let mut config = QPINNConfig {
121        num_qubits: 8,
122        num_layers: 5,
123        domain_bounds: vec![(-1.0, 1.0), (-1.0, 1.0)], // 2D spatial domain
124        time_bounds: (0.0, 1.0),
125        equation_type: PhysicsEquationType::Heat,
126        loss_weights: LossWeights {
127            pde_loss_weight: 1.0,
128            boundary_loss_weight: 100.0,
129            initial_loss_weight: 100.0,
130            physics_constraint_weight: 10.0,
131            data_loss_weight: 1.0,
132        },
133        training_config: TrainingConfig {
134            epochs: 500,
135            learning_rate: 0.001,
136            num_collocation_points: 2000,
137            adaptive_sampling: true,
138            ..Default::default()
139        },
140        ..Default::default()
141    };
142
143    // Add boundary conditions
144    config.boundary_conditions = vec![
145        BoundaryCondition {
146            boundary: BoundaryLocation::Left,
147            condition_type: BoundaryType::Dirichlet,
148            value_function: "0.0".to_string(),
149        },
150        BoundaryCondition {
151            boundary: BoundaryLocation::Right,
152            condition_type: BoundaryType::Dirichlet,
153            value_function: "0.0".to_string(),
154        },
155    ];
156
157    // Add initial condition
158    config.initial_conditions = vec![InitialCondition {
159        value_function: "exp(-10*((x-0.5)^2 + (y-0.5)^2))".to_string(),
160        derivative_function: None,
161    }];
162
163    let mut qpinn = QuantumPINN::new(config)?;
164    println!("   QPINN configured with {} qubits", 10);
165
166    // Train the QPINN
167    println!("   Training QPINN to solve heat equation...");
168    qpinn.train(None)?;
169
170    // Analyze training results
171    let history = qpinn.get_training_history();
172    if let Some(_final_metrics) = history.last() { // placeholder values are printed below for this demo
173        println!("   ✅ QPINN Training Complete!");
174        println!("      Total Loss: {:.6}", 0.001);
175        println!("      PDE Residual: {:.6}", 0.0005);
176        println!("      Boundary Loss: {:.6}", 0.0002);
177        println!("      Physics Constraints: {:.6}", 0.0001);
178    }
179
180    // Solve on evaluation grid
181    let grid_points = generate_evaluation_grid()?;
182    let solution = qpinn.solve_on_grid(&grid_points)?;
183    println!(
184        "      Solution computed on {} grid points",
185        grid_points.nrows()
186    );
187    println!(
188        "      Solution range: [{:.4}, {:.4}]",
189        solution.iter().copied().fold(f64::INFINITY, f64::min),
190        solution.iter().copied().fold(f64::NEG_INFINITY, f64::max)
191    );
192
193    Ok(())
194}
195
196/// Demonstrate Quantum Reservoir Computing
197fn quantum_reservoir_computing_demonstration() -> Result<()> {
198    println!("   Initializing Quantum Reservoir Computer...");
199
200    // Configure advanced QRC
201    let config = QRCConfig {
202        reservoir_qubits: 12,
203        input_qubits: 6,
204        readout_size: 16,
205        reservoir_dynamics: ReservoirDynamics {
206            evolution_time: 1.0,
207            coupling_strength: 0.15,
208            external_field: 0.08,
209            hamiltonian_type: HamiltonianType::TransverseFieldIsing,
210            random_interactions: true,
211            randomness_strength: 0.05,
212            memory_length: 20,
213        },
214        input_encoding: InputEncoding {
215            encoding_type: EncodingType::Amplitude,
216            normalization: NormalizationType::L2,
217            feature_mapping: FeatureMapping::Linear,
218            temporal_encoding: true,
219        },
220        training_config: QRCTrainingConfig {
221            epochs: 100,
222            learning_rate: 0.01,
223            batch_size: 16,
224            washout_period: 50,
225            ..Default::default()
226        },
227        temporal_config: TemporalConfig {
228            sequence_length: 20,
229            time_step: 0.1,
230            temporal_correlation: true,
231            memory_decay: 0.95,
232        },
233        ..Default::default()
234    };
235
236    let mut qrc = QuantumReservoirComputer::new(config)?;
237    println!("   QRC initialized with {} reservoir qubits", 20);
238
239    // Generate temporal sequence data
240    let training_data = generate_temporal_sequences(100, 20, 6, 8)?;
241    println!("   Generated {} temporal sequences", training_data.len());
242
243    // Train the reservoir readout
244    println!("   Training quantum reservoir readout...");
245    qrc.train(&training_data)?;
246
247    // Analyze reservoir dynamics
248    let dynamics = qrc.analyze_dynamics()?;
249    println!("   ✅ QRC Training Complete!");
250    println!("      Reservoir Capacity: {:.4}", dynamics.capacity);
251    println!("      Memory Function: {:.4}", dynamics.memory_function);
252    println!("      Spectral Radius: {:.4}", dynamics.spectral_radius);
253    println!(
254        "      Entanglement Measure: {:.4}",
255        dynamics.entanglement_measure
256    );
257
258    // Test prediction
259    let test_sequence =
260        Array2::from_shape_vec((15, 6), (0..90).map(|x| f64::from(x) * 0.01).collect())?;
261    let prediction = qrc.predict(&test_sequence)?;
262    println!("      Test prediction shape: {:?}", prediction.shape());
263
264    Ok(())
265}
266
267/// Demonstrate Quantum Graph Attention Networks
268fn quantum_graph_attention_demonstration() -> Result<()> {
269    println!("   Initializing Quantum Graph Attention Network...");
270
271    // Configure advanced QGAT
272    let config = QGATConfig {
273        node_qubits: 5,
274        edge_qubits: 3,
275        num_attention_heads: 8,
276        hidden_dim: 128,
277        output_dim: 32,
278        num_layers: 4,
279        attention_config: QGATAttentionConfig {
280            attention_type: QGATQuantumAttentionType::QuantumSelfAttention,
281            dropout_rate: 0.1,
282            scaled_attention: true,
283            temperature: 0.8,
284            multi_head: true,
285            normalization: AttentionNormalization::LayerNorm,
286        },
287        pooling_config: PoolingConfig {
288            pooling_type: PoolingType::QuantumGlobalPool,
289            pooling_ratio: 0.5,
290            learnable_pooling: true,
291            quantum_pooling: true,
292        },
293        training_config: QGATTrainingConfig {
294            epochs: 150,
295            learning_rate: 0.0005,
296            batch_size: 8,
297            loss_function: LossFunction::CrossEntropy,
298            ..Default::default()
299        },
300        ..Default::default()
301    };
302
303    let qgat = QuantumGraphAttentionNetwork::new(config)?;
304    println!("   QGAT initialized with {} attention heads", 8);
305
306    // Create complex graph data
307    let graphs = generate_complex_graphs(50)?;
308    println!("   Generated {} complex graphs", graphs.len());
309
310    // Test forward pass
311    let sample_graph = &graphs[0];
312    let output = qgat.forward(sample_graph)?;
313    println!("   ✅ QGAT Forward Pass Complete!");
314    println!(
315        "      Input graph: {} nodes, {} edges",
316        sample_graph.num_nodes, sample_graph.num_edges
317    );
318    println!("      Output shape: {:?}", output.shape());
319
320    // Analyze attention patterns
321    let attention_analysis = qgat.analyze_attention(sample_graph)?;
322    println!("      Attention Analysis:");
323    println!(
324        "         Number of attention heads: {}",
325        attention_analysis.attention_weights.len()
326    );
327    println!(
328        "         Average entropy: {:.4}",
329        attention_analysis.average_entropy
330    );
331
332    // Graph representation learning
333    let graph_embeddings = qgat.forward(sample_graph)?;
334    let embedding_norm = graph_embeddings.iter().map(|x| x * x).sum::<f64>().sqrt();
335    println!("      Graph embedding norm: {embedding_norm:.4}");
336
337    Ok(())
338}
339
340/// Advanced Integration Showcase
341fn advanced_integration_showcase() -> Result<()> {
342    println!("   Creating multi-algorithm quantum ML pipeline...");
343
344    // Step 1: Use QPINN to solve a PDE and extract features
345    println!("   Stage 1: QPINN feature extraction from PDE solution");
346    let pde_features = extract_pde_features_with_qpinn()?;
347    println!(
348        "      Extracted {} features from PDE solution",
349        pde_features.len()
350    );
351
352    // Step 2: Use QRC to process temporal dynamics
353    println!("   Stage 2: QRC temporal pattern recognition");
354    let temporal_patterns = process_temporal_with_qrc(&pde_features)?;
355    println!(
356        "      Identified {} temporal patterns",
357        temporal_patterns.nrows()
358    );
359
360    // Step 3: Use QGAT for relationship modeling
361    println!("   Stage 3: QGAT relationship modeling");
362    let relationship_graph = create_relationship_graph(&temporal_patterns)?;
363    let graph_insights = analyze_with_qgat(&relationship_graph)?;
364    println!(
365        "      Generated relationship insights: {:.4} complexity score",
366        graph_insights.sum() / graph_insights.len() as f64
367    );
368
369    // Step 4: QNODE for continuous optimization
370    println!("   Stage 4: QNODE continuous optimization");
371    let optimization_result = optimize_with_qnode(&graph_insights)?;
372    println!("      Optimization converged to: {optimization_result:.6}");
373
374    println!("   ✅ Multi-Algorithm Pipeline Complete!");
375    println!("      Successfully integrated 4 cutting-edge quantum algorithms");
376    println!("      Pipeline demonstrates quantum synergies and enhanced capabilities");
377
378    Ok(())
379}
380
381/// Comprehensive Benchmarking
382fn comprehensive_benchmarking() -> Result<()> {
383    println!("   Running comprehensive quantum advantage benchmarks...");
384
385    // Benchmark QNODE vs Classical NODE
386    println!("   Benchmarking QNODE vs Classical Neural ODE...");
387    let qnode_config = QNODEConfig::default();
388    let mut qnode = QuantumNeuralODE::new(qnode_config)?;
389    let test_data = generate_benchmark_data()?;
390    let qnode_benchmark = benchmark_qnode_vs_classical(&mut qnode, &test_data)?;
391
392    println!(
393        "      QNODE Quantum Advantage: {:.2}x",
394        qnode_benchmark.quantum_advantage
395    );
396    println!(
397        "      QNODE Speed Ratio: {:.2}x",
398        qnode_benchmark.classical_time / qnode_benchmark.quantum_time
399    );
400
401    // Benchmark QRC vs Classical RC
402    println!("   Benchmarking QRC vs Classical Reservoir Computing...");
403    let qrc_config = QRCConfig::default();
404    let mut qrc = QuantumReservoirComputer::new(qrc_config)?;
405    let qrc_test_data = generate_qrc_benchmark_data()?;
406    let qrc_benchmark = benchmark_qrc_vs_classical(&mut qrc, &qrc_test_data)?;
407
408    println!(
409        "      QRC Quantum Advantage: {:.2}x",
410        qrc_benchmark.quantum_advantage
411    );
412    println!(
413        "      QRC Accuracy Improvement: {:.2}%",
414        (qrc_benchmark.quantum_advantage - 1.0) * 100.0
415    );
416
417    // Benchmark QGAT vs Classical GAT
418    println!("   Benchmarking QGAT vs Classical Graph Attention...");
419    let qgat_config = QGATConfig::default();
420    let qgat = QuantumGraphAttentionNetwork::new(qgat_config)?;
421    let qgat_test_graphs = generate_benchmark_graphs()?;
422    let qgat_benchmark = benchmark_qgat_vs_classical(&qgat, &qgat_test_graphs)?;
423
424    println!(
425        "      QGAT Quantum Advantage: {:.2}x",
426        qgat_benchmark.quantum_advantage
427    );
428    println!(
429        "      QGAT Processing Speed: {:.2}x faster",
430        qgat_benchmark.classical_time / qgat_benchmark.quantum_time
431    );
432
433    // Overall analysis
434    let avg_quantum_advantage = (qnode_benchmark.quantum_advantage
435        + qrc_benchmark.quantum_advantage
436        + qgat_benchmark.quantum_advantage)
437        / 3.0;
438
439    println!("   ✅ Comprehensive Benchmarking Complete!");
440    println!("      Average Quantum Advantage: {avg_quantum_advantage:.2}x");
441    println!("      All algorithms demonstrate quantum superiority");
442
443    Ok(())
444}
pub fn forward(&mut self, initial_state: &Array1<f64>, time_span: (f64, f64)) -> Result<Array1<f64>>

Forward pass: solve the quantum neural ODE

Examples found in repository:
examples/quantum_ml_ultrathink_showcase.rs (line 106), in the same quantum_neural_odes_demonstration example shown in full under new above. The relevant lines are:

105    let test_input = Array1::from_vec(vec![0.5, 0.3, 0.8, 0.2, 0.6, 0.4]);
106    let prediction = qnode.forward(&test_input, (0.0, 1.0))?;
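
As a further illustration of the (initial_state, time_span) interface, here is a hedged sketch that rolls the same model forward over consecutive time windows, feeding each prediction back in as the next initial state. It only demonstrates the call pattern using the signature above; whether re-encoding intermediate states this way is appropriate for a given model is left to the user.

use ndarray::Array1;

// Evolve over `steps` consecutive windows of width `dt`,
// reusing each prediction as the next initial state.
fn rollout(
    qnode: &mut QuantumNeuralODE,
    start: Array1<f64>,
    dt: f64,
    steps: usize,
) -> Result<Vec<Array1<f64>>> {
    let mut states = vec![start];
    for k in 0..steps {
        let t0 = k as f64 * dt;
        let next = qnode.forward(states.last().unwrap(), (t0, t0 + dt))?;
        states.push(next);
    }
    Ok(states)
}
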
pub fn train(&mut self, training_data: &[(Array1<f64>, Array1<f64>)], epochs: usize) -> Result<()>

Train the Quantum Neural ODE

Examples found in repository:
examples/quantum_ml_ultrathink_showcase.rs (line 92), in the same quantum_neural_odes_demonstration example shown in full under new above. The relevant lines are:

91    println!("   Training Quantum Neural ODE...");
92    qnode.train(&training_data, 50)?;
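
The training data is a plain slice of (initial_state, target_state) pairs. As a minimal sketch of how such a slice might be assembled for experimentation (the shifted-sine mapping below is arbitrary demo data, not taken from the repository):

use ndarray::Array1;

// Build a tiny synthetic dataset of (initial_state, target_state) pairs.
fn synthetic_pairs(n: usize, dim: usize) -> Vec<(Array1<f64>, Array1<f64>)> {
    (0..n)
        .map(|i| {
            let input = Array1::from_shape_fn(dim, |j| ((i + j) as f64 * 0.1).sin());
            let target = Array1::from_shape_fn(dim, |j| ((i + j + 1) as f64 * 0.1).sin());
            (input, target)
        })
        .collect()
}

// Usage with a constructed model:
// let data = synthetic_pairs(32, 6);
// qnode.train(&data, 20)?;
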
pub fn get_training_history(&self) -> &[TrainingMetrics]

Get training history

Examples found in repository:
examples/quantum_ml_ultrathink_showcase.rs (line 95), in the same quantum_neural_odes_demonstration example shown in full under new above. The relevant line is:

95    let history = qnode.get_training_history();
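
Note that the showcase example reads history.last() but then prints placeholder numbers. A minimal sketch that relies only on what this page documents (the returned &[TrainingMetrics] slice) looks like this; inspecting per-epoch values requires knowing the fields of TrainingMetrics, which are not listed here:

// Report coarse training progress without assuming TrainingMetrics' fields.
fn report_training_progress(qnode: &QuantumNeuralODE) {
    let history = qnode.get_training_history();
    if history.is_empty() {
        println!("no training epochs recorded yet");
    } else {
        println!("{} training epochs recorded", history.len());
    }
}
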
pub fn save_parameters(&self, path: &str) -> Result<()>

Save model parameters

pub fn load_parameters(&mut self, path: &str) -> Result<()>

Load model parameters
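
No repository example exercises these two methods, so here is a minimal hedged sketch of a checkpoint round trip using only the signatures above. The file name is arbitrary and the on-disk format is whatever the crate chooses; rebuilding the model from QNODEConfig::default() is only to keep the sketch self-contained; in practice you would reuse the configuration the model was trained with.

fn checkpoint_roundtrip(qnode: &QuantumNeuralODE) -> Result<()> {
    // Persist the trained parameters to disk.
    qnode.save_parameters("qnode_params.bin")?;

    // Restore them into a freshly constructed model.
    let mut restored = QuantumNeuralODE::new(QNODEConfig::default())?;
    restored.load_parameters("qnode_params.bin")?;
    Ok(())
}
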

Trait Implementations

impl Clone for QuantumNeuralODE

fn clone(&self) -> QuantumNeuralODE
Returns a duplicate of the value. Read more

fn clone_from(&mut self, source: &Self)
Performs copy-assignment from source. Read more

impl Debug for QuantumNeuralODE

fn fmt(&self, f: &mut Formatter<'_>) -> Result
Formats the value using the given formatter. Read more

Auto Trait Implementations

Blanket Implementations

impl<T> Any for T
where T: 'static + ?Sized,

fn type_id(&self) -> TypeId
Gets the TypeId of self. Read more

impl<T> Borrow<T> for T
where T: ?Sized,

fn borrow(&self) -> &T
Immutably borrows from an owned value. Read more

impl<T> BorrowMut<T> for T
where T: ?Sized,

fn borrow_mut(&mut self) -> &mut T
Mutably borrows from an owned value. Read more

impl<T> CloneToUninit for T
where T: Clone,

unsafe fn clone_to_uninit(&self, dest: *mut u8)
🔬 This is a nightly-only experimental API. (clone_to_uninit)
Performs copy-assignment from self to dest. Read more

impl<T> DynClone for T
where T: Clone,

fn __clone_box(&self, _: Private) -> *mut ()

impl<T> From<T> for T

fn from(t: T) -> T
Returns the argument unchanged.

impl<T, U> Into<U> for T
where U: From<T>,

fn into(self) -> U
Calls U::from(self). That is, this conversion is whatever the implementation of From<T> for U chooses to do.

impl<T> IntoEither for T

fn into_either(self, into_left: bool) -> Either<Self, Self>
Converts self into a Left variant of Either<Self, Self> if into_left is true. Converts self into a Right variant of Either<Self, Self> otherwise. Read more

fn into_either_with<F>(self, into_left: F) -> Either<Self, Self>
where F: FnOnce(&Self) -> bool,
Converts self into a Left variant of Either<Self, Self> if into_left(&self) returns true. Converts self into a Right variant of Either<Self, Self> otherwise. Read more

impl<T> Pointable for T

const ALIGN: usize
The alignment of pointer.

type Init = T
The type for initializers.

unsafe fn init(init: <T as Pointable>::Init) -> usize
Initializes a value with the given initializer. Read more

unsafe fn deref<'a>(ptr: usize) -> &'a T
Dereferences the given pointer. Read more

unsafe fn deref_mut<'a>(ptr: usize) -> &'a mut T
Mutably dereferences the given pointer. Read more

unsafe fn drop(ptr: usize)
Drops the object pointed to by the given pointer. Read more

impl<T> Same for T

type Output = T
Should always be Self

impl<SS, SP> SupersetOf<SS> for SP
where SS: SubsetOf<SP>,

fn to_subset(&self) -> Option<SS>
The inverse inclusion map: attempts to construct self from the equivalent element of its superset. Read more

fn is_in_subset(&self) -> bool
Checks if self is actually part of its subset T (and can be converted to it).

fn to_subset_unchecked(&self) -> SS
Use with care! Same as self.to_subset but without any property checks. Always succeeds.

fn from_subset(element: &SS) -> SP
The inclusion map: converts self to the equivalent element of its superset.

impl<T> ToOwned for T
where T: Clone,

type Owned = T
The resulting type after obtaining ownership.

fn to_owned(&self) -> T
Creates owned data from borrowed data, usually by cloning. Read more

fn clone_into(&self, target: &mut T)
Uses borrowed data to replace owned data, usually by cloning. Read more

impl<T, U> TryFrom<U> for T
where U: Into<T>,

type Error = Infallible
The type returned in the event of a conversion error.

fn try_from(value: U) -> Result<T, <T as TryFrom<U>>::Error>
Performs the conversion.

impl<T, U> TryInto<U> for T
where U: TryFrom<T>,

type Error = <U as TryFrom<T>>::Error
The type returned in the event of a conversion error.

fn try_into(self) -> Result<U, <U as TryFrom<T>>::Error>
Performs the conversion.

impl<V, T> VZip<V> for T
where V: MultiLane<T>,

fn vzip(self) -> V