pub struct QuantumMultiHeadAttention { /* private fields */ }
Quantum multi-head attention module
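For orientation: the quantum attention types accepted by this module presumably modify parts of classical multi-head attention, whose standard (non-quantum) definition is

\mathrm{MultiHead}(Q,K,V) = \mathrm{Concat}(\mathrm{head}_1,\ldots,\mathrm{head}_h)\,W^O, \qquad \mathrm{head}_i = \mathrm{softmax}\!\left(\frac{(QW_i^Q)(KW_i^K)^\top}{\sqrt{d_k}}\right)(VW_i^V),

with d_k = model_dim / num_heads. This is the classical baseline only; how each QuantumAttentionType replaces these operations is not specified on this page.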
Implementations§
impl QuantumMultiHeadAttention
pub fn new(
    num_heads: usize,
    model_dim: usize,
    attention_type: QuantumAttentionType,
    num_qubits: usize,
) -> Result<Self>
Creates a new quantum multi-head attention module.
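A minimal construction sketch (run inside a function returning this crate's Result). The values mirror the repository example below; the assumption that model_dim should be divisible by num_heads is carried over from classical multi-head attention and is not confirmed by this page:

let attention = QuantumMultiHeadAttention::new(
    4,                                            // num_heads
    256,                                          // model_dim (assumed divisible by num_heads)
    QuantumAttentionType::HybridQuantumClassical, // attention_type
    8,                                            // num_qubits (per-head circuit width is an assumption)
)?;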
Examples found in repository
examples/quantum_transformer.rs (line 132)
106fn attention_mechanisms_demo() -> Result<()> {
107 println!(" Testing various quantum attention mechanisms...");
108
109 let attention_types = vec![
110 ("Full Quantum", QuantumAttentionType::FullQuantum),
111 (
112 "Hybrid Quantum-Classical",
113 QuantumAttentionType::HybridQuantumClassical,
114 ),
115 (
116 "Variational Quantum",
117 QuantumAttentionType::VariationalQuantum,
118 ),
119 (
120 "Quantum Enhanced Multi-Head",
121 QuantumAttentionType::QuantumEnhancedMultiHead,
122 ),
123 (
124 "Quantum Self-Attention",
125 QuantumAttentionType::QuantumSelfAttention,
126 ),
127 ];
128
129 for (name, attention_type) in attention_types {
130 println!("\n --- {} Attention ---", name);
131
132 let attention = QuantumMultiHeadAttention::new(4, 256, attention_type, 8)?;
133 println!(
134 " Created attention module: {} heads, {} model dim",
135 4, 256
136 ); // Fixed values since fields are private
137
138 // Test forward pass
139 let batch_size = 2;
140 let seq_len = 10;
141 let model_dim = 256;
142
143 let query = Array3::from_shape_fn((batch_size, seq_len, model_dim), |(b, s, d)| {
144 0.1 * (b as f64 + s as f64 * 0.1 + d as f64 * 0.01)
145 });
146 let key = query.clone();
147 let value = query.clone();
148
149 let attention_output = attention.forward(&query, &key, &value, None)?;
150
151 println!(
152 " Attention output shape: {:?}",
153 attention_output.output.dim()
154 );
155 println!(
156 " Attention weights shape: {:?}",
157 attention_output.attention_weights.dim()
158 );
159
160 // Analyze quantum attention properties
161 let quantum_info = &attention_output.quantum_info;
162 let avg_entanglement = quantum_info.entanglement_matrix.mean().unwrap_or(0.0);
163 let max_coherence = quantum_info
164 .coherence_scores
165 .iter()
166 .cloned()
167 .fold(f64::NEG_INFINITY, f64::max);
168
169 println!(" Average entanglement: {:.4}", avg_entanglement);
170 println!(" Maximum coherence: {:.4}", max_coherence);
171
172 // Attention pattern analysis
173 let attention_weights = &attention_output.attention_weights;
174 let max_attention = attention_weights
175 .iter()
176 .cloned()
177 .fold(f64::NEG_INFINITY, f64::max);
178 let avg_attention = attention_weights.mean().unwrap_or(0.0);
179
180 println!(" Max attention weight: {:.4}", max_attention);
181 println!(" Average attention: {:.4}", avg_attention);
182
183 // Check attention sparsity
184 let sparsity = attention_weights.iter().filter(|&&x| x < 0.01).count() as f64
185 / attention_weights.len() as f64;
186 println!(" Attention sparsity: {:.1}%", sparsity * 100.0);
187 }
188
189 Ok(())
190}
pub fn forward(
    &self,
    query: &Array3<f64>,
    key: &Array3<f64>,
    value: &Array3<f64>,
    attention_mask: Option<&Array3<bool>>,
) -> Result<AttentionOutput>
Runs a forward pass through the quantum multi-head attention module.
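The repository example (shown in full under new above) passes None for attention_mask. A hedged sketch of a masked call follows, assuming the mask shape is (batch, seq_len, seq_len) and that true marks positions that may be attended to; neither convention is confirmed by this page, only the Option<&Array3<bool>> type is. Run inside a function returning this crate's Result:

use ndarray::Array3;

let (batch_size, seq_len, model_dim) = (2, 10, 256);
// Hypothetical causal mask: query position s may attend to key positions t <= s.
let mask = Array3::from_shape_fn((batch_size, seq_len, seq_len), |(_, s, t)| t <= s);

let query = Array3::<f64>::zeros((batch_size, seq_len, model_dim));
let key = query.clone();
let value = query.clone();

let attention = QuantumMultiHeadAttention::new(4, 256, QuantumAttentionType::QuantumSelfAttention, 8)?;
let out = attention.forward(&query, &key, &value, Some(&mask))?;
// Output shape matches the unmasked repository example: (batch, seq_len, model_dim).
assert_eq!(out.output.dim(), (batch_size, seq_len, model_dim));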
Examples found in repository
examples/quantum_transformer.rs (line 149) — the same attention_mechanisms_demo example listed in full under new above; the forward call is the line marked 149 there.
Trait Implementations§
impl Clone for QuantumMultiHeadAttention
fn clone(&self) -> QuantumMultiHeadAttention
Returns a duplicate of the value.
1.0.0 · fn clone_from(&mut self, source: &Self)
Performs copy-assignment from source.
Auto Trait Implementations§
impl Freeze for QuantumMultiHeadAttention
impl !RefUnwindSafe for QuantumMultiHeadAttention
impl Send for QuantumMultiHeadAttention
impl Sync for QuantumMultiHeadAttention
impl Unpin for QuantumMultiHeadAttention
impl !UnwindSafe for QuantumMultiHeadAttention
Blanket Implementations§
impl<T> BorrowMut<T> for T
where
    T: ?Sized,
fn borrow_mut(&mut self) -> &mut T
Mutably borrows from an owned value.
impl<T> CloneToUninit for T
where
    T: Clone,
impl<T> IntoEither for T
fn into_either(self, into_left: bool) -> Either<Self, Self>
Converts self into a Left variant of Either<Self, Self> if into_left is true. Converts self into a Right variant of Either<Self, Self> otherwise.
fn into_either_with<F>(self, into_left: F) -> Either<Self, Self>
Converts self into a Left variant of Either<Self, Self> if into_left(&self) returns true. Converts self into a Right variant of Either<Self, Self> otherwise.
impl<T> Pointable for T
impl<SS, SP> SupersetOf<SS> for SP
where
    SS: SubsetOf<SP>,
fn to_subset(&self) -> Option<SS>
The inverse inclusion map: attempts to construct self from the equivalent element of its superset.
fn is_in_subset(&self) -> bool
Checks if self is actually part of its subset T (and can be converted to it).
fn to_subset_unchecked(&self) -> SS
Use with care! Same as self.to_subset but without any property checks. Always succeeds.
fn from_subset(element: &SS) -> SP
The inclusion map: converts self to the equivalent element of its superset.