use std::collections::HashMap;

use axonml_autograd::Variable;
use axonml_tensor::Tensor;

use crate::layers::Linear;
use crate::module::Module;
use crate::parameter::Parameter;

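/// Multi-head differential attention.
///
/// Each head splits its query and key projections into two halves, computes
/// two independent softmax attention maps, and uses their difference,
/// `attn1 - lambda * attn2`, to weight the values. The scalar `lambda` is a
/// learnable parameter (initialized to 0.05 by default), in the spirit of the
/// differential attention formulation from the Differential Transformer.
///
/// A minimal usage sketch (input assumed to be `[batch, seq_len, embed_dim]`):
///
/// ```ignore
/// let attn = DifferentialAttention::new(64, 4);
/// let input = Variable::new(
///     Tensor::from_vec(vec![0.1; 2 * 10 * 64], &[2, 10, 64]).unwrap(),
///     false,
/// );
/// let output = attn.forward(&input); // shape [2, 10, 64]
/// ```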
pub struct DifferentialAttention {
    q_proj: Linear,
    k_proj: Linear,
    v_proj: Linear,
    out_proj: Linear,
    lambda: Parameter,
    embed_dim: usize,
    num_heads: usize,
    head_dim: usize,
    half_head_dim: usize,
    scale: f32,
}

impl DifferentialAttention {
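    /// Creates a differential attention layer with the default initial
    /// `lambda` of 0.05.
    ///
    /// # Panics
    ///
    /// Panics if `embed_dim` is not divisible by `num_heads`, or if the
    /// resulting head dimension is odd.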
    pub fn new(embed_dim: usize, num_heads: usize) -> Self {
        Self::with_lambda(embed_dim, num_heads, 0.05)
    }

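    /// Creates a differential attention layer with a custom initial `lambda`.
    ///
    /// # Panics
    ///
    /// Panics if `embed_dim` is not divisible by `num_heads`, or if the
    /// resulting head dimension is odd.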
    pub fn with_lambda(embed_dim: usize, num_heads: usize, lambda_init: f32) -> Self {
        assert!(
            embed_dim % num_heads == 0,
            "embed_dim ({embed_dim}) must be divisible by num_heads ({num_heads})"
        );

        let head_dim = embed_dim / num_heads;
        assert!(
            head_dim % 2 == 0,
            "head_dim ({head_dim}) must be even for Q/K splitting"
        );

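        // Scores are computed over half-dimensional Q/K, so scale by 1/sqrt(head_dim / 2).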
        let half_head_dim = head_dim / 2;
        let scale = (half_head_dim as f32).sqrt().recip();

        let lambda_tensor = Tensor::from_vec(vec![lambda_init], &[1]).unwrap();

        Self {
            q_proj: Linear::new(embed_dim, embed_dim),
            k_proj: Linear::new(embed_dim, embed_dim),
            v_proj: Linear::new(embed_dim, embed_dim),
            out_proj: Linear::new(embed_dim, embed_dim),
            lambda: Parameter::named("lambda", lambda_tensor, true),
            embed_dim,
            num_heads,
            head_dim,
            half_head_dim,
            scale,
        }
    }

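    /// Computes differential attention over `query`, `key`, and `value`, each
    /// shaped `[batch, seq_len, embed_dim]`, and returns a tensor of shape
    /// `[batch, tgt_len, embed_dim]`.
    ///
    /// The attention mask argument is accepted but currently ignored.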
    pub fn attention(
        &self,
        query: &Variable,
        key: &Variable,
        value: &Variable,
        _attn_mask: Option<&Variable>,
    ) -> Variable {
        let q_shape = query.shape();
        let batch_size = q_shape[0];
        let tgt_len = q_shape[1];
        let src_len = key.shape()[1];

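        // Project inputs into query, key, and value spaces.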
        let q = self.q_proj.forward(query);
        let k = self.k_proj.forward(key);
        let v = self.v_proj.forward(value);

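        // Reshape to [batch, num_heads, seq_len, head_dim].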
        let q = q
            .reshape(&[batch_size, tgt_len, self.num_heads, self.head_dim])
            .transpose(1, 2);
        let k = k
            .reshape(&[batch_size, src_len, self.num_heads, self.head_dim])
            .transpose(1, 2);
        let v = v
            .reshape(&[batch_size, src_len, self.num_heads, self.head_dim])
            .transpose(1, 2);

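        // Split Q and K in half along the feature dimension.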
        let q1 = q.narrow(3, 0, self.half_head_dim);
        let q2 = q.narrow(3, self.half_head_dim, self.half_head_dim);

        let k1 = k.narrow(3, 0, self.half_head_dim);
        let k2 = k.narrow(3, self.half_head_dim, self.half_head_dim);

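        // Compute two independent softmax attention maps, one per half.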
        let k1_t = k1.transpose(2, 3);
        let scores1 = q1.matmul(&k1_t).mul_scalar(self.scale);
        let attn1 = scores1.softmax(-1);

        let k2_t = k2.transpose(2, 3);
        let scores2 = q2.matmul(&k2_t).mul_scalar(self.scale);
        let attn2 = scores2.softmax(-1);

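        // Differential attention: attn1 - lambda * attn2.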
        let lambda_var = self.lambda.variable();
        let attn2_scaled = self.broadcast_mul_scalar(&attn2, &lambda_var);

        let neg_attn2 = attn2_scaled.mul_scalar(-1.0);
        let diff_attn = attn1.add_var(&neg_attn2);

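        // Apply the differential attention weights to the values.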
        let attn_output = diff_attn.matmul(&v);

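        // Merge the heads back into the embedding dimension and project out.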
        let attn_output = attn_output
            .transpose(1, 2)
            .reshape(&[batch_size, tgt_len, self.embed_dim]);

        self.out_proj.forward(&attn_output)
    }

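    /// Scales an attention map by the current value of `lambda`.
    ///
    /// The scalar is read out of the parameter and materialized as a constant
    /// tensor of the attention map's shape, so gradients flow to the attention
    /// map but not to `lambda` itself through this multiplication.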
    fn broadcast_mul_scalar(&self, attn: &Variable, lambda: &Variable) -> Variable {
        let lambda_val = lambda.data().to_vec()[0];
        let attn_shape = attn.shape();
        let total = attn_shape.iter().product::<usize>();
        let lambda_expanded = Tensor::from_vec(vec![lambda_val; total], &attn_shape).unwrap();
        let lambda_var = Variable::new(lambda_expanded, false);
        attn.mul_var(&lambda_var)
    }

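    /// Returns the current value of the learnable `lambda` scalar.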
    pub fn lambda_value(&self) -> f32 {
        self.lambda.data().to_vec()[0]
    }

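    /// Returns the embedding dimension.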
    pub fn embed_dim(&self) -> usize {
        self.embed_dim
    }

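    /// Returns the number of attention heads.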
    pub fn num_heads(&self) -> usize {
        self.num_heads
    }
}

impl Module for DifferentialAttention {
    fn forward(&self, input: &Variable) -> Variable {
        self.attention(input, input, input, None)
    }

    fn parameters(&self) -> Vec<Parameter> {
        let mut params = Vec::new();
        params.extend(self.q_proj.parameters());
        params.extend(self.k_proj.parameters());
        params.extend(self.v_proj.parameters());
        params.extend(self.out_proj.parameters());
        params.push(self.lambda.clone());
        params
    }

    fn named_parameters(&self) -> HashMap<String, Parameter> {
        let mut params = HashMap::new();
        for (name, param) in self.q_proj.named_parameters() {
            params.insert(format!("q_proj.{name}"), param);
        }
        for (name, param) in self.k_proj.named_parameters() {
            params.insert(format!("k_proj.{name}"), param);
        }
        for (name, param) in self.v_proj.named_parameters() {
            params.insert(format!("v_proj.{name}"), param);
        }
        for (name, param) in self.out_proj.named_parameters() {
            params.insert(format!("out_proj.{name}"), param);
        }
        params.insert("lambda".to_string(), self.lambda.clone());
        params
    }

    fn name(&self) -> &'static str {
        "DifferentialAttention"
    }
}

impl std::fmt::Debug for DifferentialAttention {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("DifferentialAttention")
            .field("embed_dim", &self.embed_dim)
            .field("num_heads", &self.num_heads)
            .field("head_dim", &self.head_dim)
            .field("half_head_dim", &self.half_head_dim)
            .field("lambda", &self.lambda_value())
            .finish()
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_diff_attention_creation() {
        let attn = DifferentialAttention::new(64, 4);
        assert_eq!(attn.embed_dim(), 64);
        assert_eq!(attn.num_heads(), 4);
        assert_eq!(attn.head_dim, 16);
        assert_eq!(attn.half_head_dim, 8);
        assert!((attn.lambda_value() - 0.05).abs() < 1e-6);
    }

    #[test]
    fn test_diff_attention_forward() {
        let attn = DifferentialAttention::new(64, 4);
        let input = Variable::new(
            Tensor::from_vec(vec![0.1; 2 * 10 * 64], &[2, 10, 64]).unwrap(),
            false,
        );
        let output = attn.forward(&input);
        assert_eq!(output.shape(), vec![2, 10, 64]);
    }

    #[test]
    fn test_diff_attention_cross() {
        let attn = DifferentialAttention::new(64, 4);
        let query = Variable::new(
            Tensor::from_vec(vec![0.1; 2 * 5 * 64], &[2, 5, 64]).unwrap(),
            false,
        );
        let kv = Variable::new(
            Tensor::from_vec(vec![0.2; 2 * 10 * 64], &[2, 10, 64]).unwrap(),
            false,
        );
        let output = attn.attention(&query, &kv, &kv, None);
        assert_eq!(output.shape(), vec![2, 5, 64]);
    }

    #[test]
    fn test_diff_attention_parameters() {
        let attn = DifferentialAttention::new(64, 4);
        let params = attn.parameters();
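        // 4 Linear layers (weight + bias each) plus lambda = 9.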
        assert_eq!(params.len(), 9);
    }

    #[test]
    fn test_diff_attention_lambda_in_named_params() {
        let attn = DifferentialAttention::new(64, 4);
        let named = attn.named_parameters();
        assert!(named.contains_key("lambda"));
        assert!(named.contains_key("q_proj.weight"));
        assert!(named.contains_key("out_proj.bias"));
    }

    #[test]
    fn test_diff_attention_backward() {
        use axonml_autograd::backward;

        let attn = DifferentialAttention::new(32, 2);
        let input = Variable::new(
            Tensor::from_vec(vec![0.1; 2 * 4 * 32], &[2, 4, 32]).unwrap(),
            true,
        );
        let output = attn.forward(&input);
        assert_eq!(output.shape(), vec![2, 4, 32]);

        let loss = output.sum();
        let ones = Tensor::from_vec(vec![1.0f32], &[1]).unwrap();
        backward(&loss, &ones);

        let grad = input.grad();
        assert!(grad.is_some(), "Input gradient should exist");
        let grad_data = grad.unwrap();
        assert_eq!(grad_data.shape(), &[2, 4, 32]);

        let grad_vec = grad_data.to_vec();
        let non_zero = grad_vec.iter().any(|&v| v.abs() > 1e-10);
        assert!(non_zero, "Gradients should be non-zero");
    }

    #[test]
    fn test_diff_attention_custom_lambda() {
        let attn = DifferentialAttention::with_lambda(64, 4, 0.1);
        assert!((attn.lambda_value() - 0.1).abs() < 1e-6);
    }
}