impl OwnedQuantizedModel {
/// Fused matmul into pre-allocated output buffer
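///
/// Dispatches on `weight.qtype` for the single-token (matvec) case and falls
/// back to the allocating [`Self::fused_matmul`] path for multi-token inputs
/// or unsupported quantization types.
///
/// Sketch of the intended call pattern (identifiers are illustrative, not a
/// fixed API):
///
/// ```ignore
/// let mut out = vec![0.0f32; weight.out_dim];
/// model.fused_matmul_into(&hidden, &weight, &mut out)?;
/// ```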
pub(crate) fn fused_matmul_into(
&self,
input: &[f32],
weight: &OwnedQuantizedTensor,
output: &mut [f32],
) -> Result<()> {
use crate::quantize::{
fused_q4_0_q8_0_parallel_matvec_into, fused_q4k_parallel_matvec_into,
fused_q5k_parallel_matvec_into, fused_q6k_parallel_matvec_into,
fused_q8_0_q8_0_parallel_matvec_into,
};
let in_dim = weight.in_dim;
let out_dim = weight.out_dim;
let seq_len = input.len() / in_dim;
// Only the single-token (matvec) case is fused for now (the common case during
// generation); longer inputs fall back to the allocating path below.
if seq_len != 1 {
let result = self.fused_matmul(input, weight)?;
output[..result.len()].copy_from_slice(&result);
return Ok(());
}
debug_assert!(
output.len() >= out_dim,
"Output buffer too small: {} < {}",
output.len(),
out_dim
);
match weight.qtype {
GGUF_TYPE_Q4_0 => fused_q4_0_q8_0_parallel_matvec_into(
&weight.data,
input,
in_dim,
&mut output[..out_dim],
),
GGUF_TYPE_Q8_0 => fused_q8_0_q8_0_parallel_matvec_into(
&weight.data,
input,
in_dim,
out_dim,
&mut output[..out_dim],
),
GGUF_TYPE_Q4_K => fused_q4k_parallel_matvec_into(
&weight.data,
input,
in_dim,
out_dim,
&mut output[..out_dim],
),
GGUF_TYPE_Q5_K => fused_q5k_parallel_matvec_into(
&weight.data,
input,
in_dim,
out_dim,
&mut output[..out_dim],
),
GGUF_TYPE_Q6_K => fused_q6k_parallel_matvec_into(
&weight.data,
input,
in_dim,
out_dim,
&mut output[..out_dim],
),
_ => {
let result = self.fused_matmul(input, weight)?;
output[..result.len()].copy_from_slice(&result);
Ok(())
},
}
}
/// Fused gate+up matmul into pre-allocated output buffers (PMAT-FFN-FUSION)
///
/// Computes the gate and up projections in a single rayon dispatch when the
/// input is a single token and both weights share the same K-quant type
/// (Q4_K, Q5_K, or Q6_K) and dimensions. Otherwise falls back to `rayon::join`
/// over two separate `fused_matmul_into` calls.
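///
/// Sketch of the intended use (buffer sizes and names are illustrative):
///
/// ```ignore
/// let mut gate = vec![0.0f32; gate_w.out_dim];
/// let mut up = vec![0.0f32; up_w.out_dim];
/// model.fused_gate_up_matmul_into(&hidden, &gate_w, &up_w, &mut gate, &mut up)?;
/// ```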
pub(crate) fn fused_gate_up_matmul_into(
&self,
input: &[f32],
gate_weight: &OwnedQuantizedTensor,
up_weight: &OwnedQuantizedTensor,
gate_output: &mut [f32],
up_output: &mut [f32],
) -> Result<()> {
use crate::quantize::{
fused_gate_up_q4k_into, fused_gate_up_q5k_into, fused_gate_up_q6k_into,
};
let in_dim = gate_weight.in_dim;
let out_dim = gate_weight.out_dim;
let seq_len = input.len() / in_dim;
// Only fuse for the single-token case with matching quantization types and dimensions
if seq_len == 1
&& gate_weight.qtype == up_weight.qtype
&& gate_weight.in_dim == up_weight.in_dim
&& gate_weight.out_dim == up_weight.out_dim
{
match gate_weight.qtype {
GGUF_TYPE_Q4_K => {
return fused_gate_up_q4k_into(
&gate_weight.data,
&up_weight.data,
input,
in_dim,
out_dim,
gate_output,
up_output,
);
}
GGUF_TYPE_Q5_K => {
return fused_gate_up_q5k_into(
&gate_weight.data,
&up_weight.data,
input,
in_dim,
out_dim,
gate_output,
up_output,
);
}
GGUF_TYPE_Q6_K => {
return fused_gate_up_q6k_into(
&gate_weight.data,
&up_weight.data,
input,
in_dim,
out_dim,
gate_output,
up_output,
);
}
_ => {} // Fall through to rayon::join fallback
}
}
// Fallback: two separate matmuls via rayon::join
let (gate_result, up_result) = rayon::join(
|| self.fused_matmul_into(input, gate_weight, gate_output),
|| self.fused_matmul_into(input, up_weight, up_output),
);
gate_result?;
up_result?;
Ok(())
}
/// QKV projection matmul
pub fn qkv_matmul(&self, input: &[f32], qkv: &OwnedQKVWeights) -> Result<Vec<f32>> {
let hidden_dim = self.config.hidden_dim;
match qkv {
OwnedQKVWeights::Fused(ref weight) => self.fused_matmul(input, weight),
OwnedQKVWeights::Separate {
ref q,
ref k,
ref v,
} => {
let seq_len = input.len() / hidden_dim;
// P4: Parallel QKV projections — K+V overlap with Q tail
let (q_out, (k_out, v_out)) = rayon::join(
|| self.fused_matmul(input, q),
|| rayon::join(
|| self.fused_matmul(input, k),
|| self.fused_matmul(input, v),
),
);
let q_out = q_out?;
let k_out = k_out?;
let v_out = v_out?;
// Interleave Q, K, V for each position
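// Layout per position s: [Q_s | K_s | V_s], so e.g. with illustrative dims
// q.out_dim=4096, k.out_dim=1024, v.out_dim=1024 each token contributes a
// contiguous 6144-element slice.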
let qkv_dim = q.out_dim + k.out_dim + v.out_dim;
let mut output = Vec::with_capacity(seq_len * qkv_dim);
for s in 0..seq_len {
output.extend_from_slice(&q_out[s * q.out_dim..(s + 1) * q.out_dim]);
output.extend_from_slice(&k_out[s * k.out_dim..(s + 1) * k.out_dim]);
output.extend_from_slice(&v_out[s * v.out_dim..(s + 1) * v.out_dim]);
}
Ok(output)
},
}
}
/// QKV matmul into pre-allocated buffer
pub fn qkv_matmul_into(
&self,
input: &[f32],
qkv: &OwnedQKVWeights,
output: &mut [f32],
) -> Result<()> {
match qkv {
OwnedQKVWeights::Fused(ref weight) => self.fused_matmul_into(input, weight, output),
OwnedQKVWeights::Separate {
ref q,
ref k,
ref v,
} => {
let q_dim = q.out_dim;
let k_dim = k.out_dim;
let v_dim = v.out_dim;
// P4: Parallel QKV projections — split output buffer, run concurrently
let (q_out, kv_out) = output[..q_dim + k_dim + v_dim].split_at_mut(q_dim);
let (k_out, v_out) = kv_out.split_at_mut(k_dim);
let (q_res, (k_res, v_res)) = rayon::join(
|| self.fused_matmul_into(input, q, q_out),
|| rayon::join(
|| self.fused_matmul_into(input, k, k_out),
|| self.fused_matmul_into(input, v, v_out),
),
);
q_res?;
k_res?;
v_res?;
Ok(())
},
}
}
/// Layer normalization
pub fn layer_norm(
&self,
input: &[f32],
weight: &[f32],
bias: Option<&[f32]>,
eps: f32,
) -> Vec<f32> {
ops::layer_norm(input, weight, bias, eps)
}
/// Add bias to activations
pub fn add_bias(&self, input: &mut [f32], bias: &[f32]) {
for (x, b) in input.iter_mut().zip(bias.iter()) {
*x += b;
}
}
/// GELU activation
///
/// ONE PATH: Per-element delegates to `trueno::gelu_scalar` (UCBD §4).
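///
/// GELU here is the standard Gaussian error linear unit,
/// `gelu(x) = 0.5 * x * (1 + erf(x / sqrt(2)))` (exact form; `trueno::gelu_scalar`
/// may use the common tanh approximation instead).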
pub fn gelu(&self, input: &mut [f32]) {
for x in input.iter_mut() {
*x = trueno::gelu_scalar(*x);
}
}
/// Fused RMSNorm + matmul helper
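///
/// RMSNorm scales each element as `x_i * w_i / sqrt(mean(x^2) + eps)`; the fused
/// Q4_0 kernel is expected to fold that scaling into the dequantize-and-dot loop,
/// while the fallback below materializes the normalized vector first.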
fn fused_rmsnorm_matmul(
&self,
input: &[f32],
norm_weight: &[f32],
eps: f32,
weight: &OwnedQuantizedTensor,
) -> Result<Vec<f32>> {
use crate::quantize::fused_rmsnorm_q4_0_matmul;
// Only use the fused path for Q4_0 weights (the most common case)
if weight.qtype == GGUF_TYPE_Q4_0 && input.len() == weight.in_dim {
return fused_rmsnorm_q4_0_matmul(
input,
norm_weight,
eps,
&weight.data,
weight.in_dim,
weight.out_dim,
);
}
// Fallback to separate RMSNorm + matmul for other types
let normed = ops::rms_norm(input, norm_weight, eps);
self.fused_matmul(&normed, weight)
}
/// Fused RMSNorm + QKV matmul
pub fn fused_rmsnorm_qkv_matmul(
&self,
input: &[f32],
norm_weight: &[f32],
eps: f32,
qkv: &OwnedQKVWeights,
) -> Result<Vec<f32>> {
match qkv {
OwnedQKVWeights::Fused(ref weight) => {
self.fused_rmsnorm_matmul(input, norm_weight, eps, weight)
},
OwnedQKVWeights::Separate {
ref q,
ref k,
ref v,
} => {
// For separate Q/K/V, normalize once and reuse
let normed = ops::rms_norm(input, norm_weight, eps);
// PMAT-114: Trace K weight to compare with APR
static ONCE: std::sync::atomic::AtomicBool =
std::sync::atomic::AtomicBool::new(false);
if std::env::var("APR_TRACE_WEIGHTS").is_ok()
&& !ONCE.swap(true, std::sync::atomic::Ordering::Relaxed)
{
eprintln!(
"[PMAT-114-GGUF] K weight: in_dim={}, out_dim={}, qtype={}, data_len={}",
k.in_dim,
k.out_dim,
k.qtype,
k.data.len()
);
// Dequantize first row completely to compare with APR
// Q4K: 144 bytes per super-block of 256 values, so first row = in_dim/256 super-blocks
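// Worked example (illustrative): in_dim = 2048 -> 2048/256 = 8 super-blocks -> 8 * 144 = 1152 bytes.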
let bytes_per_row = (k.in_dim.div_ceil(256)) * 144;
use crate::quantize::dequantize_q4_k_parallel;
if let Ok(dequant) = dequantize_q4_k_parallel(&k.data[0..bytes_per_row]) {
let row_mean: f32 = dequant.iter().sum::<f32>() / dequant.len() as f32;
let row_min = dequant.iter().cloned().fold(f32::INFINITY, f32::min);
let row_max = dequant.iter().cloned().fold(f32::NEG_INFINITY, f32::max);
eprintln!("[PMAT-114-GGUF] K weight row 0 (dequant): mean={:.6}, min={:.6}, max={:.6}, len={}",
row_mean, row_min, row_max, dequant.len());
eprintln!(
"[PMAT-114-GGUF] K weight row 0 first10={:?}",
&dequant[..10.min(dequant.len())]
);
}
}
// P4: Parallel QKV projections — K+V overlap with Q tail
let (q_out, (k_out, v_out)) = rayon::join(
|| self.fused_matmul(&normed, q),
|| rayon::join(
|| self.fused_matmul(&normed, k),
|| self.fused_matmul(&normed, v),
),
);
let q_out = q_out?;
let k_out = k_out?;
let v_out = v_out?;
let qkv_dim = q.out_dim + k.out_dim + v.out_dim;
let mut output = Vec::with_capacity(qkv_dim);
output.extend_from_slice(&q_out);
output.extend_from_slice(&k_out);
output.extend_from_slice(&v_out);
Ok(output)
},
}
}
/// Fused RMSNorm + LM head
pub fn fused_rmsnorm_lm_head(&self, input: &[f32]) -> Result<Vec<f32>> {
use crate::quantize::fused_rmsnorm_q4_0_matmul;
// Only use fused path for Q4_0 weights
if self.lm_head_weight.qtype == GGUF_TYPE_Q4_0 && input.len() == self.lm_head_weight.in_dim
{
return fused_rmsnorm_q4_0_matmul(
input,
&self.output_norm_weight,
self.config.eps,
&self.lm_head_weight.data,
self.lm_head_weight.in_dim,
self.lm_head_weight.out_dim,
);
}
// Fallback to separate RMSNorm + matmul for other types
let normed = ops::rms_norm(input, &self.output_norm_weight, self.config.eps);
self.fused_matmul(&normed, &self.lm_head_weight)
}
/// Fused RMSNorm + FFN up/gate projections for SwiGLU
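///
/// Returns the `(up, gate)` projections of the RMS-normalized input; the SwiGLU
/// combine step, `silu(gate) * up`, is left to the caller downstream.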
pub fn fused_rmsnorm_ffn_up_gate(
&self,
input: &[f32],
norm_weight: &[f32],
eps: f32,
up_weight: &OwnedQuantizedTensor,
gate_weight: &OwnedQuantizedTensor,
) -> Result<(Vec<f32>, Vec<f32>)> {
use crate::quantize::fused_rmsnorm_ffn_up_gate;
// Only use fused path for Q4_0 weights
if up_weight.qtype == GGUF_TYPE_Q4_0
&& gate_weight.qtype == GGUF_TYPE_Q4_0
&& input.len() == up_weight.in_dim
&& up_weight.in_dim == gate_weight.in_dim
&& up_weight.out_dim == gate_weight.out_dim
{
return fused_rmsnorm_ffn_up_gate(
input,
norm_weight,
eps,
&up_weight.data,
&gate_weight.data,
up_weight.in_dim,
up_weight.out_dim,
);
}
// PMAT-309: Shared Q8K for Q4K gate+up FALSIFIED (-3.4%, 31.5 vs 32.6).
// The Vec allocation overhead for quantize_for_q4k_matvec (3 Vecs)
// offsets the saving from skipping one Q8K quantize.
// The per-matmul path with stack-allocated Q8K is faster.
let normed = ops::rms_norm(input, norm_weight, eps);
let up_out = self.fused_matmul(&normed, up_weight)?;
let gate_out = self.fused_matmul(&normed, gate_weight)?;
Ok((up_out, gate_out))
}
/// Q8K QKV matmul into buffer
///
/// Uses pre-quantized Q8K activations for faster matmul with Q4K weights.
/// Dispatches to `fused_q4k_q8k_parallel_matvec_into` (maddubs-based, 32 vals/instr)
/// instead of the f32 dequant path (8 vals/instr) for ~3-4x speedup on QKV projections.
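///
/// Sketch of the intended call, assuming the activation has already been
/// quantized to Q8K so that `scales`/`quants` cover `in_dim` values
/// (identifiers here are illustrative):
///
/// ```ignore
/// let mut qkv_out = vec![0.0f32; q_dim + k_dim + v_dim];
/// model.qkv_matmul_q8k_into(&hidden, &qkv_weights, &mut qkv_out, &scales, &quants)?;
/// ```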
pub fn qkv_matmul_q8k_into(
&self,
input: &[f32],
qkv: &OwnedQKVWeights,
output: &mut [f32],
scales: &[f32],
quants: &[i8],
) -> Result<()> {
use crate::quantize::fused_q4k_q8k_parallel_matvec_into;
match qkv {
OwnedQKVWeights::Fused(ref weight) => {
if weight.qtype == GGUF_TYPE_Q4_K {
fused_q4k_q8k_parallel_matvec_into(
&weight.data,
scales,
quants,
weight.in_dim,
weight.out_dim,
output,
)
} else {
self.fused_matmul_into(input, weight, output)
}
}
OwnedQKVWeights::Separate {
ref q,
ref k,
ref v,
} => {
let q_dim = q.out_dim;
let k_dim = k.out_dim;
let v_dim = v.out_dim;
// P4: Parallel QKV projections — split output buffer, run concurrently
let (q_out, kv_out) = output[..q_dim + k_dim + v_dim].split_at_mut(q_dim);
let (k_out, v_out) = kv_out.split_at_mut(k_dim);
let q_fn = || -> Result<()> {
if q.qtype == GGUF_TYPE_Q4_K {
fused_q4k_q8k_parallel_matvec_into(
&q.data, scales, quants, q.in_dim, q_dim, q_out,
)
} else {
self.fused_matmul_into(input, q, q_out)
}
};
let k_fn = || -> Result<()> {
if k.qtype == GGUF_TYPE_Q4_K {
fused_q4k_q8k_parallel_matvec_into(
&k.data, scales, quants, k.in_dim, k_dim, k_out,
)
} else {
self.fused_matmul_into(input, k, k_out)
}
};
let v_fn = || -> Result<()> {
if v.qtype == GGUF_TYPE_Q4_K {
fused_q4k_q8k_parallel_matvec_into(
&v.data, scales, quants, v.in_dim, v_dim, v_out,
)
} else {
self.fused_matmul_into(input, v, v_out)
}
};
let (q_res, (k_res, v_res)) = rayon::join(q_fn, || rayon::join(k_fn, v_fn));
q_res?;
k_res?;
v_res?;
Ok(())
}
}
}
/// Helper to dequantize weights for CUDA GEMM
#[cfg(feature = "cuda")]
fn dequantize_weight_for_cuda(&self, weight: &OwnedQuantizedTensor) -> Result<Vec<f32>> {
use crate::quantize::{
dequantize_q4_0, dequantize_q4_1, dequantize_q4_k, dequantize_q5_0, dequantize_q5_k,
dequantize_q6_k, dequantize_q8_0,
};
match weight.qtype {
// GH-242: F32 weights are already dequantized — reinterpret bytes
GGUF_TYPE_F32 => {
let floats: Vec<f32> = weight
.data
.chunks_exact(4)
.map(|b| f32::from_le_bytes([b[0], b[1], b[2], b[3]]))
.collect();
Ok(floats)
}
// GH-242: F16 weights — convert to F32
GGUF_TYPE_F16 => {
let floats: Vec<f32> = weight
.data
.chunks_exact(2)
.map(|b| {
let bits = u16::from_le_bytes([b[0], b[1]]);
half::f16::from_bits(bits).to_f32()
})
.collect();
Ok(floats)
}
// GH-368: BF16 weights — convert to F32 (left-shift 16 bits)
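// e.g. BF16 0x3F80 (1.0) -> 0x3F80_0000, which is 1.0f32; the low mantissa bits are zero-filled.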
GGUF_TYPE_BF16 => {
let floats: Vec<f32> = weight
.data
.chunks_exact(2)
.map(|b| {
let bits = u16::from_le_bytes([b[0], b[1]]);
f32::from_bits((bits as u32) << 16)
})
.collect();
Ok(floats)
}
GGUF_TYPE_Q4_0 => dequantize_q4_0(&weight.data),
GGUF_TYPE_Q4_1 => dequantize_q4_1(&weight.data),
GGUF_TYPE_Q5_0 => dequantize_q5_0(&weight.data),
GGUF_TYPE_Q8_0 => dequantize_q8_0(&weight.data),
GGUF_TYPE_Q4_K => dequantize_q4_k(&weight.data),
GGUF_TYPE_Q5_K => dequantize_q5_k(&weight.data),
GGUF_TYPE_Q6_K => dequantize_q6_k(&weight.data),
_ => Err(RealizarError::UnsupportedOperation {
operation: "dequantize_weight_for_cuda".to_string(),
reason: format!("Unsupported quantization type: {}", weight.qtype),
}),
}
}
}
include!("matmul_fused.rs");