// mlx_native/kernel_registry.rs
//! [`KernelRegistry`] — lazy compilation and caching of Metal compute pipelines.
//!
//! MSL shader source is embedded at compile time via `include_str!`.  On first
//! access, the source is compiled into a Metal library, the named function is
//! extracted, and a `ComputePipelineState` is created and cached.  Subsequent
//! calls return the cached pipeline.

use std::collections::HashMap;

use metal::{ComputePipelineDescriptor, ComputePipelineState, FunctionConstantValues, MTLDataType};

use crate::error::{MlxError, Result};

// MTLDataType numeric values (from metal-rs argument.rs, confirmed in the
// Apple Metal specification):
//   Int  = 29
//   Bool = 53
// These are used when calling set_constant_value_at_index so the Metal
// runtime knows how wide each function-constant value is.

20/// Registry that lazily compiles and caches Metal compute pipelines from
21/// embedded MSL source.
22///
23/// # Usage
24///
25/// ```ignore
26/// let mut registry = KernelRegistry::new();
27/// let pipeline = registry.get_pipeline("elementwise_add", device.metal_device())?;
28/// encoder.encode(&pipeline, &buffers, grid, tg);
29/// ```
30///
31/// # Thread Safety
32///
33/// `KernelRegistry` is **not** `Sync` by default (it uses `&mut self` for
34/// `get_pipeline` to allow mutable cache insertion).  If you need concurrent
35/// access, wrap it in a `Mutex` or use one registry per thread.
36pub struct KernelRegistry {
37    /// Cached pipelines keyed by kernel function name.
38    cache: HashMap<String, ComputePipelineState>,
39    /// MSL source text keyed by kernel function name.
40    ///
41    /// Populated at construction time with all embedded shader sources.
42    sources: HashMap<String, &'static str>,
43}
44
45impl KernelRegistry {
46    /// Create a new registry with all embedded shader sources pre-registered.
47    ///
48    /// No compilation happens here — shaders are compiled lazily on first use.
49    pub fn new() -> Self {
50        let mut sources = HashMap::new();
51
52        // Register embedded shader sources.
53        sources.insert(
54            "placeholder".into(),
55            include_str!("shaders/placeholder.metal"),
56        );
57        sources.insert(
58            "quantized_matmul".into(),
59            include_str!("shaders/quantized_matmul.metal"),
60        );
61        sources.insert(
62            "quantized_matmul_simd".into(),
63            include_str!("shaders/quantized_matmul.metal"),
64        );
65        sources.insert(
66            "quantized_matmul_simd_bf16".into(),
67            include_str!("shaders/quantized_matmul.metal"),
68        );
69        sources.insert(
70            "quantized_matmul_simd_bf16_expert".into(),
71            include_str!("shaders/quantized_matmul.metal"),
72        );
73
74        // GGML block-format quantized mat-vec kernels (ADR-006 Phase 3)
75        let ggml_src: &'static str =
76            include_str!("shaders/quantized_matmul_ggml.metal");
77        sources.insert("kernel_mul_mv_q4_0_f32".into(), ggml_src);
78        sources.insert("kernel_mul_mv_q8_0_f32".into(), ggml_src);
79        sources.insert("kernel_mul_mv_q6_K_f32".into(), ggml_src);
80        // ADR-028 iter-309 — q6_K mat-vec with nr0=2 + cached yl[16]
81        // (peer-pattern port of llama.cpp's `kernel_mul_mv_q6_K_f32_impl`
82        // with N_R0_Q6_K=2; 4 rows/TG vs baseline's 2).  Env-gated via
83        // `HF2Q_Q6K_MV_NR2=1` in the dispatcher.
84        sources.insert("kernel_mul_mv_q6_K_f32_nr2".into(), ggml_src);
85        // ADR-022 Phase 1 — Q5_1 / IQ4_NL dense mat-vec.
86        sources.insert("kernel_mul_mv_q5_1_f32".into(), ggml_src);
87        sources.insert("kernel_mul_mv_iq4_nl_f32".into(), ggml_src);
88        // ADR-013 P7 — Q4_K dense decode mat-vec (port of llama.cpp's
89        // kernel_mul_mv_q4_K_f32 at ggml-metal.metal:7715-7821).
90        sources.insert("kernel_mul_mv_q4_K_f32".into(), ggml_src);
91        // ADR-022 Phase 2 — Q5_K dense mv kernel.
92        sources.insert("kernel_mul_mv_q5_K_f32".into(), ggml_src);
93
94        // GGML block-format quantized matrix-matrix kernels
95        // (ADR-011 Phase 3 Wave P3a: port of llama.cpp's kernel_mul_mm_<q>_f32).
96        // Used at prefill m > 8 to reuse each weight tile across a 32-row
97        // block via threadgroup-staged simdgroup MMA, instead of re-reading
98        // every block per prompt-token as the mv kernel does.
99        let ggml_mm_src: &'static str =
100            include_str!("shaders/quantized_matmul_mm.metal");
101        sources.insert("kernel_mul_mm_q4_0_f32".into(), ggml_mm_src);
102        sources.insert("kernel_mul_mm_q8_0_f32".into(), ggml_mm_src);
103        sources.insert("kernel_mul_mm_q6_K_f32".into(), ggml_mm_src);
104        // ADR-022 Phase 1 — dense Q5_1 / IQ4_NL mm.
105        sources.insert("kernel_mul_mm_q5_1_f32".into(), ggml_mm_src);
106        sources.insert("kernel_mul_mm_iq4_nl_f32".into(), ggml_mm_src);
107        // ADR-022 Phase 2 — dense Q5_K mm.
108        sources.insert("kernel_mul_mm_q5_K_f32".into(), ggml_mm_src);
109        // ADR-022 Phase 3 — dense Q4_K mm.
110        sources.insert("kernel_mul_mm_q4_K_f32".into(), ggml_mm_src);
111
112        // GGML block-format quantized matrix-matrix kernels — tensor API
113        // variant (ADR-011 Phase 3 Wave P3b-tensor: port of llama.cpp's
114        // kernel_mul_mm_impl `#ifdef GGML_METAL_HAS_TENSOR` branch).
115        // Uses Apple's MetalPerformancePrimitives `tensor_ops::matmul2d`
116        // primitive which on M3+ dispatches to hardware tensor cores for
117        // 2-3x the effective FLOP throughput vs the simdgroup MMA path.
118        // Only compiled on devices where the tensor API is available; the
119        // kernel_registry's runtime-probe (see MlxDevice::has_tensor) gates
120        // compilation so non-tensor devices transparently fall back to the
121        // non-tensor `kernel_mul_mm_<q>_f32` kernels.
122        let ggml_mm_tensor_src: &'static str =
123            include_str!("shaders/quantized_matmul_mm_tensor.metal");
124        sources.insert("kernel_mul_mm_q4_0_tensor_f32".into(), ggml_mm_tensor_src);
125        sources.insert("kernel_mul_mm_q4_0_tensor_bf16_perm021".into(), ggml_mm_tensor_src);
126        sources.insert("kernel_mul_mm_q6_K_tensor_bf16_perm021".into(), ggml_mm_tensor_src);
127        sources.insert("kernel_mul_mm_q8_0_tensor_f32".into(), ggml_mm_tensor_src);
128        sources.insert("kernel_mul_mm_q6_K_tensor_f32".into(), ggml_mm_tensor_src);
129        // ADR-022 Phase 1 — Q5_1 / IQ4_NL tensor mm.
130        sources.insert("kernel_mul_mm_q5_1_tensor_f32".into(), ggml_mm_tensor_src);
131        sources.insert("kernel_mul_mm_iq4_nl_tensor_f32".into(), ggml_mm_tensor_src);
132        // ADR-022 Phase 2 — Q5_K tensor mm.
133        sources.insert("kernel_mul_mm_q5_K_tensor_f32".into(), ggml_mm_tensor_src);
134        // ADR-022 Phase 3 — Q4_K tensor mm + Q8_0 perm021.
135        sources.insert("kernel_mul_mm_q4_K_tensor_f32".into(), ggml_mm_tensor_src);
136        sources.insert("kernel_mul_mm_q8_0_tensor_bf16_perm021".into(), ggml_mm_tensor_src);
137
138        // ADR-022 Phase 1 P1.7 — Q5_1 / IQ4_NL mul_mv_ext r1 family.
139        // Eight instantiations (2 types × 4 r1ptg widths). Each PSO is
140        // additionally specialized at PSO-compile time with FC_mul_mv_nsg
141        // (function_constant 600) and FC_mul_mv_nxpsg (function_constant 601).
142        let mul_mv_ext_src: &'static str = include_str!("shaders/mul_mv_ext.metal");
143        sources.insert("kernel_mul_mv_ext_q5_1_f32_r1_2".into(), mul_mv_ext_src);
144        sources.insert("kernel_mul_mv_ext_q5_1_f32_r1_3".into(), mul_mv_ext_src);
145        sources.insert("kernel_mul_mv_ext_q5_1_f32_r1_4".into(), mul_mv_ext_src);
146        sources.insert("kernel_mul_mv_ext_q5_1_f32_r1_5".into(), mul_mv_ext_src);
147        sources.insert("kernel_mul_mv_ext_iq4_nl_f32_r1_2".into(), mul_mv_ext_src);
148        sources.insert("kernel_mul_mv_ext_iq4_nl_f32_r1_3".into(), mul_mv_ext_src);
149        sources.insert("kernel_mul_mv_ext_iq4_nl_f32_r1_4".into(), mul_mv_ext_src);
150        sources.insert("kernel_mul_mv_ext_iq4_nl_f32_r1_5".into(), mul_mv_ext_src);
151        // ADR-022 Phase 4 — Q4_0 / Q8_0 / Q4_K / Q5_K / Q6_K mv_ext.
152        // 5 types × 4 r1ptg widths = 20 instantiations.
153        for r1 in [2, 3, 4, 5].iter() {
154            for ty in ["q4_0", "q8_0", "q4_K", "q5_K", "q6_K"].iter() {
155                let name = format!("kernel_mul_mv_ext_{ty}_f32_r1_{r1}");
156                sources.insert(name, mul_mv_ext_src);
157            }
158        }
159
160        // Dense bf16×f32 → f32 tensor-API matmul (non-flash-attention
161        // prefill Q@K^T and scores@V, modeled on llama.cpp's
162        // kernel_mul_mm_bf16_f32 with the GGML_METAL_HAS_TENSOR branch
163        // active).  Tile geometry and write-back identical to the
164        // quantized tensor kernel; only the A-stage copy (bfloat →
165        // bfloat, no dequantize) differs.
166        let dense_mm_bf16_tensor_src: &'static str =
167            include_str!("shaders/dense_mm_bf16_tensor.metal");
168        sources.insert("hf2q_dense_mm_bf16_f32_tensor".into(), dense_mm_bf16_tensor_src);
169
170        // Dense f32×f32 → f32 tensor-API matmul (F32-everywhere
171        // sibling of dense_mm_bf16_tensor).  Used by hf2q's ADR-005
172        // iter-118 BF16-vs-F32 ViT attention A/B diagnostic to remove
173        // the BF16 K-stage cast as a confounding variable.  Port of
174        // llama.cpp's kernel_mul_mm_f32_f32 specialization
175        // (ggml-metal.metal:10098) on the GGML_METAL_HAS_TENSOR
176        // branch.  Same tile geometry (NR0=64 NR1=32 NK=32) but
177        // float-everywhere shmem staging.
178        let dense_mm_f32_f32_tensor_src: &'static str =
179            include_str!("shaders/dense_mm_f32_f32.metal");
180        sources.insert("hf2q_dense_mm_f32_f32_tensor".into(), dense_mm_f32_f32_tensor_src);
181
182        // Dense f16×f32 → f32 tensor-API matmul (F16-staging sibling
183        // of dense_mm_bf16_tensor).  Used by hf2q's ADR-005 Phase 2c
184        // iter-128 gemma4v ViT precision-parity path: every mmproj
185        // weight is stored as F16 in GGUF, peer's `kernel_mul_mm_f16_f32`
186        // (`ggml-metal.metal:10099`) stages BOTH A and B as `half` in
187        // shmem and computes on `simdgroup_half8x8`.  Matches peer
188        // per-element rounding budget exactly (10-bit mantissa vs
189        // BF16's 7-bit), closing the 1.16x/block cascade compound that
190        // iter-127 numerically bisected to BF16 staging.  Same tile
191        // geometry as the BF16 sibling (NR0=64 NR1=32 NK=32, 8 KB
192        // shmem) — half and bfloat share 16-bit storage.
193        let dense_mm_f16_tensor_src: &'static str =
194            include_str!("shaders/dense_mm_f16_tensor.metal");
195        sources.insert("hf2q_dense_mm_f16_f32_tensor".into(), dense_mm_f16_tensor_src);
196
197        // Dense bf16×f32 → f32 GEMV (matrix-vector multiply) — optimized
198        // for M=1 single-token decode.  Port of llama.cpp's
199        // kernel_mul_mv_bf16_f32_4 (bfloat4-vectorized GEMV kernel).
200        // Used in apply_linear_projection_f32 when seq_len=1 and the
201        // weight matrix is BF16, replacing the MM kernel (~2× faster for
202        // M=1 due to better memory bandwidth utilization per thread).
203        let dense_gemv_bf16_src: &'static str =
204            include_str!("shaders/dense_gemv_bf16.metal");
205        sources.insert("hf2q_dense_gemv_bf16_f32_4".into(), dense_gemv_bf16_src);
206
207        // Fused scale-mask-softmax for the non-flash-attention prefill
208        // path.  One row-local threadgroup per (head, query) pair
209        // replaces three separate dispatches (scale, mask-add, softmax);
210        // reads a bf16 mask (-INF at masked positions, matching
211        // flash_attn_prefill_mask.metal) that is shared across heads.
212        let scale_mask_softmax_src: &'static str =
213            include_str!("shaders/scale_mask_softmax.metal");
214        sources.insert("scale_mask_softmax_f32".into(), scale_mask_softmax_src);
215
216        // Expert-routed (MoE) quantized matmul kernel (Story 2.1)
217        sources.insert(
218            "quantized_matmul_id".into(),
219            include_str!("shaders/quantized_matmul_id.metal"),
220        );
221
222        // Expert-routed (MoE) GGML block-format quantized matmul kernels
223        let ggml_id_src: &'static str =
224            include_str!("shaders/quantized_matmul_id_ggml.metal");
225        sources.insert("kernel_mul_mv_id_q4_0_f32".into(), ggml_id_src);
226        sources.insert("kernel_mul_mv_id_q8_0_f32".into(), ggml_id_src);
227        // ADR-013 P7 — Q4_K MoE expert-routed mat-vec (port of
228        // llama.cpp's kernel_mul_mv_id_q4_K_f32 at ggml-metal.metal:10349).
229        sources.insert("kernel_mul_mv_id_q4_K_f32".into(), ggml_id_src);
230        sources.insert("kernel_mul_mv_id_q5_K_f32".into(), ggml_id_src);
231        sources.insert("kernel_mul_mv_id_q6_K_f32".into(), ggml_id_src);
232        // ADR-028 iter-321 — q6_K _id with nr0=2 + cached yl[16]
233        // (peer-pattern port mirroring iter-309's non-_id variant).
234        // Env-gated via HF2Q_Q6K_ID_MV_NR2=1 in dispatch_id_mv.
235        sources.insert("kernel_mul_mv_id_q6_K_f32_nr2".into(), ggml_id_src);
236        // ADR-022 Phase 1 — Q5_1 / IQ4_NL MoE expert-routed mat-vec.
237        sources.insert("kernel_mul_mv_id_q5_1_f32".into(), ggml_id_src);
238        sources.insert("kernel_mul_mv_id_iq4_nl_f32".into(), ggml_id_src);
239        // Fused-SwiGLU mv_id variants (ADR-012 §Optimize / Task #15):
240        // computes y[r][n] = sum_k(dequant(W[expert][n][k]) * silu(gate[r][k]) * up[r][k])
241        // in one dispatch — replaces silu_mul + expert_down sequence.
242        sources.insert("kernel_mul_mv_id_q4_0_f32_swiglu".into(), ggml_id_src);
243
244        // Expert-routed (MoE) GGML block-format QUANTIZED MATRIX-MATRIX kernels
245        // (ADR-011 Phase 3 Wave P3a: port of llama.cpp's
246        // `kernel_mul_mm_id_map0_ne20_N` + `kernel_mul_mm_id_<q>_f32`).
247        // Two-stage dispatch: map0 regroups the token-to-expert table into
248        // per-expert routed-token lists, then mm_id stages a 64x32 expert
249        // weight tile into threadgroup shmem and reuses it across a 32-row
250        // block of that expert's routed tokens.
251        let ggml_id_mm_src: &'static str =
252            include_str!("shaders/quantized_matmul_id_mm.metal");
253        sources.insert("kernel_mul_mm_id_map0_ne20_1".into(), ggml_id_mm_src);
254        sources.insert("kernel_mul_mm_id_map0_ne20_8".into(), ggml_id_mm_src);
255        sources.insert("kernel_mul_mm_id_q4_0_f32".into(), ggml_id_mm_src);
256        sources.insert("kernel_mul_mm_id_q8_0_f32".into(), ggml_id_mm_src);
257        sources.insert("kernel_mul_mm_id_q6_K_f32".into(), ggml_id_mm_src);
258        // ADR-013 P16 — Q4_K mm_id (port of llama.cpp ggml-metal.metal:10169).
259        sources.insert("kernel_mul_mm_id_q4_K_f32".into(), ggml_id_mm_src);
260        // ADR-022 Phase 1 P1.6 — Q5_1 / IQ4_NL mm_id template instantiations.
261        sources.insert("kernel_mul_mm_id_q5_1_f32".into(), ggml_id_mm_src);
262        sources.insert("kernel_mul_mm_id_iq4_nl_f32".into(), ggml_id_mm_src);
263        // ADR-022 Phase 2 — Q5_K mm_id template instantiation.
264        sources.insert("kernel_mul_mm_id_q5_K_f32".into(), ggml_id_mm_src);
265
266        // MoE-routed quantized matrix-matrix kernels — tensor API variant
267        // (ADR-011 Phase 3 Wave P3b-tensor).  Uses the MPP tensor_ops
268        // matmul2d primitive for hardware-tensor-core MMA on M3+.  Only
269        // the mm_id kernel is ported — map0 is a short pre-pass (not
270        // matmul) and continues to use the simdgroup version.
271        let ggml_id_mm_tensor_src: &'static str =
272            include_str!("shaders/quantized_matmul_id_mm_tensor.metal");
273        sources.insert("kernel_mul_mm_id_q4_0_tensor_f32".into(), ggml_id_mm_tensor_src);
274        sources.insert("kernel_mul_mm_id_q8_0_tensor_f32".into(), ggml_id_mm_tensor_src);
275        sources.insert("kernel_mul_mm_id_q6_K_tensor_f32".into(), ggml_id_mm_tensor_src);
276        // ADR-013 P16 — Q4_K tensor-API mm_id.
277        sources.insert("kernel_mul_mm_id_q4_K_tensor_f32".into(), ggml_id_mm_tensor_src);
278        // ADR-022 Phase 1 P1.6 — Q5_1 / IQ4_NL tensor-API mm_id.
279        sources.insert("kernel_mul_mm_id_q5_1_tensor_f32".into(), ggml_id_mm_tensor_src);
280        sources.insert("kernel_mul_mm_id_iq4_nl_tensor_f32".into(), ggml_id_mm_tensor_src);
281        // ADR-022 Phase 2 — Q5_K tensor-API mm_id.
282        sources.insert("kernel_mul_mm_id_q5_K_tensor_f32".into(), ggml_id_mm_tensor_src);
283
284        // Embedding kernels (Story 1.5)
285        let embedding_src: &'static str = include_str!("shaders/embedding.metal");
286        sources.insert("embedding_gather_4bit".into(), embedding_src);
287        sources.insert("embedding_gather_6bit".into(), embedding_src);
288
289        // MoE gate kernel (Story 1.5)
290        let moe_gate_src: &'static str = include_str!("shaders/moe_gate.metal");
291        sources.insert("moe_gate".into(), moe_gate_src);
292
293        // MoE dispatch kernels (Story 1.5)
294        let moe_dispatch_src: &'static str = include_str!("shaders/moe_dispatch.metal");
295        sources.insert("fused_gelu_mul".into(), moe_dispatch_src);
296        sources.insert("moe_swiglu_fused".into(), moe_dispatch_src);
297        sources.insert("moe_swiglu_batch".into(), moe_dispatch_src);
298        sources.insert("moe_swiglu_seq".into(), moe_dispatch_src);
299        sources.insert("moe_accumulate".into(), moe_dispatch_src);
300        sources.insert("moe_weighted_sum".into(), moe_dispatch_src);
301        sources.insert("moe_weighted_sum_seq".into(), moe_dispatch_src);
302        sources.insert("zero_buffer".into(), moe_dispatch_src);
303        sources.insert("naive_matvec_f32".into(), moe_dispatch_src);
304        sources.insert("moe_gather_topk_weights".into(), moe_dispatch_src);
305        // bf16 variants (Phase 2 bf16 activation path)
306        sources.insert("fused_gelu_mul_bf16".into(), moe_dispatch_src);
307        sources.insert("moe_swiglu_seq_bf16".into(), moe_dispatch_src);
308        sources.insert("moe_weighted_sum_seq_bf16_input".into(), moe_dispatch_src);
309        // ADR-020 iter-11h-e3a: backward kernels for moe_weighted_sum_seq.
310        sources.insert(
311            "moe_weighted_sum_seq_backward_outputs_f32".into(),
312            moe_dispatch_src,
313        );
314        sources.insert(
315            "moe_weighted_sum_seq_backward_weights_f32".into(),
316            moe_dispatch_src,
317        );
318        // ADR-020 iter-11h-e3b: fused backward kernel for moe_swiglu_seq.
319        sources.insert(
320            "moe_swiglu_seq_backward_f32".into(),
321            moe_dispatch_src,
322        );
323
324        // Batched KV cache copy kernels
325        let kv_cache_src: &'static str = include_str!("shaders/kv_cache_copy.metal");
326        sources.insert("kv_cache_copy_batch_f32".into(), kv_cache_src);
327        sources.insert("kv_cache_copy_batch_f32_to_f16".into(), kv_cache_src);
328        sources.insert("kv_cache_copy_seq_f32".into(), kv_cache_src);
329        sources.insert("kv_cache_copy_seq_f32_to_f16".into(), kv_cache_src);
330        // Wave P4.11 — fused K+V copy variants
331        sources.insert("kv_cache_copy_seq_f32_kv_dual".into(), kv_cache_src);
332        sources.insert("kv_cache_copy_seq_f32_to_f16_kv_dual".into(), kv_cache_src);
333        // ADR-028 iter-145 — fused single-position K+V copy variants (decode shape)
334        sources.insert("kv_cache_copy_batch_f32_kv_dual".into(), kv_cache_src);
335        sources.insert("kv_cache_copy_batch_f32_to_f16_kv_dual".into(), kv_cache_src);
336        // bf16-source KV cache copy (Phase 2 bf16 activation path)
337        sources.insert("kv_cache_copy_seq_bf16".into(), kv_cache_src);
338
339        // Elementwise and transpose kernels (Story 1.5)
340        let elementwise_src: &'static str = include_str!("shaders/elementwise.metal");
341        sources.insert("elementwise_add_f32".into(), elementwise_src);
342        sources.insert("elementwise_add_f16".into(), elementwise_src);
343        sources.insert("elementwise_mul_f32".into(), elementwise_src);
344        sources.insert("elementwise_mul_f16".into(), elementwise_src);
345        sources.insert("elementwise_add_bf16".into(), elementwise_src);
346        sources.insert("elementwise_mul_bf16".into(), elementwise_src);
347        sources.insert("cast_f16_to_f32".into(), elementwise_src);
348        sources.insert("cast_f32_to_f16".into(), elementwise_src);
349        sources.insert("cast_bf16_to_f32".into(), elementwise_src);
350        sources.insert("cast_f32_to_bf16".into(), elementwise_src);
351        sources.insert("scalar_mul_bf16".into(), elementwise_src);
352        sources.insert("scalar_mul_f32".into(), elementwise_src);
353        sources.insert("embedding_gather_scale_f32".into(), elementwise_src);
354        sources.insert("embedding_gather_scale_batch_f32".into(), elementwise_src);
355        sources.insert("permute_021_bf16".into(), elementwise_src);
356        sources.insert("transpose_last2_bf16".into(), elementwise_src);
357        sources.insert("transpose_last2_f16".into(), elementwise_src);
358        sources.insert("permute_021_f32".into(), elementwise_src);
359        sources.insert("permute_021_bf16_to_f32".into(), elementwise_src);
360        sources.insert("transpose_2d_f32".into(), elementwise_src);
361        sources.insert("transpose_2d_f16".into(), elementwise_src);
362
363        // Attention kernels (Story 1.3)
364        let sdpa_src: &'static str = include_str!("shaders/sdpa.metal");
365        sources.insert("sdpa".into(), sdpa_src);
366        sources.insert("sdpa_bf16".into(), sdpa_src);
367        let sdpa_sliding_src: &'static str = include_str!("shaders/sdpa_sliding.metal");
368        sources.insert("sdpa_sliding".into(), sdpa_sliding_src);
369        sources.insert("sdpa_sliding_bf16".into(), sdpa_sliding_src);
370
371        // Flash-attention tiled prefill kernel (ADR-011 Phase 1).
372        // Ten entry points; all backed by the same shader source.
373        // Pipelines are compiled with function constants via
374        // `get_pipeline_with_bool_constants` — not `get_pipeline`.
375        let flash_attn_prefill_src: &'static str =
376            include_str!("shaders/flash_attn_prefill.metal");
377        // D=256 variants (BQ=32, BK=16, WM=4, WN=1 — 128 threads/threadgroup)
378        sources.insert(
379            "steel_attention_float32_bq32_bk16_bd256_wm4_wn1_maskfloat32".into(),
380            flash_attn_prefill_src,
381        );
382        sources.insert(
383            "steel_attention_float32_bq32_bk16_bd256_wm4_wn1_maskbool_".into(),
384            flash_attn_prefill_src,
385        );
386        sources.insert(
387            "steel_attention_bfloat16_bq32_bk16_bd256_wm4_wn1_maskbfloat16".into(),
388            flash_attn_prefill_src,
389        );
390        sources.insert(
391            "steel_attention_bfloat16_bq32_bk16_bd256_wm4_wn1_maskbool_".into(),
392            flash_attn_prefill_src,
393        );
394        sources.insert(
395            "steel_attention_float16_bq32_bk16_bd256_wm4_wn1_maskfloat16".into(),
396            flash_attn_prefill_src,
397        );
398        sources.insert(
399            "steel_attention_float16_bq32_bk16_bd256_wm4_wn1_maskbool_".into(),
400            flash_attn_prefill_src,
401        );
402        // D=512 variants (BQ=8, BK=8, WM=1, WN=1 — 32 threads/threadgroup)
403        // NOTE: f32 at D=512 is NOT instantiated — threadgroup memory exceeds
404        // the 32 KB Metal limit (candle sdpa.rs:86-94).
405        sources.insert(
406            "steel_attention_bfloat16_bq8_bk8_bd512_wm1_wn1_maskbfloat16".into(),
407            flash_attn_prefill_src,
408        );
409        sources.insert(
410            "steel_attention_bfloat16_bq8_bk8_bd512_wm1_wn1_maskbool_".into(),
411            flash_attn_prefill_src,
412        );
413        sources.insert(
414            "steel_attention_float16_bq8_bk8_bd512_wm1_wn1_maskfloat16".into(),
415            flash_attn_prefill_src,
416        );
417        sources.insert(
418            "steel_attention_float16_bq8_bk8_bd512_wm1_wn1_maskbool_".into(),
419            flash_attn_prefill_src,
420        );
421
422        // Flash attention vector kernels — SIMD-vectorized decode-path SDPA
423        // (ported from llama.cpp flash_attn_ext_vec)
424        let flash_attn_vec_src: &'static str =
425            include_str!("shaders/flash_attn_vec.metal");
426        sources.insert("flash_attn_vec_dk256".into(), flash_attn_vec_src);
427        sources.insert("flash_attn_vec_dk512".into(), flash_attn_vec_src);
428        sources.insert("flash_attn_vec_reduce_dk256".into(), flash_attn_vec_src);
429        sources.insert("flash_attn_vec_reduce_dk512".into(), flash_attn_vec_src);
430        // F16 KV variants (Phase 4a)
431        sources.insert("flash_attn_vec_f16kv_dk256".into(), flash_attn_vec_src);
432        sources.insert("flash_attn_vec_f16kv_dk512".into(), flash_attn_vec_src);
433
434        // RoPE, normalization, activation kernels (Story 1.4)
435        let rope_src: &'static str = include_str!("shaders/rope.metal");
436        sources.insert("rope_f32".into(), rope_src);
437        sources.insert("rope_f16".into(), rope_src);
438        sources.insert("rope_bf16".into(), rope_src);
439        sources.insert("rope_neox_bf16".into(), rope_src);
440        sources.insert("rope_neox_f32".into(), rope_src);
441        let rms_norm_src: &'static str = include_str!("shaders/rms_norm.metal");
442        sources.insert("rms_norm_f32".into(), rms_norm_src);
443        // ADR-028 iter-310 — float4 + simd_sum variants (peer-pattern,
444        // ported from llama.cpp kernel_rms_norm_fuse_impl<float4, 1>).
445        // Env-gated via HF2Q_RMS_NORM_V2=1 in the dispatchers.
446        sources.insert("rms_norm_f32_v2".into(), rms_norm_src);
447        sources.insert("rms_norm_no_scale_f32_v2".into(), rms_norm_src);
448        sources.insert("rms_norm_f16".into(), rms_norm_src);
449        sources.insert("rms_norm_bf16".into(), rms_norm_src);
450        sources.insert("rms_norm_no_scale_bf16".into(), rms_norm_src);
451        sources.insert("rms_norm_no_scale_f32".into(), rms_norm_src);
452        sources.insert("rms_norm_no_scale_f32_dual".into(), rms_norm_src);
453        sources.insert("rms_norm_f32_triple".into(), rms_norm_src);
454        sources.insert("fused_post_attn_triple_norm_f32".into(), rms_norm_src);
455        // ADR-028 iter-217: fused post-FF norm 2 + end-of-layer FINAL
456        // (combines 2 sequential fused_norm_add dispatches into 1 kernel).
457        sources.insert("fused_post_ff_norm2_endlayer_f32".into(), rms_norm_src);
458        sources.insert("rms_norm_no_scale_f32_dual_perm".into(), rms_norm_src);
459        // Fused RMS norm + elementwise multiply kernels (Phase 4e.2)
460        sources.insert("rms_norm_mul_f32".into(), rms_norm_src);
461        sources.insert("rms_norm_mul_f16".into(), rms_norm_src);
462        sources.insert("rms_norm_mul_bf16".into(), rms_norm_src);
463        // L2 norm kernels (ADR-013 Decision 3 — Gated DeltaNet Q/K norm)
464        let l2_norm_src: &'static str = include_str!("shaders/l2_norm.metal");
465        sources.insert("l2_norm_f32".into(), l2_norm_src);
466        sources.insert("l2_norm_f16".into(), l2_norm_src);
467        sources.insert("l2_norm_bf16".into(), l2_norm_src);
468        // Cumulative-sum kernels (ADR-013 Decision 4 — DeltaNet decay-mask base)
469        let cumsum_src: &'static str = include_str!("shaders/cumsum.metal");
470        sources.insert("cumsum_f32".into(), cumsum_src);
471        sources.insert("cumsum_bf16".into(), cumsum_src);
472        // SSM conv kernels (ADR-013 Decision 7 — DeltaNet 1D causal conv + SiLU)
473        let ssm_conv_src: &'static str = include_str!("shaders/ssm_conv.metal");
474        sources.insert("ssm_conv_forward_f32".into(), ssm_conv_src);
475        sources.insert("ssm_conv_forward_bf16".into(), ssm_conv_src);
476        sources.insert("ssm_conv_state_update_f32".into(), ssm_conv_src);
477        sources.insert("ssm_conv_state_update_bf16".into(), ssm_conv_src);
478        // Tri-solve kernels (ADR-013 Decision 5 — chunked DeltaNet debug path)
479        let tri_solve_src: &'static str = include_str!("shaders/tri_solve.metal");
480        sources.insert("tri_solve_lower_unit_f32".into(), tri_solve_src);
481        sources.insert("tri_solve_lower_unit_bf16".into(), tri_solve_src);
482        // Rope-multi kernels (ADR-013 Decision 10 — IMROPE for Qwen3.5)
483        let rope_multi_src: &'static str = include_str!("shaders/rope_multi.metal");
484        sources.insert("rope_multi_f32".into(), rope_multi_src);
485        sources.insert("rope_multi_bf16".into(), rope_multi_src);
486        // Gated DeltaNet fused kernel (ADR-013 Decision 6 — centerpiece)
487        let gdn_src: &'static str = include_str!("shaders/gated_delta_net.metal");
488        sources.insert("gated_delta_net_f32".into(), gdn_src);
489        // ADR-015 iter56 — decode-only `simd_sum` variant. Three NSG-templated
490        // host names share the same source; selection is by D_k via
491        // `dispatch_gated_delta_net_decode`. Drop-in for the fused kernel
492        // above when n_tokens=1.
493        let gdn_decode_src: &'static str =
494            include_str!("shaders/gated_delta_net_decode.metal");
495        sources.insert("gated_delta_net_decode_f32_1".into(), gdn_decode_src);
496        sources.insert("gated_delta_net_decode_f32_2".into(), gdn_decode_src);
497        sources.insert("gated_delta_net_decode_f32_4".into(), gdn_decode_src);
498        // Wave 5b — chunk-parallel inter-chunk state-recurrence kernel
499        // (the one new kernel in the chunk-parallel pipeline; spec source:
500        // arXiv 2412.06464 §4 + FLA chunk_delta_h.py:43-298).
501        let gdn_chunk_src: &'static str =
502            include_str!("shaders/gated_delta_net_chunk.metal");
503        sources.insert(
504            "gated_delta_net_chunk_inter_state_bf16".into(),
505            gdn_chunk_src,
506        );
507        // Wave 5b.1 iter 2 — chunk_scaled_dot_kkt kernel (input-side of
508        // the chunk pipeline; spec source: FLA chunk_scaled_dot_kkt.py:36-99).
509        let gdn_kkt_src: &'static str =
510            include_str!("shaders/gated_delta_net_kkt.metal");
511        sources.insert("gated_delta_net_kkt_bf16".into(), gdn_kkt_src);
512        // Wave 5b.1 iter 2 — recompute_w_u_fwd kernel (applies post-solve A
513        // to (β·v) and (β·k·exp(g)) to produce w and u; spec source: FLA
514        // wy_fast.py:29-117).
515        let gdn_recompute_wu_src: &'static str =
516            include_str!("shaders/gated_delta_net_recompute_wu.metal");
517        sources.insert(
518            "gated_delta_net_recompute_wu_bf16".into(),
519            gdn_recompute_wu_src,
520        );
521        // Wave 5b.1 iter 3 — chunk_fwd_o kernel (per-chunk output: closes
522        // the chunk pipeline; spec source: FLA chunk_o.py:42-138).
523        let gdn_chunk_o_src: &'static str =
524            include_str!("shaders/gated_delta_net_chunk_o.metal");
525        sources.insert("gated_delta_net_chunk_o_bf16".into(), gdn_chunk_o_src);
526        // Wave 5b.1 iter 4 — orchestrator helper kernels:
527        //   chunk_local_cumsum_g_f32      — per-chunk prefix sum on g [B, T, H]
528        //   chunk_tri_solve_invert_f32    — per-chunk-block (I + A_strict)^-1
529        //                                   on FLA's [B, T, H, BT] layout.
530        let chunk_local_cumsum_g_src: &'static str =
531            include_str!("shaders/chunk_local_cumsum_g.metal");
532        sources.insert(
533            "chunk_local_cumsum_g_f32".into(),
534            chunk_local_cumsum_g_src,
535        );
536        let chunk_tri_solve_invert_src: &'static str =
537            include_str!("shaders/chunk_gated_delta_rule_tri_solve_invert.metal");
538        sources.insert(
539            "chunk_tri_solve_invert_f32".into(),
540            chunk_tri_solve_invert_src,
541        );
542        // Sigmoid-gated elementwise multiply (ADR-013 Decision 9 — full-attn output gate)
543        let sigmoid_mul_src: &'static str = include_str!("shaders/sigmoid_mul.metal");
544        sources.insert("sigmoid_mul_f32".into(), sigmoid_mul_src);
545        sources.insert("sigmoid_mul_bf16".into(), sigmoid_mul_src);
546        let silu_mul_src: &'static str = include_str!("shaders/silu_mul.metal");
547        sources.insert("silu_mul_f32".into(), silu_mul_src);
548        let compute_g_beta_src: &'static str = include_str!("shaders/compute_g_beta.metal");
549        sources.insert("compute_g_beta_f32".into(), compute_g_beta_src);
550        let ssm_norm_gate_src: &'static str = include_str!("shaders/ssm_norm_gate.metal");
551        sources.insert("ssm_norm_gate_f32".into(), ssm_norm_gate_src);
552        let gelu_src: &'static str = include_str!("shaders/gelu.metal");
553        sources.insert("gelu_f32".into(), gelu_src);
554        sources.insert("gelu_f16".into(), gelu_src);
555        sources.insert("gelu_bf16".into(), gelu_src);
556        let softmax_src: &'static str = include_str!("shaders/softmax.metal");
557        sources.insert("softmax_f32".into(), softmax_src);
558        sources.insert("softmax_f16".into(), softmax_src);
559        sources.insert("softmax_bf16".into(), softmax_src);
560        let softmax_backward_src: &'static str =
561            include_str!("shaders/softmax_backward.metal");
562        sources.insert("softmax_backward_f32".into(), softmax_backward_src);
563        let log_elementwise_src: &'static str =
564            include_str!("shaders/log_elementwise.metal");
565        sources.insert("log_f32".into(), log_elementwise_src);
566        sources.insert("log_backward_f32".into(), log_elementwise_src);
567        let row_sum_src: &'static str = include_str!("shaders/row_sum.metal");
568        sources.insert("row_sum_f32".into(), row_sum_src);
569        sources.insert("row_sum_backward_f32".into(), row_sum_src);
570        // ADR-020 iter-10a: GGUF-legacy quantize-dequantize round-trip kernels
571        // (Q4_0 + Q8_0).  Used by hf2q's dynamic_quant Track 1 to produce
572        // W_low / W_high for the gradient-Taylor sensitivity formula.
573        let qdq_legacy_src: &'static str = include_str!("shaders/qdq_legacy.metal");
574        sources.insert("qdq_q4_0_f32".into(), qdq_legacy_src);
575        sources.insert("qdq_q8_0_f32".into(), qdq_legacy_src);
576        // ADR-020 iter-10b: RMSNorm reverse-mode autograd kernels.
577        // r_inv helper is reused by both backward kernels; dx and dw cover
578        // the full backward identity for `y = x * rsqrt(mean(x²) + eps) * w`.
579        let rms_norm_backward_src: &'static str =
580            include_str!("shaders/rms_norm_backward.metal");
581        sources.insert(
582            "rms_norm_compute_rms_inv_f32".into(),
583            rms_norm_backward_src,
584        );
585        sources.insert("rms_norm_backward_dx_f32".into(), rms_norm_backward_src);
586        sources.insert("rms_norm_backward_dw_f32".into(), rms_norm_backward_src);
587        // ADR-020 iter-11a: 2-D row-major slice + concat-by-column kernels.
588        // Used by hf2q's multi-head SDPA on GpuTape (slice Q/K/V into
589        // per-head views, run per-head SDPA, concat per-head contexts
590        // back to full attention output).
591        let slice_concat_2d_src: &'static str =
592            include_str!("shaders/slice_concat_2d.metal");
593        sources.insert("slice_2d_cols_f32".into(), slice_concat_2d_src);
594        sources.insert("copy_2d_cols_into_f32".into(), slice_concat_2d_src);
595        // ADR-020 iter-11b: SiLU forward + backward kernels for GpuTape
596        // SwiGLU FFN composition.
597        let silu_backward_src: &'static str =
598            include_str!("shaders/silu_backward.metal");
599        sources.insert("silu_f32".into(), silu_backward_src);
600        sources.insert("silu_backward_f32".into(), silu_backward_src);
601        // ADR-020 iter-11d: FP32 embedding lookup + scatter-add backward.
602        let embedding_autograd_src: &'static str =
603            include_str!("shaders/embedding_autograd.metal");
604        sources.insert("embedding_lookup_f32".into(), embedding_autograd_src);
605        sources.insert(
606            "embedding_scatter_add_f32".into(),
607            embedding_autograd_src,
608        );
609        // ADR-020 iter-13a: Adam optimizer step kernel for Track 2
610        // DWQ-proper training loop.
611        let adam_update_src: &'static str =
612            include_str!("shaders/adam_update.metal");
613        sources.insert("adam_update_f32".into(), adam_update_src);
614        // ADR-020 iter-13b: differentiable affine qdq kernels for the
615        // DWQ-proper training loop.  Init + forward + backward (scales,
616        // biases) — q_int is FROZEN, scales+biases learnable.
617        let qdq_affine_src: &'static str =
618            include_str!("shaders/qdq_affine.metal");
619        sources.insert("qdq_affine_init_f32".into(), qdq_affine_src);
620        sources.insert("qdq_affine_forward_f32".into(), qdq_affine_src);
621        sources.insert(
622            "qdq_affine_backward_scales_f32".into(),
623            qdq_affine_src,
624        );
625        sources.insert(
626            "qdq_affine_backward_biases_f32".into(),
627            qdq_affine_src,
628        );
629        // ADR-020 iter-15: fused affine quantized matmul for DWQ inference.
630        // Per-element kernel; one thread per (m, n) output element.
631        // Tiled + simdgroup-MMA variant lands in iter-15b.
632        let qmm_affine_src: &'static str =
633            include_str!("shaders/qmm_affine.metal");
634        sources.insert("qmm_affine_t_f32".into(), qmm_affine_src);
635        // ADR-020 iter-15b: tiled variant — 16x16 thread block with
636        // cooperative-load X/W tiles in threadgroup-shared memory for
637        // 2-5x speedup over the per-element kernel.
638        let qmm_affine_tiled_src: &'static str =
639            include_str!("shaders/qmm_affine_tiled.metal");
640        sources.insert(
641            "qmm_affine_t_f32_tiled".into(),
642            qmm_affine_tiled_src,
643        );
644        // ADR-020 iter-15c: simdgroup-MMA variant — uses Apple GPU
645        // hardware `simdgroup_matrix<float, 8, 8>` MMA for the inner
646        // reduction.  Per-tile algorithmic 8× over scalar tiled, lands
647        // as ~3-4× wall after launch / load amortization.
648        let qmm_affine_simd_src: &'static str =
649            include_str!("shaders/qmm_affine_simd.metal");
650        sources.insert(
651            "qmm_affine_t_f32_simd".into(),
652            qmm_affine_simd_src,
653        );
654        // ADR-020 iter-15c-2: 4-simdgroup-per-TG variant — 32×32
655        // output tile, 4 simdgroups arranged as 2×2 grid each owning
656        // a 16×16 sub-tile = 4 simdgroup_matrix accumulators.  Same
657        // math as 15c-1, fuller warp-pool exploitation.
658        let qmm_affine_simd4_src: &'static str =
659            include_str!("shaders/qmm_affine_simd4.metal");
660        sources.insert(
661            "qmm_affine_t_f32_simd4".into(),
662            qmm_affine_simd4_src,
663        );
664        // ADR-020 iter-15c-2b: gs=64 variant (mlx-lm dynamic_quant
665        // canonical default).  Same 4-simdgroup geometry, BK=64
666        // instead of 32 (= 8 sub-K-tiles per K-step instead of 4).
667        let qmm_affine_simd4_gs64_src: &'static str =
668            include_str!("shaders/qmm_affine_simd4_gs64.metal");
669        sources.insert(
670            "qmm_affine_t_f32_simd4_gs64".into(),
671            qmm_affine_simd4_gs64_src,
672        );
673        // ADR-020 AC#5 Iter A: packed-U32 dense affine matmul (bits=4,
674        // gs=32) — production decode/prefill kernel for serving DWQ
675        // safetensors directly without a load-time unpack pass.
676        let qmm_affine_t_packed_simd4_b4_src: &'static str =
677            include_str!("shaders/qmm_affine_t_packed_simd4_b4.metal");
678        sources.insert(
679            "qmm_affine_t_packed_simd4_b4".into(),
680            qmm_affine_t_packed_simd4_b4_src,
681        );
682        // ADR-020 iter-11h-b: training-mode causal depthwise 1D
683        // convolution (forward + backward dx + backward dw).  Used by
684        // GpuTape autograd for differentiable Qwen3.5MoE forward
685        // (GatedDeltaNet's conv1d step).
686        let conv1d_dwc_src: &'static str =
687            include_str!("shaders/conv1d_depthwise_causal.metal");
688        sources.insert(
689            "conv1d_depthwise_causal_forward_f32".into(),
690            conv1d_dwc_src,
691        );
692        sources.insert(
693            "conv1d_depthwise_causal_backward_dx_f32".into(),
694            conv1d_dwc_src,
695        );
696        sources.insert(
697            "conv1d_depthwise_causal_backward_dw_f32".into(),
698            conv1d_dwc_src,
699        );
700        // ADR-020 iter-11h-c1: elementwise exp forward + backward.
701        // Building block for GatedDeltaNet's alpha = exp(-g) state-decay.
702        let exp_src: &'static str =
703            include_str!("shaders/exp_elementwise.metal");
704        sources.insert("exp_f32".into(), exp_src);
705        sources.insert("exp_backward_f32".into(), exp_src);
706        // ADR-020 iter-11h-c2: vector outer product (forward + dlhs +
707        // drhs).  Building block for gated_delta_update's
708        // outer(delta, k) state-update term.
709        let outer_src: &'static str =
710            include_str!("shaders/outer_product.metal");
711        sources.insert("outer_product_f32".into(), outer_src);
712        sources.insert("outer_product_backward_lhs_f32".into(), outer_src);
713        sources.insert("outer_product_backward_rhs_f32".into(), outer_src);
714        // ADR-020 iter-11h-e1: take_along_axis (gather) + scatter-backward.
715        // Building block for MoE router on GpuTape.
716        let taa_src: &'static str =
717            include_str!("shaders/take_along_axis.metal");
718        sources.insert("take_along_axis_f32".into(), taa_src);
719        sources.insert("take_along_axis_backward_f32".into(), taa_src);
720        // ADR-020 iter-11h-misc-1: elementwise divide forward + backward.
721        let div_src: &'static str =
722            include_str!("shaders/divide_elementwise.metal");
723        sources.insert("divide_f32".into(), div_src);
724        sources.insert("divide_backward_f32".into(), div_src);
725        // ADR-020 iter-11h-misc-3: elementwise sqrt forward + backward.
726        let sqrt_src: &'static str =
727            include_str!("shaders/sqrt_elementwise.metal");
728        sources.insert("sqrt_f32".into(), sqrt_src);
729        sources.insert("sqrt_backward_f32".into(), sqrt_src);
730        let softcap_src: &'static str = include_str!("shaders/softcap.metal");
731        sources.insert("softcap_f32".into(), softcap_src);
732        sources.insert("softcap_f16".into(), softcap_src);
733        sources.insert("softcap_bf16".into(), softcap_src);
734
735        // Fused norm-add kernels — Gemma4 post-attention / post-FFN ordering:
736        //   normed = rms_norm(input, weight, eps);  output = residual + normed
737        let fused_norm_add_src: &'static str =
738            include_str!("shaders/fused_norm_add_bf16.metal");
739        sources.insert("fused_norm_add_bf16".into(), fused_norm_add_src);
740        sources.insert("fused_norm_add_no_weight_bf16".into(), fused_norm_add_src);
741
742        // Fused head-norm + RoPE f32 kernel — replaces separate rms_norm + rope_neox_f32
743        let fused_hnr_f32_src: &'static str =
744            include_str!("shaders/fused_head_norm_rope_f32.metal");
745        sources.insert("fused_head_norm_rope_f32".into(), fused_hnr_f32_src);
746        // ADR-028 iter-337 — float4 + simd_sum Phase 1 variant.  Phases
747        // 2-4 byte-identical to v1; race-fix barrier preserved.  Env-gated
748        // via HF2Q_FUSED_HEAD_NORM_ROPE_V2 (default ON, opt-out via =0).
749        sources.insert("fused_head_norm_rope_f32_v2".into(), fused_hnr_f32_src);
750
751        // Fused head-norm + RoPE bf16 kernels (single-token + batch prefill)
752        // Both entry points live in the same .metal file.
753        let fused_hnr_bf16_src: &'static str =
754            include_str!("shaders/fused_head_norm_rope_bf16.metal");
755        sources.insert("fused_head_norm_rope_bf16".into(), fused_hnr_bf16_src);
756        sources.insert("fused_head_norm_rope_batch_bf16".into(), fused_hnr_bf16_src);
757
758        // Fused norm-add f32 kernels — post-attention / post-FFN / end-of-layer
759        let fused_norm_add_f32_src: &'static str =
760            include_str!("shaders/fused_norm_add_f32.metal");
761        sources.insert("fused_norm_add_f32".into(), fused_norm_add_f32_src);
762        // ADR-028 iter-331 — float4 + simd_sum variant (peer-pattern,
763        // ported from llama.cpp kernel_rms_norm_fuse_impl<float4, 3>).
764        // Env-gated via HF2Q_FUSED_NORM_ADD_V2=1 in the dispatcher
765        // (default ON since iter-331; opt-out via =0/false/off).
766        sources.insert("fused_norm_add_f32_v2".into(), fused_norm_add_f32_src);
767        sources.insert("fused_residual_norm_f32".into(), fused_norm_add_f32_src);
768        sources.insert("fused_residual_norm_scalar_f32".into(), fused_norm_add_f32_src);
769        sources.insert("fused_moe_routing_f32".into(), fused_norm_add_f32_src);
770        sources.insert("fused_moe_routing_batch_f32".into(), fused_norm_add_f32_src);
771        sources.insert("fused_norm_add_scalar_f32".into(), fused_norm_add_f32_src);
772        sources.insert("fused_moe_wsum_norm_add_f32".into(), fused_norm_add_f32_src);
773        sources.insert("fused_moe_wsum_dnorm_add_f32".into(), fused_norm_add_f32_src);
774
775        // Argsort kernel (Story 2.3) — MoE top-K routing
776        let argsort_src: &'static str = include_str!("shaders/argsort.metal");
777        sources.insert("argsort_desc_f32".into(), argsort_src);
778
779        // Gather / index_select kernel (Story 2.4)
780        let gather_src: &'static str = include_str!("shaders/gather.metal");
781        sources.insert("gather_f32".into(), gather_src);
782
783        // F32 KV cache copy kernel (Session merge S1+S2)
784        let kv_cache_copy_src: &'static str =
785            include_str!("shaders/kv_cache_copy.metal");
786        sources.insert("kv_cache_copy".into(), kv_cache_copy_src);
787        sources.insert("kv_cache_copy_f32".into(), kv_cache_copy_src);
788
789        // Strided copy kernel (Story 2.5)
790        let copy_src: &'static str = include_str!("shaders/copy.metal");
791        sources.insert("strided_copy_f32".into(), copy_src);
792        sources.insert("offset_copy_f32".into(), copy_src);
793
794        // Fused-QKV split kernel (ADR-005 W-5b.18 — replaces hf2q CPU
795        // download → triple-loop split → 3× upload round-trip in
796        // gpu_delta_net::layer_qkv_deinterleave).
797        let qkv_split_src: &'static str = include_str!("shaders/qkv_split.metal");
798        sources.insert("qkv_split_f32".into(), qkv_split_src);
799
800        // Tiled-GQA broadcast kernel (ADR-005 W-5b.19 — replaces hf2q CPU
801        // tiled-replicate at gpu_delta_net::apply_gated_delta_net_chunk
802        // GQA pre-expansion, ~497 ms / 10.4 ms-per-layer at PP4106).
803        let repeat_tiled_src: &'static str =
804            include_str!("shaders/repeat_tiled.metal");
805        sources.insert("repeat_tiled_f32".into(), repeat_tiled_src);
806
807        // Dense F16 GEMM kernel (Story 2.6) — lm_head projection
808        let dense_gemm_src: &'static str = include_str!("shaders/dense_gemm.metal");
809        sources.insert("dense_gemm_f16".into(), dense_gemm_src);
810        sources.insert("dense_matvec_f16".into(), dense_gemm_src);
811        sources.insert("dense_matvec_f16w_f32io".into(), dense_gemm_src);
812        // BF16-weight mat-vec: BF16 weights × F32 input → F32 output (decode lm_head)
813        sources.insert("dense_matvec_bf16w_f32io".into(), dense_gemm_src);
814        // Pure F32 mat-vec: F32 weights × F32 input → F32 output (decode lm_head)
815        sources.insert("dense_matvec_f32".into(), dense_gemm_src);
816
817        // Standalone FWHT for TurboQuant pre/post-rotation (SIMD shuffle, zero barriers)
818        let fwht_src: &'static str = include_str!("shaders/fwht_standalone.metal");
819        sources.insert("fwht_standalone_f32_d256".into(), fwht_src);
820        sources.insert("fwht_standalone_f32_d512".into(), fwht_src);
821        // ADR-007 iter-14 D1 SRHT variants: sign pre-mult (for Q) + sign undo (for output)
822        sources.insert("fwht_sign_premult_f32_d256".into(), fwht_src);
823        sources.insert("fwht_sign_premult_f32_d512".into(), fwht_src);
824        sources.insert("fwht_sign_undo_f32_d256".into(), fwht_src);
825        sources.insert("fwht_sign_undo_f32_d512".into(), fwht_src);
826
827        // Fast Hadamard quantize (SIMD shuffle, zero barriers)
828        let hq_fast_src: &'static str = include_str!("shaders/hadamard_quantize_kv_fast.metal");
829        sources.insert("hadamard_quantize_kv_fast_d256".into(), hq_fast_src);
830        sources.insert("hadamard_quantize_kv_fast_d512".into(), hq_fast_src);
831        // Track B (iter-21): higher-bit (5/6-bit) quantize kernels (byte-packed)
832        sources.insert("hadamard_quantize_kv_hb_d256".into(), hq_fast_src);
833        sources.insert("hadamard_quantize_kv_hb_d512".into(), hq_fast_src);
834        // ADR-028 iter-148: fused K+V single-position HB encoder
835        sources.insert("hadamard_quantize_kv_hb_dual_d256".into(), hq_fast_src);
836        sources.insert("hadamard_quantize_kv_hb_dual_d512".into(), hq_fast_src);
837
838        // iter-20 Leg F: TQ KV dequantize kernel (nibbles+norms → F32)
839        let tq_dq_src: &'static str = include_str!("shaders/tq_dequantize_kv.metal");
840        sources.insert("tq_dequantize_kv".into(), tq_dq_src);
841        // Track B (iter-21): higher-bit dequantize kernel (byte-packed indices)
842        sources.insert("tq_dequantize_hb_kv".into(), tq_dq_src);
843        // ADR-027 Phase B iter-30 (hf2q sub-sub-iter 23c-β.1): sequence-batch
844        // dequant variant. Same MSL source; new kernel entry point
845        // `tq_dequantize_hb_kv_seq` reads positions [start_pos..start_pos+n_tokens)
846        // in one dispatch (one threadgroup per (kv_head, position)). Unblocks
847        // hf2q's TQ-aware prefill SDPA path (current per-position kernel
848        // requires cur_len separate dispatches).
849        sources.insert("tq_dequantize_hb_kv_seq".into(), tq_dq_src);
850
851        // iter-24: native higher-bit (5/6/8-bit) TQ SDPA kernel (byte-packed K/V)
852        let tq_hb_src: &'static str = include_str!("shaders/flash_attn_vec_tq_hb.metal");
853        sources.insert("flash_attn_vec_tq_hb_dk256".into(), tq_hb_src);
854        sources.insert("flash_attn_vec_tq_hb_dk512".into(), tq_hb_src);
855
856        // GPU sampling kernels — eliminate logits readback (Phase 6)
857        let argmax_src: &'static str = include_str!("shaders/argmax.metal");
858        sources.insert("argmax_f32".into(), argmax_src);
859        let softmax_sample_src: &'static str =
860            include_str!("shaders/softmax_sample.metal");
861        sources.insert("softmax_sample_f32".into(), softmax_sample_src);
862        // Top-K kernel for Q8 rerank: avoids full-logits readback.
863        let top_k_src: &'static str = include_str!("shaders/top_k.metal");
864        sources.insert("top_k_f32".into(), top_k_src);
865
866        // MoE GPU routing + weighted reduce (ADR-013 P13.3 perf).
867        // Replaces CPU softmax+topk round-trip and CPU weighted accumulate.
868        let moe_stk_src: &'static str =
869            include_str!("shaders/moe_softmax_topk.metal");
870        sources.insert("moe_softmax_topk_f32".into(), moe_stk_src);
871        let moe_wr_src: &'static str =
872            include_str!("shaders/moe_weighted_reduce.metal");
873        sources.insert("moe_weighted_reduce_f32".into(), moe_wr_src);
874        let sdpa_decode_src: &'static str =
875            include_str!("shaders/sdpa_decode.metal");
876        sources.insert("sdpa_decode".into(), sdpa_decode_src);
877
878        Self {
879            cache: HashMap::new(),
880            sources,
881        }
882    }
883
884    /// Register a shader source at runtime (useful for testing and dynamic
885    /// kernel generation).
886    pub fn register_source(&mut self, name: impl Into<String>, source: &'static str) {
887        let name = name.into();
888        // Invalidate any cached pipeline for this name since the source changed.
889        self.cache.remove(&name);
890        self.sources.insert(name, source);
891    }
892
893    /// Get a compiled compute pipeline for the named kernel function.
894    ///
895    /// On first call for a given name, this compiles the MSL source into a
896    /// Metal library, extracts the named function, and creates a
897    /// `ComputePipelineState`.  Subsequent calls return the cached pipeline.
898    ///
899    /// # Errors
900    ///
901    /// * `MlxError::KernelNotFound` — no source registered for this name.
902    /// * `MlxError::ShaderCompilationError` — MSL compilation or pipeline
903    ///   creation failed.
904    pub fn get_pipeline(
905        &mut self,
906        name: &str,
907        device: &metal::DeviceRef,
908    ) -> Result<&ComputePipelineState> {
909        if !self.cache.contains_key(name) {
910            // Slow path: compile the shader.
911            let source = self.sources.get(name).ok_or_else(|| {
912                MlxError::KernelNotFound(name.to_string())
913            })?;
914
915            let compile_opts = metal::CompileOptions::new();
916            let library = device
917                .new_library_with_source(source, &compile_opts)
918                .map_err(|msg| MlxError::ShaderCompilationError {
919                    name: name.to_string(),
920                    message: msg,
921                })?;
922
923            let function = library
924                .get_function(name, None)
925                .map_err(|msg| MlxError::ShaderCompilationError {
926                    name: name.to_string(),
927                    message: msg,
928                })?;
929
930            // Build the pipeline through a descriptor so we can attach a
931            // human-readable label.  The label propagates into Instruments /
932            // xctrace Metal System Trace as the per-pipeline identifier
933            // (`metal-object-label` schema), giving us per-kernel attribution
934            // instead of the generic "Compute Command 0" placeholder.
935            //
936            // `MTLComputePipelineState.label` is read-only after creation per
937            // the Apple Metal spec; the only supported way to set it is via
938            // the descriptor before pipeline creation.  ADR-015 iter9b.
939            let descriptor = ComputePipelineDescriptor::new();
940            descriptor.set_compute_function(Some(&function));
941            descriptor.set_label(name);
942
943            let pipeline = device
944                .new_compute_pipeline_state(&descriptor)
945                .map_err(|msg| MlxError::ShaderCompilationError {
946                    name: name.to_string(),
947                    message: msg,
948                })?;
949
950            self.cache.insert(name.to_string(), pipeline);
951        }
952
953        // At this point the pipeline is guaranteed to be in the cache.
954        // We use `ok_or_else` instead of `expect` to satisfy the no-panic policy.
955        self.cache.get(name).ok_or_else(|| {
956            MlxError::KernelNotFound(name.to_string())
957        })
958    }
959
960    /// Get a compiled compute pipeline for the named kernel, specialized with
961    /// Metal function constants (both bool and i32 in one call).
962    ///
963    /// `bool_constants` contains `(index, value)` pairs mapping to
964    /// `[[function_constant(index)]]` bool declarations in the MSL shader.
965    /// `int_constants` contains `(index, value)` pairs mapping to
966    /// `[[function_constant(index)]]` int (int32_t) declarations in the MSL
967    /// shader.
968    ///
969    /// Pipelines are cached by a composite key:
970    /// `"<name>|<index>:b<0|1>|...|<index>:i<value>|..."`.  The 'b' prefix
971    /// marks bool entries and the 'i' prefix marks i32 entries, making the
972    /// format unambiguous regardless of constant ordering.  Distinct
973    /// `(name, constants)` combinations each compile to a separate pipeline;
974    /// the slow compilation path runs at most once per unique combination.
975    ///
976    /// # Errors
977    ///
978    /// * `MlxError::KernelNotFound` — no source registered for this name.
979    /// * `MlxError::ShaderCompilationError` — MSL compilation, function
980    ///   specialisation, or pipeline creation failed.
981    pub fn get_pipeline_with_constants(
982        &mut self,
983        name: &str,
984        device: &metal::DeviceRef,
985        bool_constants: &[(usize, bool)],
986        int_constants: &[(usize, i32)],
987    ) -> Result<&ComputePipelineState> {
988        // Build a composite cache key so distinct constant combinations each
989        // compile to their own pipeline.  Bool entries use the 'b' type marker
990        // and i32 entries use 'i'; this prevents a collision between, e.g.,
991        // bool index 5 value 1 and int index 5 value 1.
992        let mut cache_key = name.to_string();
993        for &(index, value) in bool_constants {
994            cache_key.push('|');
995            cache_key.push_str(&index.to_string());
996            cache_key.push_str(if value { ":b1" } else { ":b0" });
997        }
998        for &(index, value) in int_constants {
999            cache_key.push('|');
1000            cache_key.push_str(&index.to_string());
1001            cache_key.push(':');
1002            cache_key.push('i');
1003            cache_key.push_str(&value.to_string());
1004        }
1005
1006        if !self.cache.contains_key(&cache_key) {
1007            // Slow path: compile the shader with function constant specialisation.
1008            let source = self.sources.get(name).ok_or_else(|| {
1009                MlxError::KernelNotFound(name.to_string())
1010            })?;
1011
1012            let compile_opts = metal::CompileOptions::new();
1013            let library = device
1014                .new_library_with_source(source, &compile_opts)
1015                .map_err(|msg| MlxError::ShaderCompilationError {
1016                    name: name.to_string(),
1017                    message: msg,
1018                })?;
1019
1020            // Build the FunctionConstantValues object with all bool and i32
1021            // constants.  Metal's set_constant_value_at_index reads the value
1022            // through a raw pointer; the pointed-to bytes must match the size
1023            // declared in the MSL shader (1 byte for bool, 4 bytes for int).
1024            let fcv = FunctionConstantValues::new();
1025
1026            for &(index, value) in bool_constants {
1027                // MTLDataType::Bool = 53 (metal-rs argument.rs).
1028                // The Metal runtime reads it as an Objective-C BOOL (uint8_t).
1029                let v: u8 = if value { 1 } else { 0 };
1030                fcv.set_constant_value_at_index(
1031                    (&v as *const u8).cast::<std::ffi::c_void>(),
1032                    MTLDataType::Bool,
1033                    index as u64,
1034                );
1035            }
1036
1037            for &(index, value) in int_constants {
1038                // MTLDataType::Int = 29 (metal-rs argument.rs).
1039                // The Metal runtime reads 4 bytes as a signed 32-bit integer,
1040                // matching the Metal shader type `constant int`.
1041                fcv.set_constant_value_at_index(
1042                    (&value as *const i32).cast::<std::ffi::c_void>(),
1043                    MTLDataType::Int,
1044                    index as u64,
1045                );
1046            }
1047
1048            let function = library
1049                .get_function(name, Some(fcv))
1050                .map_err(|msg| MlxError::ShaderCompilationError {
1051                    name: name.to_string(),
1052                    message: msg,
1053                })?;
1054
1055            // Label this specialisation with the full composite cache key
1056            // (e.g. `kernel_mul_mv_q4_0_f32|0:b1|3:i32`) so xctrace Metal
1057            // System Trace shows each function-constant variant as a distinct
1058            // pipeline.  Without this, all specialisations share a generic
1059            // "Compute Command 0" identifier and we cannot attribute µs/token
1060            // to a specific (kernel, constants) combination.  ADR-015 iter9b.
1061            let descriptor = ComputePipelineDescriptor::new();
1062            descriptor.set_compute_function(Some(&function));
1063            descriptor.set_label(&cache_key);
1064
1065            let pipeline = device
1066                .new_compute_pipeline_state(&descriptor)
1067                .map_err(|msg| MlxError::ShaderCompilationError {
1068                    name: name.to_string(),
1069                    message: msg,
1070                })?;
1071
1072            self.cache.insert(cache_key.clone(), pipeline);
1073        }
1074
1075        self.cache.get(&cache_key).ok_or_else(|| {
1076            MlxError::KernelNotFound(name.to_string())
1077        })
1078    }
1079
1080    /// Get a compiled compute pipeline for the named kernel, specialized with
1081    /// Metal bool function constants.
1082    ///
1083    /// The `bool_constants` slice contains `(index, value)` pairs.  Each pair
1084    /// maps to a `[[function_constant(index)]]` declaration in the MSL shader.
1085    ///
1086    /// This is a thin wrapper around [`get_pipeline_with_constants`] that
1087    /// passes an empty `int_constants` slice.  Existing callers continue to
1088    /// work without modification; the cache-key format for pure-bool pipelines
1089    /// is compatible (bool entries carry the 'b' type marker, which is the
1090    /// only format ever written by this wrapper).
1091    ///
1092    /// # Errors
1093    ///
1094    /// * `MlxError::KernelNotFound` — no source registered for this name.
1095    /// * `MlxError::ShaderCompilationError` — MSL compilation, function
1096    ///   specialisation, or pipeline creation failed.
1097    pub fn get_pipeline_with_bool_constants(
1098        &mut self,
1099        name: &str,
1100        device: &metal::DeviceRef,
1101        bool_constants: &[(usize, bool)],
1102    ) -> Result<&ComputePipelineState> {
1103        self.get_pipeline_with_constants(name, device, bool_constants, &[])
1104    }
1105
1106    /// Check if a pipeline for the given name is already compiled and cached.
1107    pub fn is_cached(&self, name: &str) -> bool {
1108        self.cache.contains_key(name)
1109    }
1110
    /// Number of compiled pipelines currently in the cache.
    ///
    /// Each function-constant specialisation is cached under its own
    /// composite key (see `get_pipeline_with_constants`), so variants of the
    /// same kernel are counted individually.
    pub fn cached_count(&self) -> usize {
        self.cache.len()
    }
1115
    /// Number of registered shader sources.
    ///
    /// Counts *sources*, not compiled pipelines: a source is registered once,
    /// while the pipeline cache can hold multiple specialisations compiled
    /// from the same source.
    pub fn source_count(&self) -> usize {
        self.sources.len()
    }
1120}
1121
1122impl Default for KernelRegistry {
1123    fn default() -> Self {
1124        Self::new()
1125    }
1126}
1127
#[cfg(test)]
mod tests {
    use super::*;

    /// Minimal Metal shader that uses a single int function constant.
    ///
    /// The kernel writes the constant value N into the first element of the
    /// output buffer, allowing the tests to verify that the Metal compiler
    /// actually sees distinct specialisations for N=4 and N=8.
    ///
    /// The shader is intentionally trivial — we only need it to *compile* with
    /// an int function constant; correctness of the kernel logic is not under
    /// test here.
    const INT_FC_TEST_SHADER: &str = r#"
#include <metal_stdlib>
using namespace metal;

constant int test_N [[function_constant(100)]];

kernel void int_fc_test_kernel(
    device int* out [[buffer(0)]],
    uint tid [[thread_position_in_grid]])
{
    if (tid == 0) {
        out[0] = test_N;
    }
}
"#;

    /// Fetch the system Metal device, or log and return `None` when the host
    /// has no Metal support (e.g. a non-Apple CI runner).
    ///
    /// The previous doc comments claimed the tests were `#[ignore]`d on
    /// non-Apple platforms, but the code unconditionally panicked via
    /// `expect`.  Skipping with a log line makes behaviour match that intent.
    fn system_device_or_skip(test_name: &str) -> Option<metal::Device> {
        let device = metal::Device::system_default();
        if device.is_none() {
            eprintln!("skipping {}: no Metal device on this host", test_name);
        }
        device
    }

    /// Verify that `get_pipeline_with_constants` produces distinct cached
    /// pipelines for different i32 function-constant values, and that
    /// `get_pipeline_with_bool_constants` (the backward-compat wrapper) still
    /// works correctly with the 'b'-prefixed cache-key format.
    ///
    /// Requires a real Metal device; on hosts without one the test skips
    /// itself (with a log line) instead of failing.
    #[test]
    fn test_int_fc_distinct_pipelines_and_bool_compat() {
        let device = match system_device_or_skip("test_int_fc_distinct_pipelines_and_bool_compat") {
            Some(d) => d,
            None => return,
        };

        let mut registry = KernelRegistry::new();

        // Register the inline test shader under a name that cannot collide with
        // any production kernel.
        registry.register_source("int_fc_test_kernel", INT_FC_TEST_SHADER);

        // NOTE(review): the raw-pointer checks below compare addresses of
        // value slots inside the cache HashMap.  They hold as long as the map
        // does not rehash (and so move entries) between capture and
        // comparison; with the handful of insertions made here that is the
        // case in practice — TODO confirm if this test ever grows more
        // insertions between capture and re-check.

        // Compile with N=4.
        let p4_ptr = registry
            .get_pipeline_with_constants(
                "int_fc_test_kernel",
                &device,
                &[],             // no bool constants
                &[(100, 4_i32)], // int constant index 100 = 4
            )
            .expect("pipeline N=4 should compile") as *const _;

        // Record the cache size after the first specialisation; subsequent
        // growth is asserted relative to this baseline (other production
        // kernels may already be in the cache).
        let count_after_n4 = registry.cached_count();

        // Compile with N=8 — must produce a SEPARATE pipeline.
        let p8_ptr = registry
            .get_pipeline_with_constants(
                "int_fc_test_kernel",
                &device,
                &[],
                &[(100, 8_i32)],
            )
            .expect("pipeline N=8 should compile") as *const _;

        // Cache must have grown by exactly 1.
        assert_eq!(
            registry.cached_count(),
            count_after_n4 + 1,
            "N=8 must produce a new cache entry"
        );

        // The two pipelines must be distinct objects in the cache.
        assert_ne!(
            p4_ptr, p8_ptr,
            "N=4 and N=8 specialisations must be separate ComputePipelineState objects"
        );

        // A second call with N=4 must return the SAME pipeline (cache hit, no
        // new compilation).
        let p4_again_ptr = registry
            .get_pipeline_with_constants(
                "int_fc_test_kernel",
                &device,
                &[],
                &[(100, 4_i32)],
            )
            .expect("pipeline N=4 cache hit should succeed") as *const _;

        assert_eq!(
            registry.cached_count(),
            count_after_n4 + 1,
            "repeated N=4 call must be a cache hit, not a new entry"
        );
        assert_eq!(
            p4_ptr, p4_again_ptr,
            "repeated N=4 call must return the same pipeline pointer"
        );

        // Backward compatibility: get_pipeline_with_bool_constants must route
        // through get_pipeline_with_constants and produce a cached pipeline
        // without panicking.  A bare kernel declaring no function constants
        // is used so the Metal compiler has no undeclared-constant error to
        // raise; the call path and cache-key format are what matter here.
        const BARE_SHADER: &str = r#"
#include <metal_stdlib>
using namespace metal;
kernel void bare_kernel(device int* out [[buffer(0)]], uint tid [[thread_position_in_grid]]) {
    if (tid == 0) { out[0] = 42; }
}
"#;
        registry.register_source("bare_kernel", BARE_SHADER);

        let count_before_bool = registry.cached_count();
        let _bool_pipeline = registry
            .get_pipeline_with_bool_constants("bare_kernel", &device, &[])
            .expect("bool-constants wrapper with empty slice must succeed");

        assert_eq!(
            registry.cached_count(),
            count_before_bool + 1,
            "bool-constants wrapper must insert one new cache entry"
        );
    }

    /// Verify that the `MTLComputePipelineState.label` produced by
    /// `get_pipeline` and `get_pipeline_with_constants` actually propagates
    /// from the descriptor to the resulting pipeline state.
    ///
    /// This is the in-process smoke check for ADR-015 iter9b: we cannot
    /// reach into xctrace from Rust, but we can read back the same `label`
    /// property xctrace consumes via `ComputePipelineStateRef::label()`.
    /// If labels are missing or wrong here, the MST trace will also show
    /// generic identifiers — so this test gates the iter9 retry's
    /// per-Q4_0-kernel attribution.
    ///
    /// Requires a real Metal device; skips itself on hosts without one.
    #[test]
    fn test_pipeline_labels_propagate_for_mst() {
        let device = match system_device_or_skip("test_pipeline_labels_propagate_for_mst") {
            Some(d) => d,
            None => return,
        };

        let mut registry = KernelRegistry::new();

        // Reuse the same trivial shaders as the int-FC test.
        registry.register_source("int_fc_test_kernel", INT_FC_TEST_SHADER);

        const BARE_SHADER_LABEL_TEST: &str = r#"
#include <metal_stdlib>
using namespace metal;
kernel void label_smoke_kernel(device int* out [[buffer(0)]], uint tid [[thread_position_in_grid]]) {
    if (tid == 0) { out[0] = 7; }
}
"#;
        registry.register_source("label_smoke_kernel", BARE_SHADER_LABEL_TEST);

        // Plain get_pipeline path — label must equal the kernel name.
        // Capture as an owned String so the cache borrow is released before
        // the next get_pipeline_with_constants call below.
        let plain_label = registry
            .get_pipeline("label_smoke_kernel", &device)
            .expect("plain pipeline must compile")
            .label()
            .to_string();
        assert_eq!(
            plain_label, "label_smoke_kernel",
            "get_pipeline must label the pipeline with the kernel name (xctrace MST attribution)"
        );

        // Constants path — label must equal the composite cache key so each
        // function-constant variant is individually attributable in MST.
        // Again captured as an owned String to release the cache borrow.
        let label_v7 = registry
            .get_pipeline_with_constants(
                "int_fc_test_kernel",
                &device,
                &[],
                &[(100, 7_i32)],
            )
            .expect("specialised pipeline must compile")
            .label()
            .to_string();
        assert_eq!(
            label_v7, "int_fc_test_kernel|100:i7",
            "get_pipeline_with_constants must label with the cache_key so each \
             specialisation is distinct in xctrace MST"
        );

        // A second specialisation must produce a different label.
        let label_v13 = registry
            .get_pipeline_with_constants(
                "int_fc_test_kernel",
                &device,
                &[],
                &[(100, 13_i32)],
            )
            .expect("second specialised pipeline must compile")
            .label()
            .to_string();
        assert_eq!(label_v13, "int_fc_test_kernel|100:i13");
        assert_ne!(
            label_v7, label_v13,
            "distinct constant values must yield distinct pipeline labels"
        );
    }
}