// mlx_native/kernel_registry.rs
//! [`KernelRegistry`] — lazy compilation and caching of Metal compute pipelines.
//!
//! MSL shader source is embedded at compile time via `include_str!`.  On first
//! access, the source is compiled into a Metal library, the named function is
//! extracted, and a `ComputePipelineState` is created and cached.  Subsequent
//! calls return the cached pipeline.

use std::collections::HashMap;

use metal::{ComputePipelineDescriptor, ComputePipelineState, FunctionConstantValues, MTLDataType};

use crate::error::{MlxError, Result};

// MTLDataType numeric values (from metal-rs argument.rs, confirmed in Apple Metal spec):
//   Int  = 29
//   Bool = 53
// These are used when calling set_constant_value_at_index so the Metal runtime
// knows how wide each constant value is.

20/// Registry that lazily compiles and caches Metal compute pipelines from
21/// embedded MSL source.
22///
23/// # Usage
24///
25/// ```ignore
26/// let mut registry = KernelRegistry::new();
27/// let pipeline = registry.get_pipeline("elementwise_add", device.metal_device())?;
28/// encoder.encode(&pipeline, &buffers, grid, tg);
29/// ```
30///
31/// # Thread Safety
32///
33/// `KernelRegistry` is **not** `Sync` by default (it uses `&mut self` for
34/// `get_pipeline` to allow mutable cache insertion).  If you need concurrent
35/// access, wrap it in a `Mutex` or use one registry per thread.
36pub struct KernelRegistry {
37    /// Cached pipelines keyed by kernel function name.
38    cache: HashMap<String, ComputePipelineState>,
39    /// MSL source text keyed by kernel function name.
40    ///
41    /// Populated at construction time with all embedded shader sources.
42    sources: HashMap<String, &'static str>,
43}
44
45impl KernelRegistry {
46    /// Create a new registry with all embedded shader sources pre-registered.
47    ///
48    /// No compilation happens here — shaders are compiled lazily on first use.
49    pub fn new() -> Self {
50        let mut sources = HashMap::new();
51
52        // Register embedded shader sources.
53        sources.insert(
54            "placeholder".into(),
55            include_str!("shaders/placeholder.metal"),
56        );
57        sources.insert(
58            "quantized_matmul".into(),
59            include_str!("shaders/quantized_matmul.metal"),
60        );
61        sources.insert(
62            "quantized_matmul_simd".into(),
63            include_str!("shaders/quantized_matmul.metal"),
64        );
65        sources.insert(
66            "quantized_matmul_simd_bf16".into(),
67            include_str!("shaders/quantized_matmul.metal"),
68        );
69        sources.insert(
70            "quantized_matmul_simd_bf16_expert".into(),
71            include_str!("shaders/quantized_matmul.metal"),
72        );
73
74        // GGML block-format quantized mat-vec kernels (ADR-006 Phase 3)
75        let ggml_src: &'static str =
76            include_str!("shaders/quantized_matmul_ggml.metal");
77        sources.insert("kernel_mul_mv_q4_0_f32".into(), ggml_src);
78        sources.insert("kernel_mul_mv_q8_0_f32".into(), ggml_src);
79        sources.insert("kernel_mul_mv_q6_K_f32".into(), ggml_src);
80        // ADR-013 P7 — Q4_K dense decode mat-vec (port of llama.cpp's
81        // kernel_mul_mv_q4_K_f32 at ggml-metal.metal:7715-7821).
82        sources.insert("kernel_mul_mv_q4_K_f32".into(), ggml_src);
83
84        // GGML block-format quantized matrix-matrix kernels
85        // (ADR-011 Phase 3 Wave P3a: port of llama.cpp's kernel_mul_mm_<q>_f32).
86        // Used at prefill m > 8 to reuse each weight tile across a 32-row
87        // block via threadgroup-staged simdgroup MMA, instead of re-reading
88        // every block per prompt-token as the mv kernel does.
89        let ggml_mm_src: &'static str =
90            include_str!("shaders/quantized_matmul_mm.metal");
91        sources.insert("kernel_mul_mm_q4_0_f32".into(), ggml_mm_src);
92        sources.insert("kernel_mul_mm_q8_0_f32".into(), ggml_mm_src);
93        sources.insert("kernel_mul_mm_q6_K_f32".into(), ggml_mm_src);
94
95        // GGML block-format quantized matrix-matrix kernels — tensor API
96        // variant (ADR-011 Phase 3 Wave P3b-tensor: port of llama.cpp's
97        // kernel_mul_mm_impl `#ifdef GGML_METAL_HAS_TENSOR` branch).
98        // Uses Apple's MetalPerformancePrimitives `tensor_ops::matmul2d`
99        // primitive which on M3+ dispatches to hardware tensor cores for
100        // 2-3x the effective FLOP throughput vs the simdgroup MMA path.
101        // Only compiled on devices where the tensor API is available; the
102        // kernel_registry's runtime-probe (see MlxDevice::has_tensor) gates
103        // compilation so non-tensor devices transparently fall back to the
104        // non-tensor `kernel_mul_mm_<q>_f32` kernels.
105        let ggml_mm_tensor_src: &'static str =
106            include_str!("shaders/quantized_matmul_mm_tensor.metal");
107        sources.insert("kernel_mul_mm_q4_0_tensor_f32".into(), ggml_mm_tensor_src);
108        sources.insert("kernel_mul_mm_q4_0_tensor_bf16_perm021".into(), ggml_mm_tensor_src);
109        sources.insert("kernel_mul_mm_q6_K_tensor_bf16_perm021".into(), ggml_mm_tensor_src);
110        sources.insert("kernel_mul_mm_q8_0_tensor_f32".into(), ggml_mm_tensor_src);
111        sources.insert("kernel_mul_mm_q6_K_tensor_f32".into(), ggml_mm_tensor_src);
112
113        // Dense bf16×f32 → f32 tensor-API matmul (non-flash-attention
114        // prefill Q@K^T and scores@V, modeled on llama.cpp's
115        // kernel_mul_mm_bf16_f32 with the GGML_METAL_HAS_TENSOR branch
116        // active).  Tile geometry and write-back identical to the
117        // quantized tensor kernel; only the A-stage copy (bfloat →
118        // bfloat, no dequantize) differs.
119        let dense_mm_bf16_tensor_src: &'static str =
120            include_str!("shaders/dense_mm_bf16_tensor.metal");
121        sources.insert("hf2q_dense_mm_bf16_f32_tensor".into(), dense_mm_bf16_tensor_src);
122
123        // Dense f32×f32 → f32 tensor-API matmul (F32-everywhere
124        // sibling of dense_mm_bf16_tensor).  Used by hf2q's ADR-005
125        // iter-118 BF16-vs-F32 ViT attention A/B diagnostic to remove
126        // the BF16 K-stage cast as a confounding variable.  Port of
127        // llama.cpp's kernel_mul_mm_f32_f32 specialization
128        // (ggml-metal.metal:10098) on the GGML_METAL_HAS_TENSOR
129        // branch.  Same tile geometry (NR0=64 NR1=32 NK=32) but
130        // float-everywhere shmem staging.
131        let dense_mm_f32_f32_tensor_src: &'static str =
132            include_str!("shaders/dense_mm_f32_f32.metal");
133        sources.insert("hf2q_dense_mm_f32_f32_tensor".into(), dense_mm_f32_f32_tensor_src);
134
135        // Dense f16×f32 → f32 tensor-API matmul (F16-staging sibling
136        // of dense_mm_bf16_tensor).  Used by hf2q's ADR-005 Phase 2c
137        // iter-128 gemma4v ViT precision-parity path: every mmproj
138        // weight is stored as F16 in GGUF, peer's `kernel_mul_mm_f16_f32`
139        // (`ggml-metal.metal:10099`) stages BOTH A and B as `half` in
140        // shmem and computes on `simdgroup_half8x8`.  Matches peer
141        // per-element rounding budget exactly (10-bit mantissa vs
142        // BF16's 7-bit), closing the 1.16x/block cascade compound that
143        // iter-127 numerically bisected to BF16 staging.  Same tile
144        // geometry as the BF16 sibling (NR0=64 NR1=32 NK=32, 8 KB
145        // shmem) — half and bfloat share 16-bit storage.
146        let dense_mm_f16_tensor_src: &'static str =
147            include_str!("shaders/dense_mm_f16_tensor.metal");
148        sources.insert("hf2q_dense_mm_f16_f32_tensor".into(), dense_mm_f16_tensor_src);
149
150        // Dense bf16×f32 → f32 GEMV (matrix-vector multiply) — optimized
151        // for M=1 single-token decode.  Port of llama.cpp's
152        // kernel_mul_mv_bf16_f32_4 (bfloat4-vectorized GEMV kernel).
153        // Used in apply_linear_projection_f32 when seq_len=1 and the
154        // weight matrix is BF16, replacing the MM kernel (~2× faster for
155        // M=1 due to better memory bandwidth utilization per thread).
156        let dense_gemv_bf16_src: &'static str =
157            include_str!("shaders/dense_gemv_bf16.metal");
158        sources.insert("hf2q_dense_gemv_bf16_f32_4".into(), dense_gemv_bf16_src);
159
160        // Fused scale-mask-softmax for the non-flash-attention prefill
161        // path.  One row-local threadgroup per (head, query) pair
162        // replaces three separate dispatches (scale, mask-add, softmax);
163        // reads a bf16 mask (-INF at masked positions, matching
164        // flash_attn_prefill_mask.metal) that is shared across heads.
165        let scale_mask_softmax_src: &'static str =
166            include_str!("shaders/scale_mask_softmax.metal");
167        sources.insert("scale_mask_softmax_f32".into(), scale_mask_softmax_src);
168
169        // Expert-routed (MoE) quantized matmul kernel (Story 2.1)
170        sources.insert(
171            "quantized_matmul_id".into(),
172            include_str!("shaders/quantized_matmul_id.metal"),
173        );
174
175        // Expert-routed (MoE) GGML block-format quantized matmul kernels
176        let ggml_id_src: &'static str =
177            include_str!("shaders/quantized_matmul_id_ggml.metal");
178        sources.insert("kernel_mul_mv_id_q4_0_f32".into(), ggml_id_src);
179        sources.insert("kernel_mul_mv_id_q8_0_f32".into(), ggml_id_src);
180        // ADR-013 P7 — Q4_K MoE expert-routed mat-vec (port of
181        // llama.cpp's kernel_mul_mv_id_q4_K_f32 at ggml-metal.metal:10349).
182        sources.insert("kernel_mul_mv_id_q4_K_f32".into(), ggml_id_src);
183        sources.insert("kernel_mul_mv_id_q5_K_f32".into(), ggml_id_src);
184        sources.insert("kernel_mul_mv_id_q6_K_f32".into(), ggml_id_src);
185        // Fused-SwiGLU mv_id variants (ADR-012 §Optimize / Task #15):
186        // computes y[r][n] = sum_k(dequant(W[expert][n][k]) * silu(gate[r][k]) * up[r][k])
187        // in one dispatch — replaces silu_mul + expert_down sequence.
188        sources.insert("kernel_mul_mv_id_q4_0_f32_swiglu".into(), ggml_id_src);
189
190        // Expert-routed (MoE) GGML block-format QUANTIZED MATRIX-MATRIX kernels
191        // (ADR-011 Phase 3 Wave P3a: port of llama.cpp's
192        // `kernel_mul_mm_id_map0_ne20_N` + `kernel_mul_mm_id_<q>_f32`).
193        // Two-stage dispatch: map0 regroups the token-to-expert table into
194        // per-expert routed-token lists, then mm_id stages a 64x32 expert
195        // weight tile into threadgroup shmem and reuses it across a 32-row
196        // block of that expert's routed tokens.
197        let ggml_id_mm_src: &'static str =
198            include_str!("shaders/quantized_matmul_id_mm.metal");
199        sources.insert("kernel_mul_mm_id_map0_ne20_1".into(), ggml_id_mm_src);
200        sources.insert("kernel_mul_mm_id_map0_ne20_8".into(), ggml_id_mm_src);
201        sources.insert("kernel_mul_mm_id_q4_0_f32".into(), ggml_id_mm_src);
202        sources.insert("kernel_mul_mm_id_q8_0_f32".into(), ggml_id_mm_src);
203        sources.insert("kernel_mul_mm_id_q6_K_f32".into(), ggml_id_mm_src);
204        // ADR-013 P16 — Q4_K mm_id (port of llama.cpp ggml-metal.metal:10169).
205        sources.insert("kernel_mul_mm_id_q4_K_f32".into(), ggml_id_mm_src);
206
207        // MoE-routed quantized matrix-matrix kernels — tensor API variant
208        // (ADR-011 Phase 3 Wave P3b-tensor).  Uses the MPP tensor_ops
209        // matmul2d primitive for hardware-tensor-core MMA on M3+.  Only
210        // the mm_id kernel is ported — map0 is a short pre-pass (not
211        // matmul) and continues to use the simdgroup version.
212        let ggml_id_mm_tensor_src: &'static str =
213            include_str!("shaders/quantized_matmul_id_mm_tensor.metal");
214        sources.insert("kernel_mul_mm_id_q4_0_tensor_f32".into(), ggml_id_mm_tensor_src);
215        sources.insert("kernel_mul_mm_id_q8_0_tensor_f32".into(), ggml_id_mm_tensor_src);
216        sources.insert("kernel_mul_mm_id_q6_K_tensor_f32".into(), ggml_id_mm_tensor_src);
217        // ADR-013 P16 — Q4_K tensor-API mm_id.
218        sources.insert("kernel_mul_mm_id_q4_K_tensor_f32".into(), ggml_id_mm_tensor_src);
219
220        // Embedding kernels (Story 1.5)
221        let embedding_src: &'static str = include_str!("shaders/embedding.metal");
222        sources.insert("embedding_gather_4bit".into(), embedding_src);
223        sources.insert("embedding_gather_6bit".into(), embedding_src);
224
225        // MoE gate kernel (Story 1.5)
226        let moe_gate_src: &'static str = include_str!("shaders/moe_gate.metal");
227        sources.insert("moe_gate".into(), moe_gate_src);
228
229        // MoE dispatch kernels (Story 1.5)
230        let moe_dispatch_src: &'static str = include_str!("shaders/moe_dispatch.metal");
231        sources.insert("fused_gelu_mul".into(), moe_dispatch_src);
232        sources.insert("moe_swiglu_fused".into(), moe_dispatch_src);
233        sources.insert("moe_swiglu_batch".into(), moe_dispatch_src);
234        sources.insert("moe_swiglu_seq".into(), moe_dispatch_src);
235        sources.insert("moe_accumulate".into(), moe_dispatch_src);
236        sources.insert("moe_weighted_sum".into(), moe_dispatch_src);
237        sources.insert("moe_weighted_sum_seq".into(), moe_dispatch_src);
238        sources.insert("zero_buffer".into(), moe_dispatch_src);
239        sources.insert("naive_matvec_f32".into(), moe_dispatch_src);
240        sources.insert("moe_gather_topk_weights".into(), moe_dispatch_src);
241        // bf16 variants (Phase 2 bf16 activation path)
242        sources.insert("fused_gelu_mul_bf16".into(), moe_dispatch_src);
243        sources.insert("moe_swiglu_seq_bf16".into(), moe_dispatch_src);
244        sources.insert("moe_weighted_sum_seq_bf16_input".into(), moe_dispatch_src);
245
246        // Batched KV cache copy kernels
247        let kv_cache_src: &'static str = include_str!("shaders/kv_cache_copy.metal");
248        sources.insert("kv_cache_copy_batch_f32".into(), kv_cache_src);
249        sources.insert("kv_cache_copy_batch_f32_to_f16".into(), kv_cache_src);
250        sources.insert("kv_cache_copy_seq_f32".into(), kv_cache_src);
251        sources.insert("kv_cache_copy_seq_f32_to_f16".into(), kv_cache_src);
252        // Wave P4.11 — fused K+V copy variants
253        sources.insert("kv_cache_copy_seq_f32_kv_dual".into(), kv_cache_src);
254        sources.insert("kv_cache_copy_seq_f32_to_f16_kv_dual".into(), kv_cache_src);
255        // bf16-source KV cache copy (Phase 2 bf16 activation path)
256        sources.insert("kv_cache_copy_seq_bf16".into(), kv_cache_src);
257
258        // Elementwise and transpose kernels (Story 1.5)
259        let elementwise_src: &'static str = include_str!("shaders/elementwise.metal");
260        sources.insert("elementwise_add_f32".into(), elementwise_src);
261        sources.insert("elementwise_add_f16".into(), elementwise_src);
262        sources.insert("elementwise_mul_f32".into(), elementwise_src);
263        sources.insert("elementwise_mul_f16".into(), elementwise_src);
264        sources.insert("elementwise_add_bf16".into(), elementwise_src);
265        sources.insert("elementwise_mul_bf16".into(), elementwise_src);
266        sources.insert("cast_f16_to_f32".into(), elementwise_src);
267        sources.insert("cast_f32_to_f16".into(), elementwise_src);
268        sources.insert("cast_bf16_to_f32".into(), elementwise_src);
269        sources.insert("cast_f32_to_bf16".into(), elementwise_src);
270        sources.insert("scalar_mul_bf16".into(), elementwise_src);
271        sources.insert("scalar_mul_f32".into(), elementwise_src);
272        sources.insert("embedding_gather_scale_f32".into(), elementwise_src);
273        sources.insert("embedding_gather_scale_batch_f32".into(), elementwise_src);
274        sources.insert("permute_021_bf16".into(), elementwise_src);
275        sources.insert("transpose_last2_bf16".into(), elementwise_src);
276        sources.insert("transpose_last2_f16".into(), elementwise_src);
277        sources.insert("permute_021_f32".into(), elementwise_src);
278        sources.insert("permute_021_bf16_to_f32".into(), elementwise_src);
279        sources.insert("transpose_2d_f32".into(), elementwise_src);
280        sources.insert("transpose_2d_f16".into(), elementwise_src);
281
282        // Attention kernels (Story 1.3)
283        let sdpa_src: &'static str = include_str!("shaders/sdpa.metal");
284        sources.insert("sdpa".into(), sdpa_src);
285        sources.insert("sdpa_bf16".into(), sdpa_src);
286        let sdpa_sliding_src: &'static str = include_str!("shaders/sdpa_sliding.metal");
287        sources.insert("sdpa_sliding".into(), sdpa_sliding_src);
288        sources.insert("sdpa_sliding_bf16".into(), sdpa_sliding_src);
289
290        // Flash-attention tiled prefill kernel (ADR-011 Phase 1).
291        // Ten entry points; all backed by the same shader source.
292        // Pipelines are compiled with function constants via
293        // `get_pipeline_with_bool_constants` — not `get_pipeline`.
294        let flash_attn_prefill_src: &'static str =
295            include_str!("shaders/flash_attn_prefill.metal");
296        // D=256 variants (BQ=32, BK=16, WM=4, WN=1 — 128 threads/threadgroup)
297        sources.insert(
298            "steel_attention_float32_bq32_bk16_bd256_wm4_wn1_maskfloat32".into(),
299            flash_attn_prefill_src,
300        );
301        sources.insert(
302            "steel_attention_float32_bq32_bk16_bd256_wm4_wn1_maskbool_".into(),
303            flash_attn_prefill_src,
304        );
305        sources.insert(
306            "steel_attention_bfloat16_bq32_bk16_bd256_wm4_wn1_maskbfloat16".into(),
307            flash_attn_prefill_src,
308        );
309        sources.insert(
310            "steel_attention_bfloat16_bq32_bk16_bd256_wm4_wn1_maskbool_".into(),
311            flash_attn_prefill_src,
312        );
313        sources.insert(
314            "steel_attention_float16_bq32_bk16_bd256_wm4_wn1_maskfloat16".into(),
315            flash_attn_prefill_src,
316        );
317        sources.insert(
318            "steel_attention_float16_bq32_bk16_bd256_wm4_wn1_maskbool_".into(),
319            flash_attn_prefill_src,
320        );
321        // D=512 variants (BQ=8, BK=8, WM=1, WN=1 — 32 threads/threadgroup)
322        // NOTE: f32 at D=512 is NOT instantiated — threadgroup memory exceeds
323        // the 32 KB Metal limit (candle sdpa.rs:86-94).
324        sources.insert(
325            "steel_attention_bfloat16_bq8_bk8_bd512_wm1_wn1_maskbfloat16".into(),
326            flash_attn_prefill_src,
327        );
328        sources.insert(
329            "steel_attention_bfloat16_bq8_bk8_bd512_wm1_wn1_maskbool_".into(),
330            flash_attn_prefill_src,
331        );
332        sources.insert(
333            "steel_attention_float16_bq8_bk8_bd512_wm1_wn1_maskfloat16".into(),
334            flash_attn_prefill_src,
335        );
336        sources.insert(
337            "steel_attention_float16_bq8_bk8_bd512_wm1_wn1_maskbool_".into(),
338            flash_attn_prefill_src,
339        );
340
341        // Flash attention vector kernels — SIMD-vectorized decode-path SDPA
342        // (ported from llama.cpp flash_attn_ext_vec)
343        let flash_attn_vec_src: &'static str =
344            include_str!("shaders/flash_attn_vec.metal");
345        sources.insert("flash_attn_vec_dk256".into(), flash_attn_vec_src);
346        sources.insert("flash_attn_vec_dk512".into(), flash_attn_vec_src);
347        sources.insert("flash_attn_vec_reduce_dk256".into(), flash_attn_vec_src);
348        sources.insert("flash_attn_vec_reduce_dk512".into(), flash_attn_vec_src);
349        // F16 KV variants (Phase 4a)
350        sources.insert("flash_attn_vec_f16kv_dk256".into(), flash_attn_vec_src);
351        sources.insert("flash_attn_vec_f16kv_dk512".into(), flash_attn_vec_src);
352
353        // RoPE, normalization, activation kernels (Story 1.4)
354        let rope_src: &'static str = include_str!("shaders/rope.metal");
355        sources.insert("rope_f32".into(), rope_src);
356        sources.insert("rope_f16".into(), rope_src);
357        sources.insert("rope_bf16".into(), rope_src);
358        sources.insert("rope_neox_bf16".into(), rope_src);
359        sources.insert("rope_neox_f32".into(), rope_src);
360        let rms_norm_src: &'static str = include_str!("shaders/rms_norm.metal");
361        sources.insert("rms_norm_f32".into(), rms_norm_src);
362        sources.insert("rms_norm_f16".into(), rms_norm_src);
363        sources.insert("rms_norm_bf16".into(), rms_norm_src);
364        sources.insert("rms_norm_no_scale_bf16".into(), rms_norm_src);
365        sources.insert("rms_norm_no_scale_f32".into(), rms_norm_src);
366        sources.insert("rms_norm_no_scale_f32_dual".into(), rms_norm_src);
367        sources.insert("rms_norm_f32_triple".into(), rms_norm_src);
368        sources.insert("fused_post_attn_triple_norm_f32".into(), rms_norm_src);
369        sources.insert("rms_norm_no_scale_f32_dual_perm".into(), rms_norm_src);
370        // Fused RMS norm + elementwise multiply kernels (Phase 4e.2)
371        sources.insert("rms_norm_mul_f32".into(), rms_norm_src);
372        sources.insert("rms_norm_mul_f16".into(), rms_norm_src);
373        sources.insert("rms_norm_mul_bf16".into(), rms_norm_src);
374        // L2 norm kernels (ADR-013 Decision 3 — Gated DeltaNet Q/K norm)
375        let l2_norm_src: &'static str = include_str!("shaders/l2_norm.metal");
376        sources.insert("l2_norm_f32".into(), l2_norm_src);
377        sources.insert("l2_norm_f16".into(), l2_norm_src);
378        sources.insert("l2_norm_bf16".into(), l2_norm_src);
379        // Cumulative-sum kernels (ADR-013 Decision 4 — DeltaNet decay-mask base)
380        let cumsum_src: &'static str = include_str!("shaders/cumsum.metal");
381        sources.insert("cumsum_f32".into(), cumsum_src);
382        sources.insert("cumsum_bf16".into(), cumsum_src);
383        // SSM conv kernels (ADR-013 Decision 7 — DeltaNet 1D causal conv + SiLU)
384        let ssm_conv_src: &'static str = include_str!("shaders/ssm_conv.metal");
385        sources.insert("ssm_conv_forward_f32".into(), ssm_conv_src);
386        sources.insert("ssm_conv_forward_bf16".into(), ssm_conv_src);
387        sources.insert("ssm_conv_state_update_f32".into(), ssm_conv_src);
388        sources.insert("ssm_conv_state_update_bf16".into(), ssm_conv_src);
389        // Tri-solve kernels (ADR-013 Decision 5 — chunked DeltaNet debug path)
390        let tri_solve_src: &'static str = include_str!("shaders/tri_solve.metal");
391        sources.insert("tri_solve_lower_unit_f32".into(), tri_solve_src);
392        sources.insert("tri_solve_lower_unit_bf16".into(), tri_solve_src);
393        // Rope-multi kernels (ADR-013 Decision 10 — IMROPE for Qwen3.5)
394        let rope_multi_src: &'static str = include_str!("shaders/rope_multi.metal");
395        sources.insert("rope_multi_f32".into(), rope_multi_src);
396        sources.insert("rope_multi_bf16".into(), rope_multi_src);
397        // Gated DeltaNet fused kernel (ADR-013 Decision 6 — centerpiece)
398        let gdn_src: &'static str = include_str!("shaders/gated_delta_net.metal");
399        sources.insert("gated_delta_net_f32".into(), gdn_src);
400        // ADR-015 iter56 — decode-only `simd_sum` variant. Three NSG-templated
401        // host names share the same source; selection is by D_k via
402        // `dispatch_gated_delta_net_decode`. Drop-in for the fused kernel
403        // above when n_tokens=1.
404        let gdn_decode_src: &'static str =
405            include_str!("shaders/gated_delta_net_decode.metal");
406        sources.insert("gated_delta_net_decode_f32_1".into(), gdn_decode_src);
407        sources.insert("gated_delta_net_decode_f32_2".into(), gdn_decode_src);
408        sources.insert("gated_delta_net_decode_f32_4".into(), gdn_decode_src);
409        // Wave 5b — chunk-parallel inter-chunk state-recurrence kernel
410        // (the one new kernel in the chunk-parallel pipeline; spec source:
411        // arXiv 2412.06464 §4 + FLA chunk_delta_h.py:43-298).
412        let gdn_chunk_src: &'static str =
413            include_str!("shaders/gated_delta_net_chunk.metal");
414        sources.insert(
415            "gated_delta_net_chunk_inter_state_bf16".into(),
416            gdn_chunk_src,
417        );
418        // Wave 5b.1 iter 2 — chunk_scaled_dot_kkt kernel (input-side of
419        // the chunk pipeline; spec source: FLA chunk_scaled_dot_kkt.py:36-99).
420        let gdn_kkt_src: &'static str =
421            include_str!("shaders/gated_delta_net_kkt.metal");
422        sources.insert("gated_delta_net_kkt_bf16".into(), gdn_kkt_src);
423        // Wave 5b.1 iter 2 — recompute_w_u_fwd kernel (applies post-solve A
424        // to (β·v) and (β·k·exp(g)) to produce w and u; spec source: FLA
425        // wy_fast.py:29-117).
426        let gdn_recompute_wu_src: &'static str =
427            include_str!("shaders/gated_delta_net_recompute_wu.metal");
428        sources.insert(
429            "gated_delta_net_recompute_wu_bf16".into(),
430            gdn_recompute_wu_src,
431        );
432        // Wave 5b.1 iter 3 — chunk_fwd_o kernel (per-chunk output: closes
433        // the chunk pipeline; spec source: FLA chunk_o.py:42-138).
434        let gdn_chunk_o_src: &'static str =
435            include_str!("shaders/gated_delta_net_chunk_o.metal");
436        sources.insert("gated_delta_net_chunk_o_bf16".into(), gdn_chunk_o_src);
437        // Wave 5b.1 iter 4 — orchestrator helper kernels:
438        //   chunk_local_cumsum_g_f32      — per-chunk prefix sum on g [B, T, H]
439        //   chunk_tri_solve_invert_f32    — per-chunk-block (I + A_strict)^-1
440        //                                   on FLA's [B, T, H, BT] layout.
441        let chunk_local_cumsum_g_src: &'static str =
442            include_str!("shaders/chunk_local_cumsum_g.metal");
443        sources.insert(
444            "chunk_local_cumsum_g_f32".into(),
445            chunk_local_cumsum_g_src,
446        );
447        let chunk_tri_solve_invert_src: &'static str =
448            include_str!("shaders/chunk_gated_delta_rule_tri_solve_invert.metal");
449        sources.insert(
450            "chunk_tri_solve_invert_f32".into(),
451            chunk_tri_solve_invert_src,
452        );
453        // Sigmoid-gated elementwise multiply (ADR-013 Decision 9 — full-attn output gate)
454        let sigmoid_mul_src: &'static str = include_str!("shaders/sigmoid_mul.metal");
455        sources.insert("sigmoid_mul_f32".into(), sigmoid_mul_src);
456        sources.insert("sigmoid_mul_bf16".into(), sigmoid_mul_src);
457        let silu_mul_src: &'static str = include_str!("shaders/silu_mul.metal");
458        sources.insert("silu_mul_f32".into(), silu_mul_src);
459        let compute_g_beta_src: &'static str = include_str!("shaders/compute_g_beta.metal");
460        sources.insert("compute_g_beta_f32".into(), compute_g_beta_src);
461        let ssm_norm_gate_src: &'static str = include_str!("shaders/ssm_norm_gate.metal");
462        sources.insert("ssm_norm_gate_f32".into(), ssm_norm_gate_src);
463        let gelu_src: &'static str = include_str!("shaders/gelu.metal");
464        sources.insert("gelu_f32".into(), gelu_src);
465        sources.insert("gelu_f16".into(), gelu_src);
466        sources.insert("gelu_bf16".into(), gelu_src);
467        let softmax_src: &'static str = include_str!("shaders/softmax.metal");
468        sources.insert("softmax_f32".into(), softmax_src);
469        sources.insert("softmax_f16".into(), softmax_src);
470        sources.insert("softmax_bf16".into(), softmax_src);
471        let softmax_backward_src: &'static str =
472            include_str!("shaders/softmax_backward.metal");
473        sources.insert("softmax_backward_f32".into(), softmax_backward_src);
474        let log_elementwise_src: &'static str =
475            include_str!("shaders/log_elementwise.metal");
476        sources.insert("log_f32".into(), log_elementwise_src);
477        sources.insert("log_backward_f32".into(), log_elementwise_src);
478        let row_sum_src: &'static str = include_str!("shaders/row_sum.metal");
479        sources.insert("row_sum_f32".into(), row_sum_src);
480        sources.insert("row_sum_backward_f32".into(), row_sum_src);
481        // ADR-020 iter-10a: GGUF-legacy quantize-dequantize round-trip kernels
482        // (Q4_0 + Q8_0).  Used by hf2q's dynamic_quant Track 1 to produce
483        // W_low / W_high for the gradient-Taylor sensitivity formula.
484        let qdq_legacy_src: &'static str = include_str!("shaders/qdq_legacy.metal");
485        sources.insert("qdq_q4_0_f32".into(), qdq_legacy_src);
486        sources.insert("qdq_q8_0_f32".into(), qdq_legacy_src);
487        // ADR-020 iter-10b: RMSNorm reverse-mode autograd kernels.
488        // r_inv helper is reused by both backward kernels; dx and dw cover
489        // the full backward identity for `y = x * rsqrt(mean(x²) + eps) * w`.
490        let rms_norm_backward_src: &'static str =
491            include_str!("shaders/rms_norm_backward.metal");
492        sources.insert(
493            "rms_norm_compute_rms_inv_f32".into(),
494            rms_norm_backward_src,
495        );
496        sources.insert("rms_norm_backward_dx_f32".into(), rms_norm_backward_src);
497        sources.insert("rms_norm_backward_dw_f32".into(), rms_norm_backward_src);
498        // ADR-020 iter-11a: 2-D row-major slice + concat-by-column kernels.
499        // Used by hf2q's multi-head SDPA on GpuTape (slice Q/K/V into
500        // per-head views, run per-head SDPA, concat per-head contexts
501        // back to full attention output).
502        let slice_concat_2d_src: &'static str =
503            include_str!("shaders/slice_concat_2d.metal");
504        sources.insert("slice_2d_cols_f32".into(), slice_concat_2d_src);
505        sources.insert("copy_2d_cols_into_f32".into(), slice_concat_2d_src);
506        // ADR-020 iter-11b: SiLU forward + backward kernels for GpuTape
507        // SwiGLU FFN composition.
508        let silu_backward_src: &'static str =
509            include_str!("shaders/silu_backward.metal");
510        sources.insert("silu_f32".into(), silu_backward_src);
511        sources.insert("silu_backward_f32".into(), silu_backward_src);
512        // ADR-020 iter-11d: FP32 embedding lookup + scatter-add backward.
513        let embedding_autograd_src: &'static str =
514            include_str!("shaders/embedding_autograd.metal");
515        sources.insert("embedding_lookup_f32".into(), embedding_autograd_src);
516        sources.insert(
517            "embedding_scatter_add_f32".into(),
518            embedding_autograd_src,
519        );
520        // ADR-020 iter-13a: Adam optimizer step kernel for Track 2
521        // DWQ-proper training loop.
522        let adam_update_src: &'static str =
523            include_str!("shaders/adam_update.metal");
524        sources.insert("adam_update_f32".into(), adam_update_src);
525        // ADR-020 iter-13b: differentiable affine qdq kernels for the
526        // DWQ-proper training loop.  Init + forward + backward (scales,
527        // biases) — q_int is FROZEN, scales+biases learnable.
528        let qdq_affine_src: &'static str =
529            include_str!("shaders/qdq_affine.metal");
530        sources.insert("qdq_affine_init_f32".into(), qdq_affine_src);
531        sources.insert("qdq_affine_forward_f32".into(), qdq_affine_src);
532        sources.insert(
533            "qdq_affine_backward_scales_f32".into(),
534            qdq_affine_src,
535        );
536        sources.insert(
537            "qdq_affine_backward_biases_f32".into(),
538            qdq_affine_src,
539        );
540        // ADR-020 iter-15: fused affine quantized matmul for DWQ inference.
541        // Per-element kernel; one thread per (m, n) output element.
542        // Tiled + simdgroup-MMA variant lands in iter-15b.
543        let qmm_affine_src: &'static str =
544            include_str!("shaders/qmm_affine.metal");
545        sources.insert("qmm_affine_t_f32".into(), qmm_affine_src);
546        // ADR-020 iter-15b: tiled variant — 16x16 thread block with
547        // cooperative-load X/W tiles in threadgroup-shared memory for
548        // 2-5x speedup over the per-element kernel.
549        let qmm_affine_tiled_src: &'static str =
550            include_str!("shaders/qmm_affine_tiled.metal");
551        sources.insert(
552            "qmm_affine_t_f32_tiled".into(),
553            qmm_affine_tiled_src,
554        );
555        let softcap_src: &'static str = include_str!("shaders/softcap.metal");
556        sources.insert("softcap_f32".into(), softcap_src);
557        sources.insert("softcap_f16".into(), softcap_src);
558        sources.insert("softcap_bf16".into(), softcap_src);
559
560        // Fused norm-add kernels — Gemma4 post-attention / post-FFN ordering:
561        //   normed = rms_norm(input, weight, eps);  output = residual + normed
562        let fused_norm_add_src: &'static str =
563            include_str!("shaders/fused_norm_add_bf16.metal");
564        sources.insert("fused_norm_add_bf16".into(), fused_norm_add_src);
565        sources.insert("fused_norm_add_no_weight_bf16".into(), fused_norm_add_src);
566
567        // Fused head-norm + RoPE f32 kernel — replaces separate rms_norm + rope_neox_f32
568        let fused_hnr_f32_src: &'static str =
569            include_str!("shaders/fused_head_norm_rope_f32.metal");
570        sources.insert("fused_head_norm_rope_f32".into(), fused_hnr_f32_src);
571
572        // Fused head-norm + RoPE bf16 kernels (single-token + batch prefill)
573        // Both entry points live in the same .metal file.
574        let fused_hnr_bf16_src: &'static str =
575            include_str!("shaders/fused_head_norm_rope_bf16.metal");
576        sources.insert("fused_head_norm_rope_bf16".into(), fused_hnr_bf16_src);
577        sources.insert("fused_head_norm_rope_batch_bf16".into(), fused_hnr_bf16_src);
578
579        // Fused norm-add f32 kernels — post-attention / post-FFN / end-of-layer
580        let fused_norm_add_f32_src: &'static str =
581            include_str!("shaders/fused_norm_add_f32.metal");
582        sources.insert("fused_norm_add_f32".into(), fused_norm_add_f32_src);
583        sources.insert("fused_residual_norm_f32".into(), fused_norm_add_f32_src);
584        sources.insert("fused_residual_norm_scalar_f32".into(), fused_norm_add_f32_src);
585        sources.insert("fused_moe_routing_f32".into(), fused_norm_add_f32_src);
586        sources.insert("fused_moe_routing_batch_f32".into(), fused_norm_add_f32_src);
587        sources.insert("fused_norm_add_scalar_f32".into(), fused_norm_add_f32_src);
588        sources.insert("fused_moe_wsum_norm_add_f32".into(), fused_norm_add_f32_src);
589        sources.insert("fused_moe_wsum_dnorm_add_f32".into(), fused_norm_add_f32_src);
590
591        // Argsort kernel (Story 2.3) — MoE top-K routing
592        let argsort_src: &'static str = include_str!("shaders/argsort.metal");
593        sources.insert("argsort_desc_f32".into(), argsort_src);
594
595        // Gather / index_select kernel (Story 2.4)
596        let gather_src: &'static str = include_str!("shaders/gather.metal");
597        sources.insert("gather_f32".into(), gather_src);
598
599        // F32 KV cache copy kernel (Session merge S1+S2)
600        let kv_cache_copy_src: &'static str =
601            include_str!("shaders/kv_cache_copy.metal");
602        sources.insert("kv_cache_copy".into(), kv_cache_copy_src);
603        sources.insert("kv_cache_copy_f32".into(), kv_cache_copy_src);
604
605        // Strided copy kernel (Story 2.5)
606        let copy_src: &'static str = include_str!("shaders/copy.metal");
607        sources.insert("strided_copy_f32".into(), copy_src);
608        sources.insert("offset_copy_f32".into(), copy_src);
609
610        // Fused-QKV split kernel (ADR-005 W-5b.18 — replaces hf2q CPU
611        // download → triple-loop split → 3× upload round-trip in
612        // gpu_delta_net::layer_qkv_deinterleave).
613        let qkv_split_src: &'static str = include_str!("shaders/qkv_split.metal");
614        sources.insert("qkv_split_f32".into(), qkv_split_src);
615
616        // Tiled-GQA broadcast kernel (ADR-005 W-5b.19 — replaces hf2q CPU
617        // tiled-replicate at gpu_delta_net::apply_gated_delta_net_chunk
618        // GQA pre-expansion, ~497 ms / 10.4 ms-per-layer at PP4106).
619        let repeat_tiled_src: &'static str =
620            include_str!("shaders/repeat_tiled.metal");
621        sources.insert("repeat_tiled_f32".into(), repeat_tiled_src);
622
623        // Dense F16 GEMM kernel (Story 2.6) — lm_head projection
624        let dense_gemm_src: &'static str = include_str!("shaders/dense_gemm.metal");
625        sources.insert("dense_gemm_f16".into(), dense_gemm_src);
626        sources.insert("dense_matvec_f16".into(), dense_gemm_src);
627        sources.insert("dense_matvec_f16w_f32io".into(), dense_gemm_src);
628        // BF16-weight mat-vec: BF16 weights × F32 input → F32 output (decode lm_head)
629        sources.insert("dense_matvec_bf16w_f32io".into(), dense_gemm_src);
630        // Pure F32 mat-vec: F32 weights × F32 input → F32 output (decode lm_head)
631        sources.insert("dense_matvec_f32".into(), dense_gemm_src);
632
633        // Standalone FWHT for TurboQuant pre/post-rotation (SIMD shuffle, zero barriers)
634        let fwht_src: &'static str = include_str!("shaders/fwht_standalone.metal");
635        sources.insert("fwht_standalone_f32_d256".into(), fwht_src);
636        sources.insert("fwht_standalone_f32_d512".into(), fwht_src);
637        // ADR-007 iter-14 D1 SRHT variants: sign pre-mult (for Q) + sign undo (for output)
638        sources.insert("fwht_sign_premult_f32_d256".into(), fwht_src);
639        sources.insert("fwht_sign_premult_f32_d512".into(), fwht_src);
640        sources.insert("fwht_sign_undo_f32_d256".into(), fwht_src);
641        sources.insert("fwht_sign_undo_f32_d512".into(), fwht_src);
642
643        // Fast Hadamard quantize (SIMD shuffle, zero barriers)
644        let hq_fast_src: &'static str = include_str!("shaders/hadamard_quantize_kv_fast.metal");
645        sources.insert("hadamard_quantize_kv_fast_d256".into(), hq_fast_src);
646        sources.insert("hadamard_quantize_kv_fast_d512".into(), hq_fast_src);
647        // Track B (iter-21): higher-bit (5/6-bit) quantize kernels (byte-packed)
648        sources.insert("hadamard_quantize_kv_hb_d256".into(), hq_fast_src);
649        sources.insert("hadamard_quantize_kv_hb_d512".into(), hq_fast_src);
650
651        // iter-20 Leg F: TQ KV dequantize kernel (nibbles+norms → F32)
652        let tq_dq_src: &'static str = include_str!("shaders/tq_dequantize_kv.metal");
653        sources.insert("tq_dequantize_kv".into(), tq_dq_src);
654        // Track B (iter-21): higher-bit dequantize kernel (byte-packed indices)
655        sources.insert("tq_dequantize_hb_kv".into(), tq_dq_src);
656
657        // iter-24: native higher-bit (5/6/8-bit) TQ SDPA kernel (byte-packed K/V)
658        let tq_hb_src: &'static str = include_str!("shaders/flash_attn_vec_tq_hb.metal");
659        sources.insert("flash_attn_vec_tq_hb_dk256".into(), tq_hb_src);
660        sources.insert("flash_attn_vec_tq_hb_dk512".into(), tq_hb_src);
661
662        // GPU sampling kernels — eliminate logits readback (Phase 6)
663        let argmax_src: &'static str = include_str!("shaders/argmax.metal");
664        sources.insert("argmax_f32".into(), argmax_src);
665        let softmax_sample_src: &'static str =
666            include_str!("shaders/softmax_sample.metal");
667        sources.insert("softmax_sample_f32".into(), softmax_sample_src);
668        // Top-K kernel for Q8 rerank: avoids full-logits readback.
669        let top_k_src: &'static str = include_str!("shaders/top_k.metal");
670        sources.insert("top_k_f32".into(), top_k_src);
671
672        // MoE GPU routing + weighted reduce (ADR-013 P13.3 perf).
673        // Replaces CPU softmax+topk round-trip and CPU weighted accumulate.
674        let moe_stk_src: &'static str =
675            include_str!("shaders/moe_softmax_topk.metal");
676        sources.insert("moe_softmax_topk_f32".into(), moe_stk_src);
677        let moe_wr_src: &'static str =
678            include_str!("shaders/moe_weighted_reduce.metal");
679        sources.insert("moe_weighted_reduce_f32".into(), moe_wr_src);
680        let sdpa_decode_src: &'static str =
681            include_str!("shaders/sdpa_decode.metal");
682        sources.insert("sdpa_decode".into(), sdpa_decode_src);
683
684        Self {
685            cache: HashMap::new(),
686            sources,
687        }
688    }
689
690    /// Register a shader source at runtime (useful for testing and dynamic
691    /// kernel generation).
692    pub fn register_source(&mut self, name: impl Into<String>, source: &'static str) {
693        let name = name.into();
694        // Invalidate any cached pipeline for this name since the source changed.
695        self.cache.remove(&name);
696        self.sources.insert(name, source);
697    }
698
699    /// Get a compiled compute pipeline for the named kernel function.
700    ///
701    /// On first call for a given name, this compiles the MSL source into a
702    /// Metal library, extracts the named function, and creates a
703    /// `ComputePipelineState`.  Subsequent calls return the cached pipeline.
704    ///
705    /// # Errors
706    ///
707    /// * `MlxError::KernelNotFound` — no source registered for this name.
708    /// * `MlxError::ShaderCompilationError` — MSL compilation or pipeline
709    ///   creation failed.
710    pub fn get_pipeline(
711        &mut self,
712        name: &str,
713        device: &metal::DeviceRef,
714    ) -> Result<&ComputePipelineState> {
715        if !self.cache.contains_key(name) {
716            // Slow path: compile the shader.
717            let source = self.sources.get(name).ok_or_else(|| {
718                MlxError::KernelNotFound(name.to_string())
719            })?;
720
721            let compile_opts = metal::CompileOptions::new();
722            let library = device
723                .new_library_with_source(source, &compile_opts)
724                .map_err(|msg| MlxError::ShaderCompilationError {
725                    name: name.to_string(),
726                    message: msg,
727                })?;
728
729            let function = library
730                .get_function(name, None)
731                .map_err(|msg| MlxError::ShaderCompilationError {
732                    name: name.to_string(),
733                    message: msg,
734                })?;
735
736            // Build the pipeline through a descriptor so we can attach a
737            // human-readable label.  The label propagates into Instruments /
738            // xctrace Metal System Trace as the per-pipeline identifier
739            // (`metal-object-label` schema), giving us per-kernel attribution
740            // instead of the generic "Compute Command 0" placeholder.
741            //
742            // `MTLComputePipelineState.label` is read-only after creation per
743            // the Apple Metal spec; the only supported way to set it is via
744            // the descriptor before pipeline creation.  ADR-015 iter9b.
745            let descriptor = ComputePipelineDescriptor::new();
746            descriptor.set_compute_function(Some(&function));
747            descriptor.set_label(name);
748
749            let pipeline = device
750                .new_compute_pipeline_state(&descriptor)
751                .map_err(|msg| MlxError::ShaderCompilationError {
752                    name: name.to_string(),
753                    message: msg,
754                })?;
755
756            self.cache.insert(name.to_string(), pipeline);
757        }
758
759        // At this point the pipeline is guaranteed to be in the cache.
760        // We use `ok_or_else` instead of `expect` to satisfy the no-panic policy.
761        self.cache.get(name).ok_or_else(|| {
762            MlxError::KernelNotFound(name.to_string())
763        })
764    }
765
    /// Get a compiled compute pipeline for the named kernel, specialized with
    /// Metal function constants (both bool and i32 in one call).
    ///
    /// `bool_constants` contains `(index, value)` pairs mapping to
    /// `[[function_constant(index)]]` bool declarations in the MSL shader.
    /// `int_constants` contains `(index, value)` pairs mapping to
    /// `[[function_constant(index)]]` int (int32_t) declarations in the MSL
    /// shader.
    ///
    /// Pipelines are cached by a composite key:
    /// `"<name>|<index>:b<0|1>|...|<index>:i<value>|..."`.  The 'b' prefix
    /// marks bool entries and the 'i' prefix marks i32 entries, making the
    /// format unambiguous regardless of constant ordering.  Distinct
    /// `(name, constants)` combinations each compile to a separate pipeline;
    /// the slow compilation path runs at most once per unique combination.
    ///
    /// # Errors
    ///
    /// * `MlxError::KernelNotFound` — no source registered for this name.
    /// * `MlxError::ShaderCompilationError` — MSL compilation, function
    ///   specialisation, or pipeline creation failed.
    pub fn get_pipeline_with_constants(
        &mut self,
        name: &str,
        device: &metal::DeviceRef,
        bool_constants: &[(usize, bool)],
        int_constants: &[(usize, i32)],
    ) -> Result<&ComputePipelineState> {
        // Build a composite cache key so distinct constant combinations each
        // compile to their own pipeline.  Bool entries use the 'b' type marker
        // and i32 entries use 'i'; this prevents a collision between, e.g.,
        // bool index 5 value 1 and int index 5 value 1.
        //
        // Note: with both slices empty the key degenerates to `name`, which
        // shares the cache slot used by the plain get_pipeline path — that is
        // benign, since an unspecialised compile is what both paths produce.
        let mut cache_key = name.to_string();
        for &(index, value) in bool_constants {
            cache_key.push('|');
            cache_key.push_str(&index.to_string());
            cache_key.push_str(if value { ":b1" } else { ":b0" });
        }
        for &(index, value) in int_constants {
            cache_key.push('|');
            cache_key.push_str(&index.to_string());
            cache_key.push(':');
            cache_key.push('i');
            cache_key.push_str(&value.to_string());
        }

        if !self.cache.contains_key(&cache_key) {
            // Slow path: compile the shader with function constant specialisation.
            let source = self.sources.get(name).ok_or_else(|| {
                MlxError::KernelNotFound(name.to_string())
            })?;

            let compile_opts = metal::CompileOptions::new();
            let library = device
                .new_library_with_source(source, &compile_opts)
                .map_err(|msg| MlxError::ShaderCompilationError {
                    name: name.to_string(),
                    message: msg,
                })?;

            // Build the FunctionConstantValues object with all bool and i32
            // constants.  Metal's set_constant_value_at_index reads the value
            // through a raw pointer; the pointed-to bytes must match the size
            // declared in the MSL shader (1 byte for bool, 4 bytes for int).
            //
            // NOTE(review): this relies on the Metal runtime copying the
            // pointed-to bytes during the call (Apple's
            // `setConstantValue:type:atIndex:` documents a copy), so taking
            // the address of a loop-local is sound — confirm if metal-rs
            // ever changes this binding to defer the read.
            let fcv = FunctionConstantValues::new();

            for &(index, value) in bool_constants {
                // MTLDataType::Bool = 53 (metal-rs argument.rs).
                // The Metal runtime reads it as an Objective-C BOOL (uint8_t).
                let v: u8 = if value { 1 } else { 0 };
                fcv.set_constant_value_at_index(
                    (&v as *const u8).cast::<std::ffi::c_void>(),
                    MTLDataType::Bool,
                    index as u64,
                );
            }

            for &(index, value) in int_constants {
                // MTLDataType::Int = 29 (metal-rs argument.rs).
                // The Metal runtime reads 4 bytes as a signed 32-bit integer,
                // matching the Metal shader type `constant int`.
                fcv.set_constant_value_at_index(
                    (&value as *const i32).cast::<std::ffi::c_void>(),
                    MTLDataType::Int,
                    index as u64,
                );
            }

            let function = library
                .get_function(name, Some(fcv))
                .map_err(|msg| MlxError::ShaderCompilationError {
                    name: name.to_string(),
                    message: msg,
                })?;

            // Label this specialisation with the full composite cache key
            // (e.g. `kernel_mul_mv_q4_0_f32|0:b1|3:i32`) so xctrace Metal
            // System Trace shows each function-constant variant as a distinct
            // pipeline.  Without this, all specialisations share a generic
            // "Compute Command 0" identifier and we cannot attribute µs/token
            // to a specific (kernel, constants) combination.  ADR-015 iter9b.
            let descriptor = ComputePipelineDescriptor::new();
            descriptor.set_compute_function(Some(&function));
            descriptor.set_label(&cache_key);

            let pipeline = device
                .new_compute_pipeline_state(&descriptor)
                .map_err(|msg| MlxError::ShaderCompilationError {
                    name: name.to_string(),
                    message: msg,
                })?;

            self.cache.insert(cache_key.clone(), pipeline);
        }

        // Guaranteed present after the insert above; ok_or_else (rather than
        // expect) upholds the no-panic policy.
        self.cache.get(&cache_key).ok_or_else(|| {
            MlxError::KernelNotFound(name.to_string())
        })
    }
885
886    /// Get a compiled compute pipeline for the named kernel, specialized with
887    /// Metal bool function constants.
888    ///
889    /// The `bool_constants` slice contains `(index, value)` pairs.  Each pair
890    /// maps to a `[[function_constant(index)]]` declaration in the MSL shader.
891    ///
892    /// This is a thin wrapper around [`get_pipeline_with_constants`] that
893    /// passes an empty `int_constants` slice.  Existing callers continue to
894    /// work without modification; the cache-key format for pure-bool pipelines
895    /// is compatible (bool entries carry the 'b' type marker, which is the
896    /// only format ever written by this wrapper).
897    ///
898    /// # Errors
899    ///
900    /// * `MlxError::KernelNotFound` — no source registered for this name.
901    /// * `MlxError::ShaderCompilationError` — MSL compilation, function
902    ///   specialisation, or pipeline creation failed.
903    pub fn get_pipeline_with_bool_constants(
904        &mut self,
905        name: &str,
906        device: &metal::DeviceRef,
907        bool_constants: &[(usize, bool)],
908    ) -> Result<&ComputePipelineState> {
909        self.get_pipeline_with_constants(name, device, bool_constants, &[])
910    }
911
912    /// Check if a pipeline for the given name is already compiled and cached.
913    pub fn is_cached(&self, name: &str) -> bool {
914        self.cache.contains_key(name)
915    }
916
917    /// Number of compiled pipelines currently in the cache.
918    pub fn cached_count(&self) -> usize {
919        self.cache.len()
920    }
921
922    /// Number of registered shader sources.
923    pub fn source_count(&self) -> usize {
924        self.sources.len()
925    }
926}
927
928impl Default for KernelRegistry {
929    fn default() -> Self {
930        Self::new()
931    }
932}
933
934#[cfg(test)]
935mod tests {
936    use super::*;
937
    /// Minimal Metal shader that uses a single int function constant.
    ///
    /// The kernel writes the constant value N into the first element of the
    /// output buffer, allowing the test to verify that the Metal compiler
    /// actually sees distinct specialisations for N=4 and N=8.
    ///
    /// The shader is intentionally trivial — we only need it to *compile* with
    /// an int function constant; correctness of the kernel logic is not under
    /// test here.
    ///
    /// Constant index 100 is arbitrary but must match the `(100, …)` pairs
    /// the tests below pass to `get_pipeline_with_constants`.
    const INT_FC_TEST_SHADER: &str = r#"
#include <metal_stdlib>
using namespace metal;

constant int test_N [[function_constant(100)]];

kernel void int_fc_test_kernel(
    device int* out [[buffer(0)]],
    uint tid [[thread_position_in_grid]])
{
    if (tid == 0) {
        out[0] = test_N;
    }
}
"#;
962
    /// Verify that `get_pipeline_with_constants` produces distinct cached
    /// pipelines for different i32 function-constant values, and that
    /// `get_pipeline_with_bool_constants` (the backward-compat wrapper) still
    /// works correctly with the new 'b'-prefixed cache-key format.
    ///
    /// This test requires a real Metal device: `Device::system_default()` is
    /// unwrapped with `expect`, so the test fails outright where none exists.
    /// NOTE(review): there is no `#[ignore]` attribute or cfg gate here —
    /// add one if this ever needs to run on non-Metal CI hosts.
    #[test]
    fn test_int_fc_distinct_pipelines_and_bool_compat() {
        let device = metal::Device::system_default()
            .expect("no Metal device — run on Apple Silicon or x86 Mac with Metal support");

        let mut registry = KernelRegistry::new();

        // Register the inline test shader under a name that cannot collide with
        // any production kernel.
        registry.register_source("int_fc_test_kernel", INT_FC_TEST_SHADER);

        // Compile with N=4.
        let p4_ptr = registry
            .get_pipeline_with_constants(
                "int_fc_test_kernel",
                &device,
                &[],                  // no bool constants
                &[(100, 4_i32)],      // int constant index 100 = 4
            )
            .expect("pipeline N=4 should compile") as *const _;

        // Snapshot the cache size after the N=4 compile; later assertions
        // are relative to this count rather than an absolute number.
        let count_after_n4 = registry.cached_count();

        // Compile with N=8 — must produce a SEPARATE pipeline.
        let p8_ptr = registry
            .get_pipeline_with_constants(
                "int_fc_test_kernel",
                &device,
                &[],
                &[(100, 8_i32)],
            )
            .expect("pipeline N=8 should compile") as *const _;

        // Cache must have grown by exactly 1.
        assert_eq!(
            registry.cached_count(),
            count_after_n4 + 1,
            "N=8 must produce a new cache entry"
        );

        // The two pipelines must be distinct objects in the cache.
        assert_ne!(
            p4_ptr, p8_ptr,
            "N=4 and N=8 specialisations must be separate ComputePipelineState objects"
        );

        // A second call with N=4 must return the SAME pipeline (cache hit, no
        // new compilation).
        let p4_again_ptr = registry
            .get_pipeline_with_constants(
                "int_fc_test_kernel",
                &device,
                &[],
                &[(100, 4_i32)],
            )
            .expect("pipeline N=4 cache hit should succeed") as *const _;

        assert_eq!(
            registry.cached_count(),
            count_after_n4 + 1,
            "repeated N=4 call must be a cache hit, not a new entry"
        );
        assert_eq!(
            p4_ptr, p4_again_ptr,
            "repeated N=4 call must return the same pipeline pointer"
        );

        // Verify backward compatibility: get_pipeline_with_bool_constants must
        // still route through get_pipeline_with_constants and produce a cached
        // pipeline without panicking.
        //
        // An empty bool_constants slice must be accepted, so a dedicated
        // bare-kernel shader with no function constants at all is registered
        // for this check — it keeps the Metal compiler from complaining about
        // undeclared or unset function constants, while still exercising the
        // wrapper's call path and cache-key format.
        const BARE_SHADER: &str = r#"
#include <metal_stdlib>
using namespace metal;
kernel void bare_kernel(device int* out [[buffer(0)]], uint tid [[thread_position_in_grid]]) {
    if (tid == 0) { out[0] = 42; }
}
"#;
        registry.register_source("bare_kernel", BARE_SHADER);

        let count_before_bool = registry.cached_count();
        let _bool_pipeline = registry
            .get_pipeline_with_bool_constants("bare_kernel", &device, &[])
            .expect("bool-constants wrapper with empty slice must succeed");

        assert_eq!(
            registry.cached_count(),
            count_before_bool + 1,
            "bool-constants wrapper must insert one new cache entry"
        );
    }
1073
    /// Verify that the `MTLComputePipelineState.label` produced by
    /// `get_pipeline` and `get_pipeline_with_constants` actually propagates
    /// from the descriptor to the resulting pipeline state.
    ///
    /// This is the in-process smoke check for ADR-015 iter9b: we cannot
    /// reach into xctrace from Rust, but we can read back the same `label`
    /// property xctrace consumes via `ComputePipelineStateRef::label()`.
    /// If labels are missing or wrong here, the MST trace will also show
    /// generic identifiers — so this test gates the iter9 retry's
    /// per-Q4_0-kernel attribution.
    ///
    /// Expected label formats: the plain path uses the bare kernel name;
    /// the constants path uses the composite cache key, e.g.
    /// `int_fc_test_kernel|100:i7`.
    #[test]
    fn test_pipeline_labels_propagate_for_mst() {
        let device = metal::Device::system_default()
            .expect("no Metal device — run on Apple Silicon or x86 Mac with Metal support");

        let mut registry = KernelRegistry::new();

        // Reuse the same trivial shaders as the int-FC test.
        registry.register_source("int_fc_test_kernel", INT_FC_TEST_SHADER);

        const BARE_SHADER_LABEL_TEST: &str = r#"
#include <metal_stdlib>
using namespace metal;
kernel void label_smoke_kernel(device int* out [[buffer(0)]], uint tid [[thread_position_in_grid]]) {
    if (tid == 0) { out[0] = 7; }
}
"#;
        registry.register_source("label_smoke_kernel", BARE_SHADER_LABEL_TEST);

        // Plain get_pipeline path — label must equal the kernel name.
        // Capture as owned String so the cache borrow is released before
        // the next get_pipeline_with_constants call below.
        let plain_label = registry
            .get_pipeline("label_smoke_kernel", &device)
            .expect("plain pipeline must compile")
            .label()
            .to_string();
        assert_eq!(
            plain_label, "label_smoke_kernel",
            "get_pipeline must label the pipeline with the kernel name (xctrace MST attribution)"
        );

        // Constants path — label must equal the composite cache key so each
        // function-constant variant is individually attributable in MST.
        // We capture the label as an owned String to release the borrow on
        // the cache before fetching the next specialisation.
        let label_v7 = registry
            .get_pipeline_with_constants(
                "int_fc_test_kernel",
                &device,
                &[],
                &[(100, 7_i32)],
            )
            .expect("specialised pipeline must compile")
            .label()
            .to_string();
        assert_eq!(
            label_v7, "int_fc_test_kernel|100:i7",
            "get_pipeline_with_constants must label with the cache_key so each \
             specialisation is distinct in xctrace MST"
        );

        // A second specialisation must produce a different label.
        let label_v13 = registry
            .get_pipeline_with_constants(
                "int_fc_test_kernel",
                &device,
                &[],
                &[(100, 13_i32)],
            )
            .expect("second specialised pipeline must compile")
            .label()
            .to_string();
        assert_eq!(label_v13, "int_fc_test_kernel|100:i13");
        assert_ne!(
            label_v7, label_v13,
            "distinct constant values must yield distinct pipeline labels"
        );
    }
1153}