// mlx_native/kernel_registry.rs

//! [`KernelRegistry`] — lazy compilation and caching of Metal compute pipelines.
//!
//! MSL shader source is embedded at compile time via `include_str!`.  On first
//! access, the source is compiled into a Metal library, the named function is
//! extracted, and a `ComputePipelineState` is created and cached.  Subsequent
//! calls return the cached pipeline.

use std::collections::HashMap;

use metal::{ComputePipelineDescriptor, ComputePipelineState, FunctionConstantValues, MTLDataType};

use crate::error::{MlxError, Result};

// MTLDataType numeric values (from metal-rs argument.rs, confirmed in Apple Metal spec):
//   Int  = 29
//   Bool = 53
// These are used when calling set_constant_value_at_index so the Metal runtime
// knows how wide each constant value is.

20/// Registry that lazily compiles and caches Metal compute pipelines from
21/// embedded MSL source.
22///
23/// # Usage
24///
25/// ```ignore
26/// let mut registry = KernelRegistry::new();
27/// let pipeline = registry.get_pipeline("elementwise_add", device.metal_device())?;
28/// encoder.encode(&pipeline, &buffers, grid, tg);
29/// ```
30///
31/// # Thread Safety
32///
33/// `KernelRegistry` is **not** `Sync` by default (it uses `&mut self` for
34/// `get_pipeline` to allow mutable cache insertion).  If you need concurrent
35/// access, wrap it in a `Mutex` or use one registry per thread.
36pub struct KernelRegistry {
37    /// Cached pipelines keyed by kernel function name.
38    cache: HashMap<String, ComputePipelineState>,
39    /// MSL source text keyed by kernel function name.
40    ///
41    /// Populated at construction time with all embedded shader sources.
42    sources: HashMap<String, &'static str>,
43}
44
45impl KernelRegistry {
46    /// Create a new registry with all embedded shader sources pre-registered.
47    ///
48    /// No compilation happens here — shaders are compiled lazily on first use.
49    pub fn new() -> Self {
50        let mut sources = HashMap::new();
51
52        // Register embedded shader sources.
53        sources.insert(
54            "placeholder".into(),
55            include_str!("shaders/placeholder.metal"),
56        );
57        sources.insert(
58            "quantized_matmul".into(),
59            include_str!("shaders/quantized_matmul.metal"),
60        );
61        sources.insert(
62            "quantized_matmul_simd".into(),
63            include_str!("shaders/quantized_matmul.metal"),
64        );
65        sources.insert(
66            "quantized_matmul_simd_bf16".into(),
67            include_str!("shaders/quantized_matmul.metal"),
68        );
69        sources.insert(
70            "quantized_matmul_simd_bf16_expert".into(),
71            include_str!("shaders/quantized_matmul.metal"),
72        );
73
74        // GGML block-format quantized mat-vec kernels (ADR-006 Phase 3)
75        let ggml_src: &'static str =
76            include_str!("shaders/quantized_matmul_ggml.metal");
77        sources.insert("kernel_mul_mv_q4_0_f32".into(), ggml_src);
78        sources.insert("kernel_mul_mv_q8_0_f32".into(), ggml_src);
79        sources.insert("kernel_mul_mv_q6_K_f32".into(), ggml_src);
80
81        // GGML block-format quantized matrix-matrix kernels
82        // (ADR-011 Phase 3 Wave P3a: port of llama.cpp's kernel_mul_mm_<q>_f32).
83        // Used at prefill m > 8 to reuse each weight tile across a 32-row
84        // block via threadgroup-staged simdgroup MMA, instead of re-reading
85        // every block per prompt-token as the mv kernel does.
86        let ggml_mm_src: &'static str =
87            include_str!("shaders/quantized_matmul_mm.metal");
88        sources.insert("kernel_mul_mm_q4_0_f32".into(), ggml_mm_src);
89        sources.insert("kernel_mul_mm_q8_0_f32".into(), ggml_mm_src);
90        sources.insert("kernel_mul_mm_q6_K_f32".into(), ggml_mm_src);
91
92        // GGML block-format quantized matrix-matrix kernels — tensor API
93        // variant (ADR-011 Phase 3 Wave P3b-tensor: port of llama.cpp's
94        // kernel_mul_mm_impl `#ifdef GGML_METAL_HAS_TENSOR` branch).
95        // Uses Apple's MetalPerformancePrimitives `tensor_ops::matmul2d`
96        // primitive which on M3+ dispatches to hardware tensor cores for
97        // 2-3x the effective FLOP throughput vs the simdgroup MMA path.
98        // Only compiled on devices where the tensor API is available; the
99        // kernel_registry's runtime-probe (see MlxDevice::has_tensor) gates
100        // compilation so non-tensor devices transparently fall back to the
101        // non-tensor `kernel_mul_mm_<q>_f32` kernels.
102        let ggml_mm_tensor_src: &'static str =
103            include_str!("shaders/quantized_matmul_mm_tensor.metal");
104        sources.insert("kernel_mul_mm_q4_0_tensor_f32".into(), ggml_mm_tensor_src);
105        sources.insert("kernel_mul_mm_q4_0_tensor_bf16_perm021".into(), ggml_mm_tensor_src);
106        sources.insert("kernel_mul_mm_q6_K_tensor_bf16_perm021".into(), ggml_mm_tensor_src);
107        sources.insert("kernel_mul_mm_q8_0_tensor_f32".into(), ggml_mm_tensor_src);
108        sources.insert("kernel_mul_mm_q6_K_tensor_f32".into(), ggml_mm_tensor_src);
109
110        // Dense bf16×f32 → f32 tensor-API matmul (non-flash-attention
111        // prefill Q@K^T and scores@V, modeled on llama.cpp's
112        // kernel_mul_mm_bf16_f32 with the GGML_METAL_HAS_TENSOR branch
113        // active).  Tile geometry and write-back identical to the
114        // quantized tensor kernel; only the A-stage copy (bfloat →
115        // bfloat, no dequantize) differs.
116        let dense_mm_bf16_tensor_src: &'static str =
117            include_str!("shaders/dense_mm_bf16_tensor.metal");
118        sources.insert("hf2q_dense_mm_bf16_f32_tensor".into(), dense_mm_bf16_tensor_src);
119
120        // Dense f32×f32 → f32 tensor-API matmul (F32-everywhere
121        // sibling of dense_mm_bf16_tensor).  Used by hf2q's ADR-005
122        // iter-118 BF16-vs-F32 ViT attention A/B diagnostic to remove
123        // the BF16 K-stage cast as a confounding variable.  Port of
124        // llama.cpp's kernel_mul_mm_f32_f32 specialization
125        // (ggml-metal.metal:10098) on the GGML_METAL_HAS_TENSOR
126        // branch.  Same tile geometry (NR0=64 NR1=32 NK=32) but
127        // float-everywhere shmem staging.
128        let dense_mm_f32_f32_tensor_src: &'static str =
129            include_str!("shaders/dense_mm_f32_f32.metal");
130        sources.insert("hf2q_dense_mm_f32_f32_tensor".into(), dense_mm_f32_f32_tensor_src);
131
132        // Dense f16×f32 → f32 tensor-API matmul (F16-staging sibling
133        // of dense_mm_bf16_tensor).  Used by hf2q's ADR-005 Phase 2c
134        // iter-128 gemma4v ViT precision-parity path: every mmproj
135        // weight is stored as F16 in GGUF, peer's `kernel_mul_mm_f16_f32`
136        // (`ggml-metal.metal:10099`) stages BOTH A and B as `half` in
137        // shmem and computes on `simdgroup_half8x8`.  Matches peer
138        // per-element rounding budget exactly (10-bit mantissa vs
139        // BF16's 7-bit), closing the 1.16x/block cascade compound that
140        // iter-127 numerically bisected to BF16 staging.  Same tile
141        // geometry as the BF16 sibling (NR0=64 NR1=32 NK=32, 8 KB
142        // shmem) — half and bfloat share 16-bit storage.
143        let dense_mm_f16_tensor_src: &'static str =
144            include_str!("shaders/dense_mm_f16_tensor.metal");
145        sources.insert("hf2q_dense_mm_f16_f32_tensor".into(), dense_mm_f16_tensor_src);
146
147        // Dense bf16×f32 → f32 GEMV (matrix-vector multiply) — optimized
148        // for M=1 single-token decode.  Port of llama.cpp's
149        // kernel_mul_mv_bf16_f32_4 (bfloat4-vectorized GEMV kernel).
150        // Used in apply_linear_projection_f32 when seq_len=1 and the
151        // weight matrix is BF16, replacing the MM kernel (~2× faster for
152        // M=1 due to better memory bandwidth utilization per thread).
153        let dense_gemv_bf16_src: &'static str =
154            include_str!("shaders/dense_gemv_bf16.metal");
155        sources.insert("hf2q_dense_gemv_bf16_f32_4".into(), dense_gemv_bf16_src);
156
157        // Fused scale-mask-softmax for the non-flash-attention prefill
158        // path.  One row-local threadgroup per (head, query) pair
159        // replaces three separate dispatches (scale, mask-add, softmax);
160        // reads a bf16 mask (-INF at masked positions, matching
161        // flash_attn_prefill_mask.metal) that is shared across heads.
162        let scale_mask_softmax_src: &'static str =
163            include_str!("shaders/scale_mask_softmax.metal");
164        sources.insert("scale_mask_softmax_f32".into(), scale_mask_softmax_src);
165
166        // Expert-routed (MoE) quantized matmul kernel (Story 2.1)
167        sources.insert(
168            "quantized_matmul_id".into(),
169            include_str!("shaders/quantized_matmul_id.metal"),
170        );
171
172        // Expert-routed (MoE) GGML block-format quantized matmul kernels
173        let ggml_id_src: &'static str =
174            include_str!("shaders/quantized_matmul_id_ggml.metal");
175        sources.insert("kernel_mul_mv_id_q4_0_f32".into(), ggml_id_src);
176        sources.insert("kernel_mul_mv_id_q8_0_f32".into(), ggml_id_src);
177        sources.insert("kernel_mul_mv_id_q5_K_f32".into(), ggml_id_src);
178        sources.insert("kernel_mul_mv_id_q6_K_f32".into(), ggml_id_src);
179        // Fused-SwiGLU mv_id variants (ADR-012 §Optimize / Task #15):
180        // computes y[r][n] = sum_k(dequant(W[expert][n][k]) * silu(gate[r][k]) * up[r][k])
181        // in one dispatch — replaces silu_mul + expert_down sequence.
182        sources.insert("kernel_mul_mv_id_q4_0_f32_swiglu".into(), ggml_id_src);
183
184        // Expert-routed (MoE) GGML block-format QUANTIZED MATRIX-MATRIX kernels
185        // (ADR-011 Phase 3 Wave P3a: port of llama.cpp's
186        // `kernel_mul_mm_id_map0_ne20_N` + `kernel_mul_mm_id_<q>_f32`).
187        // Two-stage dispatch: map0 regroups the token-to-expert table into
188        // per-expert routed-token lists, then mm_id stages a 64x32 expert
189        // weight tile into threadgroup shmem and reuses it across a 32-row
190        // block of that expert's routed tokens.
191        let ggml_id_mm_src: &'static str =
192            include_str!("shaders/quantized_matmul_id_mm.metal");
193        sources.insert("kernel_mul_mm_id_map0_ne20_1".into(), ggml_id_mm_src);
194        sources.insert("kernel_mul_mm_id_map0_ne20_8".into(), ggml_id_mm_src);
195        sources.insert("kernel_mul_mm_id_q4_0_f32".into(), ggml_id_mm_src);
196        sources.insert("kernel_mul_mm_id_q8_0_f32".into(), ggml_id_mm_src);
197        sources.insert("kernel_mul_mm_id_q6_K_f32".into(), ggml_id_mm_src);
198
199        // MoE-routed quantized matrix-matrix kernels — tensor API variant
200        // (ADR-011 Phase 3 Wave P3b-tensor).  Uses the MPP tensor_ops
201        // matmul2d primitive for hardware-tensor-core MMA on M3+.  Only
202        // the mm_id kernel is ported — map0 is a short pre-pass (not
203        // matmul) and continues to use the simdgroup version.
204        let ggml_id_mm_tensor_src: &'static str =
205            include_str!("shaders/quantized_matmul_id_mm_tensor.metal");
206        sources.insert("kernel_mul_mm_id_q4_0_tensor_f32".into(), ggml_id_mm_tensor_src);
207        sources.insert("kernel_mul_mm_id_q8_0_tensor_f32".into(), ggml_id_mm_tensor_src);
208        sources.insert("kernel_mul_mm_id_q6_K_tensor_f32".into(), ggml_id_mm_tensor_src);
209
210        // Embedding kernels (Story 1.5)
211        let embedding_src: &'static str = include_str!("shaders/embedding.metal");
212        sources.insert("embedding_gather_4bit".into(), embedding_src);
213        sources.insert("embedding_gather_6bit".into(), embedding_src);
214
215        // MoE gate kernel (Story 1.5)
216        let moe_gate_src: &'static str = include_str!("shaders/moe_gate.metal");
217        sources.insert("moe_gate".into(), moe_gate_src);
218
219        // MoE dispatch kernels (Story 1.5)
220        let moe_dispatch_src: &'static str = include_str!("shaders/moe_dispatch.metal");
221        sources.insert("fused_gelu_mul".into(), moe_dispatch_src);
222        sources.insert("moe_swiglu_fused".into(), moe_dispatch_src);
223        sources.insert("moe_swiglu_batch".into(), moe_dispatch_src);
224        sources.insert("moe_swiglu_seq".into(), moe_dispatch_src);
225        sources.insert("moe_accumulate".into(), moe_dispatch_src);
226        sources.insert("moe_weighted_sum".into(), moe_dispatch_src);
227        sources.insert("moe_weighted_sum_seq".into(), moe_dispatch_src);
228        sources.insert("zero_buffer".into(), moe_dispatch_src);
229        sources.insert("naive_matvec_f32".into(), moe_dispatch_src);
230        sources.insert("moe_gather_topk_weights".into(), moe_dispatch_src);
231        // bf16 variants (Phase 2 bf16 activation path)
232        sources.insert("fused_gelu_mul_bf16".into(), moe_dispatch_src);
233        sources.insert("moe_swiglu_seq_bf16".into(), moe_dispatch_src);
234        sources.insert("moe_weighted_sum_seq_bf16_input".into(), moe_dispatch_src);
235
236        // Batched KV cache copy kernels
237        let kv_cache_src: &'static str = include_str!("shaders/kv_cache_copy.metal");
238        sources.insert("kv_cache_copy_batch_f32".into(), kv_cache_src);
239        sources.insert("kv_cache_copy_batch_f32_to_f16".into(), kv_cache_src);
240        sources.insert("kv_cache_copy_seq_f32".into(), kv_cache_src);
241        sources.insert("kv_cache_copy_seq_f32_to_f16".into(), kv_cache_src);
242        // Wave P4.11 — fused K+V copy variants
243        sources.insert("kv_cache_copy_seq_f32_kv_dual".into(), kv_cache_src);
244        sources.insert("kv_cache_copy_seq_f32_to_f16_kv_dual".into(), kv_cache_src);
245        // bf16-source KV cache copy (Phase 2 bf16 activation path)
246        sources.insert("kv_cache_copy_seq_bf16".into(), kv_cache_src);
247
248        // Elementwise and transpose kernels (Story 1.5)
249        let elementwise_src: &'static str = include_str!("shaders/elementwise.metal");
250        sources.insert("elementwise_add_f32".into(), elementwise_src);
251        sources.insert("elementwise_add_f16".into(), elementwise_src);
252        sources.insert("elementwise_mul_f32".into(), elementwise_src);
253        sources.insert("elementwise_mul_f16".into(), elementwise_src);
254        sources.insert("elementwise_add_bf16".into(), elementwise_src);
255        sources.insert("elementwise_mul_bf16".into(), elementwise_src);
256        sources.insert("cast_f16_to_f32".into(), elementwise_src);
257        sources.insert("cast_f32_to_f16".into(), elementwise_src);
258        sources.insert("cast_bf16_to_f32".into(), elementwise_src);
259        sources.insert("cast_f32_to_bf16".into(), elementwise_src);
260        sources.insert("scalar_mul_bf16".into(), elementwise_src);
261        sources.insert("scalar_mul_f32".into(), elementwise_src);
262        sources.insert("embedding_gather_scale_f32".into(), elementwise_src);
263        sources.insert("embedding_gather_scale_batch_f32".into(), elementwise_src);
264        sources.insert("permute_021_bf16".into(), elementwise_src);
265        sources.insert("transpose_last2_bf16".into(), elementwise_src);
266        sources.insert("transpose_last2_f16".into(), elementwise_src);
267        sources.insert("permute_021_f32".into(), elementwise_src);
268        sources.insert("permute_021_bf16_to_f32".into(), elementwise_src);
269        sources.insert("transpose_2d_f32".into(), elementwise_src);
270        sources.insert("transpose_2d_f16".into(), elementwise_src);
271
272        // Attention kernels (Story 1.3)
273        let sdpa_src: &'static str = include_str!("shaders/sdpa.metal");
274        sources.insert("sdpa".into(), sdpa_src);
275        sources.insert("sdpa_bf16".into(), sdpa_src);
276        let sdpa_sliding_src: &'static str = include_str!("shaders/sdpa_sliding.metal");
277        sources.insert("sdpa_sliding".into(), sdpa_sliding_src);
278        sources.insert("sdpa_sliding_bf16".into(), sdpa_sliding_src);
279
280        // Flash-attention tiled prefill kernel (ADR-011 Phase 1).
281        // Ten entry points; all backed by the same shader source.
282        // Pipelines are compiled with function constants via
283        // `get_pipeline_with_bool_constants` — not `get_pipeline`.
284        let flash_attn_prefill_src: &'static str =
285            include_str!("shaders/flash_attn_prefill.metal");
286        // D=256 variants (BQ=32, BK=16, WM=4, WN=1 — 128 threads/threadgroup)
287        sources.insert(
288            "steel_attention_float32_bq32_bk16_bd256_wm4_wn1_maskfloat32".into(),
289            flash_attn_prefill_src,
290        );
291        sources.insert(
292            "steel_attention_float32_bq32_bk16_bd256_wm4_wn1_maskbool_".into(),
293            flash_attn_prefill_src,
294        );
295        sources.insert(
296            "steel_attention_bfloat16_bq32_bk16_bd256_wm4_wn1_maskbfloat16".into(),
297            flash_attn_prefill_src,
298        );
299        sources.insert(
300            "steel_attention_bfloat16_bq32_bk16_bd256_wm4_wn1_maskbool_".into(),
301            flash_attn_prefill_src,
302        );
303        sources.insert(
304            "steel_attention_float16_bq32_bk16_bd256_wm4_wn1_maskfloat16".into(),
305            flash_attn_prefill_src,
306        );
307        sources.insert(
308            "steel_attention_float16_bq32_bk16_bd256_wm4_wn1_maskbool_".into(),
309            flash_attn_prefill_src,
310        );
311        // D=512 variants (BQ=8, BK=8, WM=1, WN=1 — 32 threads/threadgroup)
312        // NOTE: f32 at D=512 is NOT instantiated — threadgroup memory exceeds
313        // the 32 KB Metal limit (candle sdpa.rs:86-94).
314        sources.insert(
315            "steel_attention_bfloat16_bq8_bk8_bd512_wm1_wn1_maskbfloat16".into(),
316            flash_attn_prefill_src,
317        );
318        sources.insert(
319            "steel_attention_bfloat16_bq8_bk8_bd512_wm1_wn1_maskbool_".into(),
320            flash_attn_prefill_src,
321        );
322        sources.insert(
323            "steel_attention_float16_bq8_bk8_bd512_wm1_wn1_maskfloat16".into(),
324            flash_attn_prefill_src,
325        );
326        sources.insert(
327            "steel_attention_float16_bq8_bk8_bd512_wm1_wn1_maskbool_".into(),
328            flash_attn_prefill_src,
329        );
330
331        // Flash attention vector kernels — SIMD-vectorized decode-path SDPA
332        // (ported from llama.cpp flash_attn_ext_vec)
333        let flash_attn_vec_src: &'static str =
334            include_str!("shaders/flash_attn_vec.metal");
335        sources.insert("flash_attn_vec_dk256".into(), flash_attn_vec_src);
336        sources.insert("flash_attn_vec_dk512".into(), flash_attn_vec_src);
337        sources.insert("flash_attn_vec_reduce_dk256".into(), flash_attn_vec_src);
338        sources.insert("flash_attn_vec_reduce_dk512".into(), flash_attn_vec_src);
339        // F16 KV variants (Phase 4a)
340        sources.insert("flash_attn_vec_f16kv_dk256".into(), flash_attn_vec_src);
341        sources.insert("flash_attn_vec_f16kv_dk512".into(), flash_attn_vec_src);
342
343        // RoPE, normalization, activation kernels (Story 1.4)
344        let rope_src: &'static str = include_str!("shaders/rope.metal");
345        sources.insert("rope_f32".into(), rope_src);
346        sources.insert("rope_f16".into(), rope_src);
347        sources.insert("rope_bf16".into(), rope_src);
348        sources.insert("rope_neox_bf16".into(), rope_src);
349        sources.insert("rope_neox_f32".into(), rope_src);
350        let rms_norm_src: &'static str = include_str!("shaders/rms_norm.metal");
351        sources.insert("rms_norm_f32".into(), rms_norm_src);
352        sources.insert("rms_norm_f16".into(), rms_norm_src);
353        sources.insert("rms_norm_bf16".into(), rms_norm_src);
354        sources.insert("rms_norm_no_scale_bf16".into(), rms_norm_src);
355        sources.insert("rms_norm_no_scale_f32".into(), rms_norm_src);
356        sources.insert("rms_norm_no_scale_f32_dual".into(), rms_norm_src);
357        sources.insert("rms_norm_f32_triple".into(), rms_norm_src);
358        sources.insert("fused_post_attn_triple_norm_f32".into(), rms_norm_src);
359        sources.insert("rms_norm_no_scale_f32_dual_perm".into(), rms_norm_src);
360        // Fused RMS norm + elementwise multiply kernels (Phase 4e.2)
361        sources.insert("rms_norm_mul_f32".into(), rms_norm_src);
362        sources.insert("rms_norm_mul_f16".into(), rms_norm_src);
363        sources.insert("rms_norm_mul_bf16".into(), rms_norm_src);
364        // L2 norm kernels (ADR-013 Decision 3 — Gated DeltaNet Q/K norm)
365        let l2_norm_src: &'static str = include_str!("shaders/l2_norm.metal");
366        sources.insert("l2_norm_f32".into(), l2_norm_src);
367        sources.insert("l2_norm_f16".into(), l2_norm_src);
368        sources.insert("l2_norm_bf16".into(), l2_norm_src);
369        // Cumulative-sum kernels (ADR-013 Decision 4 — DeltaNet decay-mask base)
370        let cumsum_src: &'static str = include_str!("shaders/cumsum.metal");
371        sources.insert("cumsum_f32".into(), cumsum_src);
372        sources.insert("cumsum_bf16".into(), cumsum_src);
373        // SSM conv kernels (ADR-013 Decision 7 — DeltaNet 1D causal conv + SiLU)
374        let ssm_conv_src: &'static str = include_str!("shaders/ssm_conv.metal");
375        sources.insert("ssm_conv_forward_f32".into(), ssm_conv_src);
376        sources.insert("ssm_conv_forward_bf16".into(), ssm_conv_src);
377        sources.insert("ssm_conv_state_update_f32".into(), ssm_conv_src);
378        sources.insert("ssm_conv_state_update_bf16".into(), ssm_conv_src);
379        // Tri-solve kernels (ADR-013 Decision 5 — chunked DeltaNet debug path)
380        let tri_solve_src: &'static str = include_str!("shaders/tri_solve.metal");
381        sources.insert("tri_solve_lower_unit_f32".into(), tri_solve_src);
382        sources.insert("tri_solve_lower_unit_bf16".into(), tri_solve_src);
383        // Rope-multi kernels (ADR-013 Decision 10 — IMROPE for Qwen3.5)
384        let rope_multi_src: &'static str = include_str!("shaders/rope_multi.metal");
385        sources.insert("rope_multi_f32".into(), rope_multi_src);
386        sources.insert("rope_multi_bf16".into(), rope_multi_src);
387        // Gated DeltaNet fused kernel (ADR-013 Decision 6 — centerpiece)
388        let gdn_src: &'static str = include_str!("shaders/gated_delta_net.metal");
389        sources.insert("gated_delta_net_f32".into(), gdn_src);
390        // Wave 5b — chunk-parallel inter-chunk state-recurrence kernel
391        // (the one new kernel in the chunk-parallel pipeline; spec source:
392        // arXiv 2412.06464 §4 + FLA chunk_delta_h.py:43-298).
393        let gdn_chunk_src: &'static str =
394            include_str!("shaders/gated_delta_net_chunk.metal");
395        sources.insert(
396            "gated_delta_net_chunk_inter_state_bf16".into(),
397            gdn_chunk_src,
398        );
399        // Wave 5b.1 iter 2 — chunk_scaled_dot_kkt kernel (input-side of
400        // the chunk pipeline; spec source: FLA chunk_scaled_dot_kkt.py:36-99).
401        let gdn_kkt_src: &'static str =
402            include_str!("shaders/gated_delta_net_kkt.metal");
403        sources.insert("gated_delta_net_kkt_bf16".into(), gdn_kkt_src);
404        // Wave 5b.1 iter 2 — recompute_w_u_fwd kernel (applies post-solve A
405        // to (β·v) and (β·k·exp(g)) to produce w and u; spec source: FLA
406        // wy_fast.py:29-117).
407        let gdn_recompute_wu_src: &'static str =
408            include_str!("shaders/gated_delta_net_recompute_wu.metal");
409        sources.insert(
410            "gated_delta_net_recompute_wu_bf16".into(),
411            gdn_recompute_wu_src,
412        );
413        // Wave 5b.1 iter 3 — chunk_fwd_o kernel (per-chunk output: closes
414        // the chunk pipeline; spec source: FLA chunk_o.py:42-138).
415        let gdn_chunk_o_src: &'static str =
416            include_str!("shaders/gated_delta_net_chunk_o.metal");
417        sources.insert("gated_delta_net_chunk_o_bf16".into(), gdn_chunk_o_src);
418        // Wave 5b.1 iter 4 — orchestrator helper kernels:
419        //   chunk_local_cumsum_g_f32      — per-chunk prefix sum on g [B, T, H]
420        //   chunk_tri_solve_invert_f32    — per-chunk-block (I + A_strict)^-1
421        //                                   on FLA's [B, T, H, BT] layout.
422        let chunk_local_cumsum_g_src: &'static str =
423            include_str!("shaders/chunk_local_cumsum_g.metal");
424        sources.insert(
425            "chunk_local_cumsum_g_f32".into(),
426            chunk_local_cumsum_g_src,
427        );
428        let chunk_tri_solve_invert_src: &'static str =
429            include_str!("shaders/chunk_gated_delta_rule_tri_solve_invert.metal");
430        sources.insert(
431            "chunk_tri_solve_invert_f32".into(),
432            chunk_tri_solve_invert_src,
433        );
434        // Sigmoid-gated elementwise multiply (ADR-013 Decision 9 — full-attn output gate)
435        let sigmoid_mul_src: &'static str = include_str!("shaders/sigmoid_mul.metal");
436        sources.insert("sigmoid_mul_f32".into(), sigmoid_mul_src);
437        sources.insert("sigmoid_mul_bf16".into(), sigmoid_mul_src);
438        let silu_mul_src: &'static str = include_str!("shaders/silu_mul.metal");
439        sources.insert("silu_mul_f32".into(), silu_mul_src);
440        let compute_g_beta_src: &'static str = include_str!("shaders/compute_g_beta.metal");
441        sources.insert("compute_g_beta_f32".into(), compute_g_beta_src);
442        let ssm_norm_gate_src: &'static str = include_str!("shaders/ssm_norm_gate.metal");
443        sources.insert("ssm_norm_gate_f32".into(), ssm_norm_gate_src);
444        let gelu_src: &'static str = include_str!("shaders/gelu.metal");
445        sources.insert("gelu_f32".into(), gelu_src);
446        sources.insert("gelu_f16".into(), gelu_src);
447        sources.insert("gelu_bf16".into(), gelu_src);
448        let softmax_src: &'static str = include_str!("shaders/softmax.metal");
449        sources.insert("softmax_f32".into(), softmax_src);
450        sources.insert("softmax_f16".into(), softmax_src);
451        sources.insert("softmax_bf16".into(), softmax_src);
452        let softcap_src: &'static str = include_str!("shaders/softcap.metal");
453        sources.insert("softcap_f32".into(), softcap_src);
454        sources.insert("softcap_f16".into(), softcap_src);
455        sources.insert("softcap_bf16".into(), softcap_src);
456
457        // Fused norm-add kernels — Gemma4 post-attention / post-FFN ordering:
458        //   normed = rms_norm(input, weight, eps);  output = residual + normed
459        let fused_norm_add_src: &'static str =
460            include_str!("shaders/fused_norm_add_bf16.metal");
461        sources.insert("fused_norm_add_bf16".into(), fused_norm_add_src);
462        sources.insert("fused_norm_add_no_weight_bf16".into(), fused_norm_add_src);
463
464        // Fused head-norm + RoPE f32 kernel — replaces separate rms_norm + rope_neox_f32
465        let fused_hnr_f32_src: &'static str =
466            include_str!("shaders/fused_head_norm_rope_f32.metal");
467        sources.insert("fused_head_norm_rope_f32".into(), fused_hnr_f32_src);
468
469        // Fused head-norm + RoPE bf16 kernels (single-token + batch prefill)
470        // Both entry points live in the same .metal file.
471        let fused_hnr_bf16_src: &'static str =
472            include_str!("shaders/fused_head_norm_rope_bf16.metal");
473        sources.insert("fused_head_norm_rope_bf16".into(), fused_hnr_bf16_src);
474        sources.insert("fused_head_norm_rope_batch_bf16".into(), fused_hnr_bf16_src);
475
476        // Fused norm-add f32 kernels — post-attention / post-FFN / end-of-layer
477        let fused_norm_add_f32_src: &'static str =
478            include_str!("shaders/fused_norm_add_f32.metal");
479        sources.insert("fused_norm_add_f32".into(), fused_norm_add_f32_src);
480        sources.insert("fused_residual_norm_f32".into(), fused_norm_add_f32_src);
481        sources.insert("fused_residual_norm_scalar_f32".into(), fused_norm_add_f32_src);
482        sources.insert("fused_moe_routing_f32".into(), fused_norm_add_f32_src);
483        sources.insert("fused_moe_routing_batch_f32".into(), fused_norm_add_f32_src);
484        sources.insert("fused_norm_add_scalar_f32".into(), fused_norm_add_f32_src);
485        sources.insert("fused_moe_wsum_norm_add_f32".into(), fused_norm_add_f32_src);
486        sources.insert("fused_moe_wsum_dnorm_add_f32".into(), fused_norm_add_f32_src);
487
488        // Argsort kernel (Story 2.3) — MoE top-K routing
489        let argsort_src: &'static str = include_str!("shaders/argsort.metal");
490        sources.insert("argsort_desc_f32".into(), argsort_src);
491
492        // Gather / index_select kernel (Story 2.4)
493        let gather_src: &'static str = include_str!("shaders/gather.metal");
494        sources.insert("gather_f32".into(), gather_src);
495
496        // F32 KV cache copy kernel (Session merge S1+S2)
497        let kv_cache_copy_src: &'static str =
498            include_str!("shaders/kv_cache_copy.metal");
499        sources.insert("kv_cache_copy".into(), kv_cache_copy_src);
500        sources.insert("kv_cache_copy_f32".into(), kv_cache_copy_src);
501
502        // Strided copy kernel (Story 2.5)
503        let copy_src: &'static str = include_str!("shaders/copy.metal");
504        sources.insert("strided_copy_f32".into(), copy_src);
505        sources.insert("offset_copy_f32".into(), copy_src);
506
507        // Fused-QKV split kernel (ADR-005 W-5b.18 — replaces hf2q CPU
508        // download → triple-loop split → 3× upload round-trip in
509        // gpu_delta_net::layer_qkv_deinterleave).
510        let qkv_split_src: &'static str = include_str!("shaders/qkv_split.metal");
511        sources.insert("qkv_split_f32".into(), qkv_split_src);
512
513        // Tiled-GQA broadcast kernel (ADR-005 W-5b.19 — replaces hf2q CPU
514        // tiled-replicate at gpu_delta_net::apply_gated_delta_net_chunk
515        // GQA pre-expansion, ~497 ms / 10.4 ms-per-layer at PP4106).
516        let repeat_tiled_src: &'static str =
517            include_str!("shaders/repeat_tiled.metal");
518        sources.insert("repeat_tiled_f32".into(), repeat_tiled_src);
519
520        // Dense F16 GEMM kernel (Story 2.6) — lm_head projection
521        let dense_gemm_src: &'static str = include_str!("shaders/dense_gemm.metal");
522        sources.insert("dense_gemm_f16".into(), dense_gemm_src);
523        sources.insert("dense_matvec_f16".into(), dense_gemm_src);
524        sources.insert("dense_matvec_f16w_f32io".into(), dense_gemm_src);
525        // BF16-weight mat-vec: BF16 weights × F32 input → F32 output (decode lm_head)
526        sources.insert("dense_matvec_bf16w_f32io".into(), dense_gemm_src);
527        // Pure F32 mat-vec: F32 weights × F32 input → F32 output (decode lm_head)
528        sources.insert("dense_matvec_f32".into(), dense_gemm_src);
529
530        // Standalone FWHT for TurboQuant pre/post-rotation (SIMD shuffle, zero barriers)
531        let fwht_src: &'static str = include_str!("shaders/fwht_standalone.metal");
532        sources.insert("fwht_standalone_f32_d256".into(), fwht_src);
533        sources.insert("fwht_standalone_f32_d512".into(), fwht_src);
534        // ADR-007 iter-14 D1 SRHT variants: sign pre-mult (for Q) + sign undo (for output)
535        sources.insert("fwht_sign_premult_f32_d256".into(), fwht_src);
536        sources.insert("fwht_sign_premult_f32_d512".into(), fwht_src);
537        sources.insert("fwht_sign_undo_f32_d256".into(), fwht_src);
538        sources.insert("fwht_sign_undo_f32_d512".into(), fwht_src);
539
540        // Fast Hadamard quantize (SIMD shuffle, zero barriers)
541        let hq_fast_src: &'static str = include_str!("shaders/hadamard_quantize_kv_fast.metal");
542        sources.insert("hadamard_quantize_kv_fast_d256".into(), hq_fast_src);
543        sources.insert("hadamard_quantize_kv_fast_d512".into(), hq_fast_src);
544        // Track B (iter-21): higher-bit (5/6-bit) quantize kernels (byte-packed)
545        sources.insert("hadamard_quantize_kv_hb_d256".into(), hq_fast_src);
546        sources.insert("hadamard_quantize_kv_hb_d512".into(), hq_fast_src);
547
548        // iter-20 Leg F: TQ KV dequantize kernel (nibbles+norms → F32)
549        let tq_dq_src: &'static str = include_str!("shaders/tq_dequantize_kv.metal");
550        sources.insert("tq_dequantize_kv".into(), tq_dq_src);
551        // Track B (iter-21): higher-bit dequantize kernel (byte-packed indices)
552        sources.insert("tq_dequantize_hb_kv".into(), tq_dq_src);
553
554        // iter-24: native higher-bit (5/6/8-bit) TQ SDPA kernel (byte-packed K/V)
555        let tq_hb_src: &'static str = include_str!("shaders/flash_attn_vec_tq_hb.metal");
556        sources.insert("flash_attn_vec_tq_hb_dk256".into(), tq_hb_src);
557        sources.insert("flash_attn_vec_tq_hb_dk512".into(), tq_hb_src);
558
559        // GPU sampling kernels — eliminate logits readback (Phase 6)
560        let argmax_src: &'static str = include_str!("shaders/argmax.metal");
561        sources.insert("argmax_f32".into(), argmax_src);
562        let softmax_sample_src: &'static str =
563            include_str!("shaders/softmax_sample.metal");
564        sources.insert("softmax_sample_f32".into(), softmax_sample_src);
565        // Top-K kernel for Q8 rerank: avoids full-logits readback.
566        let top_k_src: &'static str = include_str!("shaders/top_k.metal");
567        sources.insert("top_k_f32".into(), top_k_src);
568
569        // MoE GPU routing + weighted reduce (ADR-013 P13.3 perf).
570        // Replaces CPU softmax+topk round-trip and CPU weighted accumulate.
571        let moe_stk_src: &'static str =
572            include_str!("shaders/moe_softmax_topk.metal");
573        sources.insert("moe_softmax_topk_f32".into(), moe_stk_src);
574        let moe_wr_src: &'static str =
575            include_str!("shaders/moe_weighted_reduce.metal");
576        sources.insert("moe_weighted_reduce_f32".into(), moe_wr_src);
577        let sdpa_decode_src: &'static str =
578            include_str!("shaders/sdpa_decode.metal");
579        sources.insert("sdpa_decode".into(), sdpa_decode_src);
580
581        Self {
582            cache: HashMap::new(),
583            sources,
584        }
585    }
586
587    /// Register a shader source at runtime (useful for testing and dynamic
588    /// kernel generation).
589    pub fn register_source(&mut self, name: impl Into<String>, source: &'static str) {
590        let name = name.into();
591        // Invalidate any cached pipeline for this name since the source changed.
592        self.cache.remove(&name);
593        self.sources.insert(name, source);
594    }
595
596    /// Get a compiled compute pipeline for the named kernel function.
597    ///
598    /// On first call for a given name, this compiles the MSL source into a
599    /// Metal library, extracts the named function, and creates a
600    /// `ComputePipelineState`.  Subsequent calls return the cached pipeline.
601    ///
602    /// # Errors
603    ///
604    /// * `MlxError::KernelNotFound` — no source registered for this name.
605    /// * `MlxError::ShaderCompilationError` — MSL compilation or pipeline
606    ///   creation failed.
607    pub fn get_pipeline(
608        &mut self,
609        name: &str,
610        device: &metal::DeviceRef,
611    ) -> Result<&ComputePipelineState> {
612        if !self.cache.contains_key(name) {
613            // Slow path: compile the shader.
614            let source = self.sources.get(name).ok_or_else(|| {
615                MlxError::KernelNotFound(name.to_string())
616            })?;
617
618            let compile_opts = metal::CompileOptions::new();
619            let library = device
620                .new_library_with_source(source, &compile_opts)
621                .map_err(|msg| MlxError::ShaderCompilationError {
622                    name: name.to_string(),
623                    message: msg,
624                })?;
625
626            let function = library
627                .get_function(name, None)
628                .map_err(|msg| MlxError::ShaderCompilationError {
629                    name: name.to_string(),
630                    message: msg,
631                })?;
632
633            // Build the pipeline through a descriptor so we can attach a
634            // human-readable label.  The label propagates into Instruments /
635            // xctrace Metal System Trace as the per-pipeline identifier
636            // (`metal-object-label` schema), giving us per-kernel attribution
637            // instead of the generic "Compute Command 0" placeholder.
638            //
639            // `MTLComputePipelineState.label` is read-only after creation per
640            // the Apple Metal spec; the only supported way to set it is via
641            // the descriptor before pipeline creation.  ADR-015 iter9b.
642            let descriptor = ComputePipelineDescriptor::new();
643            descriptor.set_compute_function(Some(&function));
644            descriptor.set_label(name);
645
646            let pipeline = device
647                .new_compute_pipeline_state(&descriptor)
648                .map_err(|msg| MlxError::ShaderCompilationError {
649                    name: name.to_string(),
650                    message: msg,
651                })?;
652
653            self.cache.insert(name.to_string(), pipeline);
654        }
655
656        // At this point the pipeline is guaranteed to be in the cache.
657        // We use `ok_or_else` instead of `expect` to satisfy the no-panic policy.
658        self.cache.get(name).ok_or_else(|| {
659            MlxError::KernelNotFound(name.to_string())
660        })
661    }
662
    /// Get a compiled compute pipeline for the named kernel, specialized with
    /// Metal function constants (both bool and i32 in one call).
    ///
    /// `bool_constants` contains `(index, value)` pairs mapping to
    /// `[[function_constant(index)]]` bool declarations in the MSL shader.
    /// `int_constants` contains `(index, value)` pairs mapping to
    /// `[[function_constant(index)]]` int (int32_t) declarations in the MSL
    /// shader.
    ///
    /// Pipelines are cached by a composite key:
    /// `"<name>|<index>:b<0|1>|...|<index>:i<value>|..."`.  The 'b' prefix
    /// marks bool entries and the 'i' prefix marks i32 entries, making the
    /// format unambiguous regardless of constant ordering.  Distinct
    /// `(name, constants)` combinations each compile to a separate pipeline;
    /// the slow compilation path runs at most once per unique combination.
    ///
    /// NOTE(review): the key records constants in slice order, so the same
    /// set of constants passed in a different order compiles a second,
    /// functionally identical pipeline (a wasted compile, not a correctness
    /// issue) — callers should pass constants in a stable order.  With both
    /// slices empty the key degenerates to `name` and shares its cache slot
    /// with the plain [`get_pipeline`] path.
    ///
    /// # Errors
    ///
    /// * `MlxError::KernelNotFound` — no source registered for this name.
    /// * `MlxError::ShaderCompilationError` — MSL compilation, function
    ///   specialisation, or pipeline creation failed.
    pub fn get_pipeline_with_constants(
        &mut self,
        name: &str,
        device: &metal::DeviceRef,
        bool_constants: &[(usize, bool)],
        int_constants: &[(usize, i32)],
    ) -> Result<&ComputePipelineState> {
        // Build a composite cache key so distinct constant combinations each
        // compile to their own pipeline.  Bool entries use the 'b' type marker
        // and i32 entries use 'i'; this prevents a collision between, e.g.,
        // bool index 5 value 1 and int index 5 value 1.
        let mut cache_key = name.to_string();
        for &(index, value) in bool_constants {
            cache_key.push('|');
            cache_key.push_str(&index.to_string());
            cache_key.push_str(if value { ":b1" } else { ":b0" });
        }
        for &(index, value) in int_constants {
            cache_key.push('|');
            cache_key.push_str(&index.to_string());
            cache_key.push(':');
            cache_key.push('i');
            cache_key.push_str(&value.to_string());
        }

        if !self.cache.contains_key(&cache_key) {
            // Slow path: compile the shader with function constant specialisation.
            // Note: sources are looked up by the plain kernel `name`, not the
            // composite key — all specialisations share one MSL source.
            let source = self.sources.get(name).ok_or_else(|| {
                MlxError::KernelNotFound(name.to_string())
            })?;

            let compile_opts = metal::CompileOptions::new();
            let library = device
                .new_library_with_source(source, &compile_opts)
                .map_err(|msg| MlxError::ShaderCompilationError {
                    name: name.to_string(),
                    message: msg,
                })?;

            // Build the FunctionConstantValues object with all bool and i32
            // constants.  Metal's set_constant_value_at_index reads the value
            // through a raw pointer; the pointed-to bytes must match the size
            // declared in the MSL shader (1 byte for bool, 4 bytes for int).
            // Per the MTLFunctionConstantValues docs the bytes are copied
            // during the call, so the stack temporaries below only need to
            // live for the duration of each call.
            let fcv = FunctionConstantValues::new();

            for &(index, value) in bool_constants {
                // MTLDataType::Bool = 53 (metal-rs argument.rs).
                // The Metal runtime reads it as an Objective-C BOOL (uint8_t).
                let v: u8 = if value { 1 } else { 0 };
                fcv.set_constant_value_at_index(
                    (&v as *const u8).cast::<std::ffi::c_void>(),
                    MTLDataType::Bool,
                    index as u64,
                );
            }

            for &(index, value) in int_constants {
                // MTLDataType::Int = 29 (metal-rs argument.rs).
                // The Metal runtime reads 4 bytes as a signed 32-bit integer,
                // matching the Metal shader type `constant int`.
                fcv.set_constant_value_at_index(
                    (&value as *const i32).cast::<std::ffi::c_void>(),
                    MTLDataType::Int,
                    index as u64,
                );
            }

            let function = library
                .get_function(name, Some(fcv))
                .map_err(|msg| MlxError::ShaderCompilationError {
                    name: name.to_string(),
                    message: msg,
                })?;

            // Label this specialisation with the full composite cache key
            // (e.g. `kernel_mul_mv_q4_0_f32|0:b1|3:i32`) so xctrace Metal
            // System Trace shows each function-constant variant as a distinct
            // pipeline.  Without this, all specialisations share a generic
            // "Compute Command 0" identifier and we cannot attribute µs/token
            // to a specific (kernel, constants) combination.  Labels must be
            // set on the descriptor — they are read-only after pipeline
            // creation.  ADR-015 iter9b.
            let descriptor = ComputePipelineDescriptor::new();
            descriptor.set_compute_function(Some(&function));
            descriptor.set_label(&cache_key);

            let pipeline = device
                .new_compute_pipeline_state(&descriptor)
                .map_err(|msg| MlxError::ShaderCompilationError {
                    name: name.to_string(),
                    message: msg,
                })?;

            // `cache_key` is cloned because it is needed again for the final
            // lookup below.
            self.cache.insert(cache_key.clone(), pipeline);
        }

        // Guaranteed present after the insert above; `ok_or_else` instead of
        // `expect` keeps the no-panic policy (mirrors `get_pipeline`).
        self.cache.get(&cache_key).ok_or_else(|| {
            MlxError::KernelNotFound(name.to_string())
        })
    }
782
783    /// Get a compiled compute pipeline for the named kernel, specialized with
784    /// Metal bool function constants.
785    ///
786    /// The `bool_constants` slice contains `(index, value)` pairs.  Each pair
787    /// maps to a `[[function_constant(index)]]` declaration in the MSL shader.
788    ///
789    /// This is a thin wrapper around [`get_pipeline_with_constants`] that
790    /// passes an empty `int_constants` slice.  Existing callers continue to
791    /// work without modification; the cache-key format for pure-bool pipelines
792    /// is compatible (bool entries carry the 'b' type marker, which is the
793    /// only format ever written by this wrapper).
794    ///
795    /// # Errors
796    ///
797    /// * `MlxError::KernelNotFound` — no source registered for this name.
798    /// * `MlxError::ShaderCompilationError` — MSL compilation, function
799    ///   specialisation, or pipeline creation failed.
800    pub fn get_pipeline_with_bool_constants(
801        &mut self,
802        name: &str,
803        device: &metal::DeviceRef,
804        bool_constants: &[(usize, bool)],
805    ) -> Result<&ComputePipelineState> {
806        self.get_pipeline_with_constants(name, device, bool_constants, &[])
807    }
808
809    /// Check if a pipeline for the given name is already compiled and cached.
810    pub fn is_cached(&self, name: &str) -> bool {
811        self.cache.contains_key(name)
812    }
813
814    /// Number of compiled pipelines currently in the cache.
815    pub fn cached_count(&self) -> usize {
816        self.cache.len()
817    }
818
819    /// Number of registered shader sources.
820    pub fn source_count(&self) -> usize {
821        self.sources.len()
822    }
823}
824
825impl Default for KernelRegistry {
826    fn default() -> Self {
827        Self::new()
828    }
829}
830
#[cfg(test)]
mod tests {
    use super::*;

    /// Minimal Metal shader that uses a single int function constant.
    ///
    /// The kernel writes the constant value N into the first element of the
    /// output buffer, allowing the test to verify that the Metal compiler
    /// actually sees distinct specialisations for N=4 and N=8.
    ///
    /// The shader is intentionally trivial — we only need it to *compile* with
    /// an int function constant; correctness of the kernel logic is not under
    /// test here.
    const INT_FC_TEST_SHADER: &str = r#"
#include <metal_stdlib>
using namespace metal;

constant int test_N [[function_constant(100)]];

kernel void int_fc_test_kernel(
    device int* out [[buffer(0)]],
    uint tid [[thread_position_in_grid]])
{
    if (tid == 0) {
        out[0] = test_N;
    }
}
"#;

    /// Verify that `get_pipeline_with_constants` produces distinct cached
    /// pipelines for different i32 function-constant values, and that
    /// `get_pipeline_with_bool_constants` (the backward-compat wrapper) still
    /// works correctly with the new 'b'-prefixed cache-key format.
    ///
    /// This test requires a real Metal device.  There is no `#[ignore]` or
    /// cfg gating: on a machine without Metal support the `system_default()`
    /// expect below fails the test with an explicit message.
    #[test]
    fn test_int_fc_distinct_pipelines_and_bool_compat() {
        let device = metal::Device::system_default()
            .expect("no Metal device — run on Apple Silicon or x86 Mac with Metal support");

        let mut registry = KernelRegistry::new();

        // Register the inline test shader under a name that cannot collide with
        // any production kernel.
        registry.register_source("int_fc_test_kernel", INT_FC_TEST_SHADER);

        // Compile with N=4.
        let p4_ptr = registry
            .get_pipeline_with_constants(
                "int_fc_test_kernel",
                &device,
                &[],                  // no bool constants
                &[(100, 4_i32)],      // int constant index 100 = 4
            )
            .expect("pipeline N=4 should compile") as *const _;

        // `new()` registers sources but compiles nothing, so the only cache
        // entry at this point is the N=4 specialisation.  We record the count
        // rather than hard-coding 1 so the assertions below stay valid if the
        // setup above ever changes.
        let count_after_n4 = registry.cached_count();

        // Compile with N=8 — must produce a SEPARATE pipeline.
        let p8_ptr = registry
            .get_pipeline_with_constants(
                "int_fc_test_kernel",
                &device,
                &[],
                &[(100, 8_i32)],
            )
            .expect("pipeline N=8 should compile") as *const _;

        // Cache must have grown by exactly 1.
        assert_eq!(
            registry.cached_count(),
            count_after_n4 + 1,
            "N=8 must produce a new cache entry"
        );

        // The two pipelines must be distinct objects in the cache.
        assert_ne!(
            p4_ptr, p8_ptr,
            "N=4 and N=8 specialisations must be separate ComputePipelineState objects"
        );

        // A second call with N=4 must return the SAME pipeline (cache hit, no
        // new compilation).
        let p4_again_ptr = registry
            .get_pipeline_with_constants(
                "int_fc_test_kernel",
                &device,
                &[],
                &[(100, 4_i32)],
            )
            .expect("pipeline N=4 cache hit should succeed") as *const _;

        assert_eq!(
            registry.cached_count(),
            count_after_n4 + 1,
            "repeated N=4 call must be a cache hit, not a new entry"
        );
        assert_eq!(
            p4_ptr, p4_again_ptr,
            "repeated N=4 call must return the same pipeline pointer"
        );

        // Verify backward compatibility: get_pipeline_with_bool_constants must
        // still route through get_pipeline_with_constants and produce a cached
        // pipeline without panicking.
        //
        // A bool function constant that the shader does not declare would be a
        // Metal compile error, so we register a separate bare kernel with no
        // function constants at all and call the wrapper with an empty slice.
        // The call path and the 'b'-prefixed cache-key format are what is
        // under test here, not constant specialisation itself.
        const BARE_SHADER: &str = r#"
#include <metal_stdlib>
using namespace metal;
kernel void bare_kernel(device int* out [[buffer(0)]], uint tid [[thread_position_in_grid]]) {
    if (tid == 0) { out[0] = 42; }
}
"#;
        registry.register_source("bare_kernel", BARE_SHADER);

        let count_before_bool = registry.cached_count();
        let _bool_pipeline = registry
            .get_pipeline_with_bool_constants("bare_kernel", &device, &[])
            .expect("bool-constants wrapper with empty slice must succeed");

        assert_eq!(
            registry.cached_count(),
            count_before_bool + 1,
            "bool-constants wrapper must insert one new cache entry"
        );
    }

    /// Verify that the `MTLComputePipelineState.label` produced by
    /// `get_pipeline` and `get_pipeline_with_constants` actually propagates
    /// from the descriptor to the resulting pipeline state.
    ///
    /// This is the in-process smoke check for ADR-015 iter9b: we cannot
    /// reach into xctrace from Rust, but we can read back the same `label`
    /// property xctrace consumes via `ComputePipelineStateRef::label()`.
    /// If labels are missing or wrong here, the MST trace will also show
    /// generic identifiers — so this test gates the iter9 retry's
    /// per-Q4_0-kernel attribution.
    #[test]
    fn test_pipeline_labels_propagate_for_mst() {
        let device = metal::Device::system_default()
            .expect("no Metal device — run on Apple Silicon or x86 Mac with Metal support");

        let mut registry = KernelRegistry::new();

        // Reuse the same trivial shaders as the int-FC test.
        registry.register_source("int_fc_test_kernel", INT_FC_TEST_SHADER);

        const BARE_SHADER_LABEL_TEST: &str = r#"
#include <metal_stdlib>
using namespace metal;
kernel void label_smoke_kernel(device int* out [[buffer(0)]], uint tid [[thread_position_in_grid]]) {
    if (tid == 0) { out[0] = 7; }
}
"#;
        registry.register_source("label_smoke_kernel", BARE_SHADER_LABEL_TEST);

        // Plain get_pipeline path — label must equal the kernel name.
        // Capture as owned String so the cache borrow is released before
        // the next get_pipeline_with_constants call below.
        let plain_label = registry
            .get_pipeline("label_smoke_kernel", &device)
            .expect("plain pipeline must compile")
            .label()
            .to_string();
        assert_eq!(
            plain_label, "label_smoke_kernel",
            "get_pipeline must label the pipeline with the kernel name (xctrace MST attribution)"
        );

        // Constants path — label must equal the composite cache key so each
        // function-constant variant is individually attributable in MST.
        // We capture the label as an owned String to release the borrow on
        // the cache before fetching the next specialisation.
        let label_v7 = registry
            .get_pipeline_with_constants(
                "int_fc_test_kernel",
                &device,
                &[],
                &[(100, 7_i32)],
            )
            .expect("specialised pipeline must compile")
            .label()
            .to_string();
        assert_eq!(
            label_v7, "int_fc_test_kernel|100:i7",
            "get_pipeline_with_constants must label with the cache_key so each \
             specialisation is distinct in xctrace MST"
        );

        // A second specialisation must produce a different label.
        let label_v13 = registry
            .get_pipeline_with_constants(
                "int_fc_test_kernel",
                &device,
                &[],
                &[(100, 13_i32)],
            )
            .expect("second specialised pipeline must compile")
            .label()
            .to_string();
        assert_eq!(label_v13, "int_fc_test_kernel|100:i13");
        assert_ne!(
            label_v7, label_v13,
            "distinct constant values must yield distinct pipeline labels"
        );
    }
}