1#![deny(clippy::panic, clippy::unwrap_used, clippy::expect_used)]
41#![allow(unexpected_cfgs)]
45
46#[macro_use]
48mod error;
49mod buffer;
50mod buffer_pool;
51mod device;
52mod dtypes;
53mod encoder;
54mod encoder_session;
55mod kernel_registry;
56mod mem_ranges;
57mod residency;
58pub mod gguf;
59pub mod kernel_profile;
60pub mod graph;
61pub mod metal_capture;
62pub mod ops;
63pub mod turboquant;
64pub mod tq_oracle;
65pub mod weight;
66
67pub use buffer::MlxBuffer;
69pub use buffer_pool::MlxBufferPool;
70pub use device::MlxDevice;
71pub use dtypes::DType;
72pub use encoder::{
73 auto_barrier_concurrent_count, auto_barrier_count, barrier_count, barrier_total_ns,
74 cmd_buf_count, dispatch_count, reset_counters, sync_count, CapturedNode, CapturedOpKind,
75 CommandEncoder, DispatchKind, KernelArg, RecordedBinding,
76};
77pub use encoder_session::EncoderSession;
78pub use mem_ranges::{BufferRange, MemRangeRole, MemRanges};
79pub use error::{MlxError, Result};
80pub use graph::{ComputeGraph, GraphExecutor, GraphSession, OpKind};
81pub use kernel_registry::KernelRegistry;
82#[doc(hidden)]
88pub use residency::{
89 macos_15_or_newer_for_test, reset_residency_env_cache_for_test,
90 reset_residency_test_counters, residency_allocation_count_for_test,
91 residency_commit_call_count_for_test,
92};
93
94pub use gguf::{GgufFile, MetadataValue, TensorInfo};
96
97pub use ops::dense_mm_bf16::{dense_matmul_bf16_f32_tensor, DenseMmBf16F32Params};
99pub use ops::dense_mm_f16::{dense_matmul_f16_f32_tensor, DenseMmF16F32Params};
100pub use ops::dense_mm_f32_f32::{dense_matmul_f32_f32_tensor, DenseMmF32F32Params};
101pub use ops::quantized_matmul::{quantized_matmul, quantized_matmul_simd, QuantizedMatmulParams};
102pub use ops::quantized_matmul_ggml::{
103 dispatch_mm_for_test, quantized_matmul_ggml, quantized_matmul_mm_tensor_perm021,
104 GgmlQuantizedMatmulParams, GgmlQuantizedMatmulPerm021Params, GgmlType,
105 MM_ROUTING_THRESHOLD,
106};
107pub use ops::quantized_matmul_id::{quantized_matmul_id, QuantizedMatmulIdParams};
108pub use ops::quantized_matmul_id_ggml::{
109 dispatch_id_mm_for_test, quantized_matmul_id_ggml, quantized_matmul_id_ggml_pooled,
110 quantized_matmul_id_swiglu_q4_0,
111 GgmlIdMmDispatchParams, GgmlQuantizedMatmulIdParams, IdMmScratch,
112 MM_ID_ROUTING_THRESHOLD,
113};
114
115pub use weight::{
117 load_quantized_weights, safetensors_to_metal_buffer, QuantizationConfig, QuantizedWeight,
118 SafetensorsFile, TensorQuantConfig,
119};
120
121pub use metal::MTLSize;
123pub use metal;
124
#[cfg(test)]
#[allow(clippy::expect_used, clippy::unwrap_used, clippy::panic)]
mod tests {
    use super::*;

    // Helpers for compile-time auto-trait checks; never called at runtime.
    fn _assert_send<T: Send>() {}
    fn _assert_sync<T: Sync>() {}

    /// Compile-time proof that the crate's core handle and error types are
    /// `Send + Sync`. If any of these impls disappear, this function stops
    /// compiling — no runtime assertion needed.
    #[allow(dead_code)]
    fn assert_send_sync() {
        _assert_send::<MlxDevice>();
        _assert_sync::<MlxDevice>();
        _assert_send::<MlxBuffer>();
        _assert_sync::<MlxBuffer>();
        _assert_send::<MlxError>();
        _assert_sync::<MlxError>();
    }

    /// Device creation succeeds and reports a non-empty device name.
    #[test]
    fn test_device_init() {
        let device = MlxDevice::new().expect("MlxDevice::new() should succeed on Apple Silicon");
        let name = device.name();
        assert!(!name.is_empty(), "Device name should not be empty");
        println!("Metal device: {name}");
    }

    /// Allocating a buffer records dtype, shape, byte length, and element count.
    #[test]
    fn test_buffer_alloc() {
        let device = MlxDevice::new().expect("device");
        let shape = vec![2, 3, 4];
        // 2 * 3 * 4 = 24 elements of f32.
        let byte_len = 2 * 3 * 4 * DType::F32.size_of();
        let buf = device
            .alloc_buffer(byte_len, DType::F32, shape.clone())
            .expect("alloc_buffer");

        assert_eq!(buf.dtype(), DType::F32);
        assert_eq!(buf.shape(), &shape);
        assert_eq!(buf.byte_len(), byte_len);
        assert_eq!(buf.element_count(), 24);
    }

    /// Values written through `as_mut_slice` read back unchanged via `as_slice`.
    #[test]
    fn test_buffer_readwrite() {
        let device = MlxDevice::new().expect("device");
        let n = 64;
        let byte_len = n * std::mem::size_of::<f32>();
        let mut buf = device
            .alloc_buffer(byte_len, DType::F32, vec![n])
            .expect("alloc_buffer");

        // Write phase: scoped so the mutable borrow ends before reading.
        {
            let slice: &mut [f32] = buf.as_mut_slice().expect("as_mut_slice");
            assert_eq!(slice.len(), n);
            for (i, val) in slice.iter_mut().enumerate() {
                *val = i as f32 * 1.5;
            }
        }

        // Read phase: every element must match what was written.
        {
            let slice: &[f32] = buf.as_slice().expect("as_slice");
            for (i, &val) in slice.iter().enumerate() {
                let expected = i as f32 * 1.5;
                assert!(
                    (val - expected).abs() < f32::EPSILON,
                    "Mismatch at index {i}: got {val}, expected {expected}"
                );
            }
        }
    }

    /// An encoder with no recorded work can still be committed and waited on.
    #[test]
    fn test_encoder_lifecycle() {
        let device = MlxDevice::new().expect("device");
        let mut enc = device.command_encoder().expect("command_encoder");
        enc.commit_and_wait()
            .expect("commit_and_wait on empty encoder");
    }

    /// Releasing a buffer back to the pool and re-allocating the same size
    /// returns the same underlying Metal allocation (pointer equality).
    #[test]
    fn test_buffer_pool_reuse() {
        let device = MlxDevice::new().expect("device");
        let mut pool = MlxBufferPool::new();

        let buf1 = pool
            .alloc(&device, 1024, DType::F32, vec![256])
            .expect("pool alloc 1");
        let buf1_ptr = buf1.contents_ptr();
        let buf1_byte_len = buf1.byte_len();

        pool.release(buf1);
        assert_eq!(pool.free_count(), 1);

        let buf2 = pool
            .alloc(&device, 1024, DType::F32, vec![256])
            .expect("pool alloc 2");
        let buf2_ptr = buf2.contents_ptr();
        let buf2_byte_len = buf2.byte_len();

        assert_eq!(buf1_ptr, buf2_ptr, "Pool should reuse the same Metal buffer");
        assert_eq!(buf1_byte_len, buf2_byte_len, "Byte lengths should match");
        assert_eq!(pool.free_count(), 0, "Free list should be empty after reuse");
    }

    /// Compiling a registered kernel caches the pipeline; a second lookup
    /// returns the same cached object rather than recompiling.
    #[test]
    fn test_kernel_registry_caching() {
        let device = MlxDevice::new().expect("device");
        let mut registry = KernelRegistry::new();

        registry.register_source(
            "test_add",
            r#"
            #include <metal_stdlib>
            using namespace metal;
            kernel void test_add(
                device float *a [[buffer(0)]],
                device float *b [[buffer(1)]],
                device float *c [[buffer(2)]],
                uint id [[thread_position_in_grid]]
            ) {
                c[id] = a[id] + b[id];
            }
            "#,
        );

        assert!(!registry.is_cached("test_add"));
        let p1 = registry
            .get_pipeline("test_add", device.metal_device())
            .expect("get_pipeline first call");
        let p1_ptr = p1 as *const _;
        assert!(registry.is_cached("test_add"));

        let p2 = registry
            .get_pipeline("test_add", device.metal_device())
            .expect("get_pipeline second call");
        let p2_ptr = p2 as *const _;

        assert_eq!(
            p1_ptr, p2_ptr,
            "Second get_pipeline call should return the same cached pipeline"
        );
    }

    /// A zero-length allocation is rejected with `InvalidArgument`.
    #[test]
    fn test_buffer_alloc_zero_len_error() {
        let device = MlxDevice::new().expect("device");
        let result = device.alloc_buffer(0, DType::F32, vec![]);
        assert!(result.is_err(), "Zero-length allocation should fail");
        match result {
            Err(MlxError::InvalidArgument(_)) => {}
            other => panic!("Expected InvalidArgument, got {other:?}"),
        }
    }

    /// Looking up an unregistered kernel yields `KernelNotFound` carrying the
    /// requested name.
    #[test]
    fn test_kernel_not_found() {
        let device = MlxDevice::new().expect("device");
        let mut registry = KernelRegistry::new();
        let result = registry.get_pipeline("nonexistent_kernel", device.metal_device());
        assert!(result.is_err());
        match result {
            Err(MlxError::KernelNotFound(name)) => {
                assert_eq!(name, "nonexistent_kernel");
            }
            other => panic!("Expected KernelNotFound, got {other:?}"),
        }
    }

    /// Per-element byte sizes for every supported dtype.
    #[test]
    fn test_dtype_sizes() {
        assert_eq!(DType::F32.size_of(), 4);
        assert_eq!(DType::F16.size_of(), 2);
        assert_eq!(DType::BF16.size_of(), 2);
        assert_eq!(DType::U8.size_of(), 1);
        assert_eq!(DType::U16.size_of(), 2);
        assert_eq!(DType::U32.size_of(), 4);
        assert_eq!(DType::I32.size_of(), 4);
    }

    /// The `Debug` impl for buffers includes the type name, dtype, and shape.
    #[test]
    fn test_buffer_debug() {
        let device = MlxDevice::new().expect("device");
        let buf = device
            .alloc_buffer(64, DType::F16, vec![4, 8])
            .expect("alloc_buffer");
        let debug_str = format!("{buf:?}");
        assert!(debug_str.contains("MlxBuffer"));
        assert!(debug_str.contains("F16"));
        assert!(debug_str.contains("[4, 8]"));
    }

    /// `Display` output for errors contains the relevant context strings.
    #[test]
    fn test_error_display() {
        let e = MlxError::DeviceNotFound;
        assert!(format!("{e}").contains("Metal GPU device"));

        let e = MlxError::ShaderCompilationError {
            name: "foo".into(),
            message: "syntax error".into(),
        };
        assert!(format!("{e}").contains("foo"));
        assert!(format!("{e}").contains("syntax error"));
    }

    /// Requested sizes are rounded up into buckets: 100 and 128 are expected
    /// to share a bucket, while 200 lands in a different one, leaving two
    /// distinct free entries after all releases.
    #[test]
    fn test_buffer_pool_size_buckets() {
        let device = MlxDevice::new().expect("device");
        let mut pool = MlxBufferPool::new();

        let buf_100 = pool.alloc(&device, 100, DType::U8, vec![100]).expect("alloc 100");
        assert!(
            buf_100.byte_len() >= 100,
            "Buffer should be at least 100 bytes"
        );
        pool.release(buf_100);

        let buf_128 = pool.alloc(&device, 128, DType::U8, vec![128]).expect("alloc 128");
        assert!(buf_128.byte_len() >= 128);
        pool.release(buf_128);

        let buf_200 = pool.alloc(&device, 200, DType::U8, vec![200]).expect("alloc 200");
        assert!(buf_200.byte_len() >= 200);
        pool.release(buf_200);

        assert_eq!(pool.free_count(), 2, "Two different bucket sizes in pool");
    }
}