// oxicuda_driver/loader.rs
//! Dynamic CUDA driver library loader.
//!
//! This module is the architectural foundation of `oxicuda-driver`. It locates
//! and loads the CUDA driver shared library (`libcuda.so` on Linux,
//! `nvcuda.dll` on Windows) **at runtime** via [`libloading`], so that no CUDA
//! SDK is required at build time.
//!
//! # Platform support
//!
//! | Platform | Library names tried              | Notes                            |
//! |----------|----------------------------------|----------------------------------|
//! | Linux    | `libcuda.so.1`, `libcuda.so`     | Installed by NVIDIA driver       |
//! | Windows  | `nvcuda.dll`                     | Ships with the display driver    |
//! | macOS    | —                                | Returns `UnsupportedPlatform`    |
//!
//! # Usage
//!
//! Application code should **not** interact with [`DriverApi`] directly.
//! Instead, call [`try_driver`] to obtain a reference to the lazily-
//! initialised global singleton:
//!
//! ```rust,no_run
//! # use oxicuda_driver::loader::try_driver;
//! let api = try_driver()?;
//! // api.cu_init, api.cu_device_get, …
//! # Ok::<(), oxicuda_driver::error::CudaError>(())
//! ```
//!
//! The singleton is stored in a [`OnceLock`] so that the (relatively
//! expensive) `dlopen` + symbol resolution only happens once, and all
//! subsequent accesses are a single atomic load.

33use std::ffi::{c_char, c_int, c_void};
34use std::sync::OnceLock;
35
36use libloading::Library;
37
38use crate::error::{CudaError, CudaResult, DriverLoadError};
39use crate::ffi::*;
40
// ---------------------------------------------------------------------------
// Global singleton
// ---------------------------------------------------------------------------
44
/// Global singleton for the driver API function table.
///
/// Initialised lazily on the first call to [`try_driver`]. The `Result` is
/// what gets cached: a failed load is stored too, so every subsequent call
/// observes the same `DriverLoadError` without re-attempting the `dlopen`
/// (a `OnceLock` value is written exactly once).
static DRIVER: OnceLock<Result<DriverApi, DriverLoadError>> = OnceLock::new();
49
// ---------------------------------------------------------------------------
// load_sym! helper macro
// ---------------------------------------------------------------------------
53
/// Load a single symbol from the shared library and transmute it to the
/// requested function-pointer type.
///
/// The symbol name is passed with an explicit NUL terminator
/// (`concat!($name, "\0")`): `libloading::Library::get` forwards an already
/// NUL-terminated byte slice to the platform loader directly, whereas a
/// plain `&str` would force it to allocate a terminated copy per lookup.
///
/// # Safety
///
/// The caller must ensure that the symbol name matches the actual ABI of the
/// function pointer type expected at the call site.
#[cfg(not(target_os = "macos"))]
macro_rules! load_sym {
    ($lib:expr, $name:literal) => {{
        // `Library::get` takes the name as a byte slice; the trailing NUL
        // avoids libloading's internal copy. We request the most general
        // function-pointer type and then transmute to the concrete signature
        // stored in DriverApi.
        let sym = unsafe { $lib.get::<unsafe extern "C" fn()>(concat!($name, "\0").as_bytes()) }
            .map_err(|e| DriverLoadError::SymbolNotFound {
                symbol: $name,
                reason: e.to_string(),
            })?;
        // SAFETY: we trust that the CUDA driver exports the symbol with the
        // ABI described by the target field type. The type is inferred from
        // the DriverApi field this expression is assigned to, so explicit
        // transmute annotations would require repeating the function-pointer
        // type at every call site inside a macro — we suppress that lint here.
        #[allow(clippy::missing_transmute_annotations)]
        let result = unsafe { std::mem::transmute(*sym) };
        result
    }};
}
83
/// Load a symbol from the shared library, returning `Some(fn_ptr)` on success
/// or `None` if the symbol is not found. Used for optional API entry points
/// that may not be present in older driver versions.
///
/// The symbol name carries an explicit NUL terminator so `libloading` can
/// hand it to the platform loader without allocating a terminated copy.
///
/// # Safety
///
/// Same safety requirements as [`load_sym!`].
#[cfg(not(target_os = "macos"))]
macro_rules! load_sym_optional {
    ($lib:expr, $name:literal) => {{
        match unsafe { $lib.get::<unsafe extern "C" fn()>(concat!($name, "\0").as_bytes()) } {
            Ok(sym) => {
                // SAFETY: the target type is inferred from the DriverApi field
                // this value is assigned to. Suppressing the lint here avoids
                // repeating the function-pointer type at every call site.
                #[allow(clippy::missing_transmute_annotations)]
                let fp = unsafe { std::mem::transmute(*sym) };
                Some(fp)
            }
            Err(e) => {
                // Keep the loader's error text: "symbol genuinely absent"
                // and e.g. a library-versioning problem look identical if we
                // discard `e`.
                tracing::debug!("optional symbol not found: {}: {}", $name, e);
                None
            }
        }
    }};
}
110
// ---------------------------------------------------------------------------
// DriverApi
// ---------------------------------------------------------------------------
114
/// Complete function-pointer table for the CUDA Driver API.
///
/// An instance of this struct is produced by [`DriverApi::load`] and kept
/// alive for the lifetime of the process inside the `DRIVER` singleton.
/// The embedded [`Library`] handle ensures the shared object is not unloaded.
///
/// # Function pointer groups
///
/// The fields are organised into logical groups mirroring the CUDA Driver API
/// documentation:
///
/// * **Initialisation** — [`cu_init`](Self::cu_init)
/// * **Device management** — `cu_device_*`
/// * **Context management** — `cu_ctx_*`
/// * **Module management** — `cu_module_*`
/// * **Memory management** — `cu_mem_*`, `cu_memcpy_*`, `cu_memset_*`
/// * **Stream management** — `cu_stream_*`
/// * **Event management** — `cu_event_*`
/// * **Kernel launch** — [`cu_launch_kernel`](Self::cu_launch_kernel)
/// * **Occupancy queries** — `cu_occupancy_*`
135pub struct DriverApi {
136 // Keep the shared library handle alive.
137 _lib: Library,
138
139 // -- Initialisation ----------------------------------------------------
140 /// `cuInit(flags) -> CUresult`
141 ///
142 /// Initialises the CUDA driver API. Must be called before any other
143 /// driver function. Passing `0` for *flags* is the only documented
144 /// value.
145 pub cu_init: unsafe extern "C" fn(flags: u32) -> CUresult,
146
147 // -- Version query -------------------------------------------------------
148 /// `cuDriverGetVersion(driverVersion*) -> CUresult`
149 ///
150 /// Returns the CUDA driver version as `major*1000 + minor*10`.
151 pub cu_driver_get_version: unsafe extern "C" fn(version: *mut c_int) -> CUresult,
152
153 // -- Device management -------------------------------------------------
154 /// `cuDeviceGet(device*, ordinal) -> CUresult`
155 ///
156 /// Returns a handle to a compute device.
157 pub cu_device_get: unsafe extern "C" fn(device: *mut CUdevice, ordinal: c_int) -> CUresult,
158
159 /// `cuDeviceGetCount(count*) -> CUresult`
160 ///
161 /// Returns the number of compute-capable devices.
162 pub cu_device_get_count: unsafe extern "C" fn(count: *mut c_int) -> CUresult,
163
164 /// `cuDeviceGetName(name*, len, dev) -> CUresult`
165 ///
166 /// Returns an ASCII string identifying the device.
167 pub cu_device_get_name:
168 unsafe extern "C" fn(name: *mut c_char, len: c_int, dev: CUdevice) -> CUresult,
169
170 /// `cuDeviceGetAttribute(pi*, attrib, dev) -> CUresult`
171 ///
172 /// Returns information about the device.
173 pub cu_device_get_attribute:
174 unsafe extern "C" fn(pi: *mut c_int, attrib: CUdevice_attribute, dev: CUdevice) -> CUresult,
175
176 /// `cuDeviceTotalMem_v2(bytes*, dev) -> CUresult`
177 ///
178 /// Returns the total amount of memory on the device.
179 pub cu_device_total_mem_v2: unsafe extern "C" fn(bytes: *mut usize, dev: CUdevice) -> CUresult,
180
181 /// `cuDeviceCanAccessPeer(canAccessPeer*, dev, peerDev) -> CUresult`
182 ///
183 /// Queries if a device may directly access a peer device's memory.
184 pub cu_device_can_access_peer:
185 unsafe extern "C" fn(can_access: *mut c_int, dev: CUdevice, peer_dev: CUdevice) -> CUresult,
186
187 // -- Primary context management ----------------------------------------
188 /// `cuDevicePrimaryCtxRetain(pctx*, dev) -> CUresult`
189 ///
190 /// Retains the primary context on the device, creating it if necessary.
191 pub cu_device_primary_ctx_retain:
192 unsafe extern "C" fn(pctx: *mut CUcontext, dev: CUdevice) -> CUresult,
193
194 /// `cuDevicePrimaryCtxRelease_v2(dev) -> CUresult`
195 ///
196 /// Releases the primary context on the device.
197 pub cu_device_primary_ctx_release_v2: unsafe extern "C" fn(dev: CUdevice) -> CUresult,
198
199 /// `cuDevicePrimaryCtxSetFlags_v2(dev, flags) -> CUresult`
200 ///
201 /// Sets flags for the primary context.
202 pub cu_device_primary_ctx_set_flags_v2:
203 unsafe extern "C" fn(dev: CUdevice, flags: u32) -> CUresult,
204
205 /// `cuDevicePrimaryCtxGetState(dev, flags*, active*) -> CUresult`
206 ///
207 /// Returns the state (flags and active status) of the primary context.
208 pub cu_device_primary_ctx_get_state:
209 unsafe extern "C" fn(dev: CUdevice, flags: *mut u32, active: *mut c_int) -> CUresult,
210
211 /// `cuDevicePrimaryCtxReset_v2(dev) -> CUresult`
212 ///
213 /// Resets the primary context on the device.
214 pub cu_device_primary_ctx_reset_v2: unsafe extern "C" fn(dev: CUdevice) -> CUresult,
215
216 // -- Context management ------------------------------------------------
217 /// `cuCtxCreate_v2(pctx*, flags, dev) -> CUresult`
218 ///
219 /// Creates a new CUDA context and associates it with the calling thread.
220 pub cu_ctx_create_v2:
221 unsafe extern "C" fn(pctx: *mut CUcontext, flags: u32, dev: CUdevice) -> CUresult,
222
223 /// `cuCtxDestroy_v2(ctx) -> CUresult`
224 ///
225 /// Destroys a CUDA context.
226 pub cu_ctx_destroy_v2: unsafe extern "C" fn(ctx: CUcontext) -> CUresult,
227
228 /// `cuCtxSetCurrent(ctx) -> CUresult`
229 ///
230 /// Binds the specified CUDA context to the calling CPU thread.
231 pub cu_ctx_set_current: unsafe extern "C" fn(ctx: CUcontext) -> CUresult,
232
233 /// `cuCtxGetCurrent(pctx*) -> CUresult`
234 ///
235 /// Returns the CUDA context bound to the calling CPU thread.
236 pub cu_ctx_get_current: unsafe extern "C" fn(pctx: *mut CUcontext) -> CUresult,
237
238 /// `cuCtxSynchronize() -> CUresult`
239 ///
240 /// Blocks until the device has completed all preceding requested tasks.
241 pub cu_ctx_synchronize: unsafe extern "C" fn() -> CUresult,
242
243 // -- Module management -------------------------------------------------
244 /// `cuModuleLoadData(module*, image*) -> CUresult`
245 ///
246 /// Loads a module from a PTX or cubin image in host memory.
247 pub cu_module_load_data:
248 unsafe extern "C" fn(module: *mut CUmodule, image: *const c_void) -> CUresult,
249
250 /// `cuModuleLoadDataEx(module*, image*, numOptions, options*, optionValues*) -> CUresult`
251 ///
252 /// Loads a module with JIT compiler options.
253 pub cu_module_load_data_ex: unsafe extern "C" fn(
254 module: *mut CUmodule,
255 image: *const c_void,
256 num_options: u32,
257 options: *mut CUjit_option,
258 option_values: *mut *mut c_void,
259 ) -> CUresult,
260
261 /// `cuModuleGetFunction(hfunc*, hmod, name*) -> CUresult`
262 ///
263 /// Returns a handle to a function within a module.
264 pub cu_module_get_function: unsafe extern "C" fn(
265 hfunc: *mut CUfunction,
266 hmod: CUmodule,
267 name: *const c_char,
268 ) -> CUresult,
269
270 /// `cuModuleUnload(hmod) -> CUresult`
271 ///
272 /// Unloads a module from the current context.
273 pub cu_module_unload: unsafe extern "C" fn(hmod: CUmodule) -> CUresult,
274
275 // -- Memory management -------------------------------------------------
276 /// `cuMemAlloc_v2(dptr*, bytesize) -> CUresult`
277 ///
278 /// Allocates device memory.
279 pub cu_mem_alloc_v2: unsafe extern "C" fn(dptr: *mut CUdeviceptr, bytesize: usize) -> CUresult,
280
281 /// `cuMemFree_v2(dptr) -> CUresult`
282 ///
283 /// Frees device memory.
284 pub cu_mem_free_v2: unsafe extern "C" fn(dptr: CUdeviceptr) -> CUresult,
285
286 /// `cuMemcpyHtoD_v2(dst, src*, bytesize) -> CUresult`
287 ///
288 /// Copies data from host memory to device memory.
289 pub cu_memcpy_htod_v2:
290 unsafe extern "C" fn(dst: CUdeviceptr, src: *const c_void, bytesize: usize) -> CUresult,
291
292 /// `cuMemcpyDtoH_v2(dst*, src, bytesize) -> CUresult`
293 ///
294 /// Copies data from device memory to host memory.
295 pub cu_memcpy_dtoh_v2:
296 unsafe extern "C" fn(dst: *mut c_void, src: CUdeviceptr, bytesize: usize) -> CUresult,
297
298 /// `cuMemcpyDtoD_v2(dst, src, bytesize) -> CUresult`
299 ///
300 /// Copies data from device memory to device memory.
301 pub cu_memcpy_dtod_v2:
302 unsafe extern "C" fn(dst: CUdeviceptr, src: CUdeviceptr, bytesize: usize) -> CUresult,
303
304 /// `cuMemcpyHtoDAsync_v2(dst, src*, bytesize, stream) -> CUresult`
305 ///
306 /// Asynchronously copies data from host to device memory.
307 pub cu_memcpy_htod_async_v2: unsafe extern "C" fn(
308 dst: CUdeviceptr,
309 src: *const c_void,
310 bytesize: usize,
311 stream: CUstream,
312 ) -> CUresult,
313
314 /// `cuMemcpyDtoHAsync_v2(dst*, src, bytesize, stream) -> CUresult`
315 ///
316 /// Asynchronously copies data from device to host memory.
317 pub cu_memcpy_dtoh_async_v2: unsafe extern "C" fn(
318 dst: *mut c_void,
319 src: CUdeviceptr,
320 bytesize: usize,
321 stream: CUstream,
322 ) -> CUresult,
323
324 /// `cuMemAllocHost_v2(pp*, bytesize) -> CUresult`
325 ///
326 /// Allocates page-locked (pinned) host memory.
327 pub cu_mem_alloc_host_v2:
328 unsafe extern "C" fn(pp: *mut *mut c_void, bytesize: usize) -> CUresult,
329
330 /// `cuMemFreeHost(p*) -> CUresult`
331 ///
332 /// Frees page-locked host memory.
333 pub cu_mem_free_host: unsafe extern "C" fn(p: *mut c_void) -> CUresult,
334
335 /// `cuMemAllocManaged(dptr*, bytesize, flags) -> CUresult`
336 ///
337 /// Allocates unified memory accessible from both host and device.
338 pub cu_mem_alloc_managed:
339 unsafe extern "C" fn(dptr: *mut CUdeviceptr, bytesize: usize, flags: u32) -> CUresult,
340
341 /// `cuMemsetD8_v2(dst, value, count) -> CUresult`
342 ///
343 /// Sets device memory to a value (byte granularity).
344 pub cu_memset_d8_v2:
345 unsafe extern "C" fn(dst: CUdeviceptr, value: u8, count: usize) -> CUresult,
346
347 /// `cuMemsetD32_v2(dst, value, count) -> CUresult`
348 ///
349 /// Sets device memory to a value (32-bit granularity).
350 pub cu_memset_d32_v2:
351 unsafe extern "C" fn(dst: CUdeviceptr, value: u32, count: usize) -> CUresult,
352
353 /// `cuMemGetInfo_v2(free*, total*) -> CUresult`
354 ///
355 /// Returns free and total memory for the current context's device.
356 pub cu_mem_get_info_v2: unsafe extern "C" fn(free: *mut usize, total: *mut usize) -> CUresult,
357
358 /// `cuMemHostRegister_v2(p*, bytesize, flags) -> CUresult`
359 ///
360 /// Registers an existing host memory range for use by CUDA.
361 pub cu_mem_host_register_v2:
362 unsafe extern "C" fn(p: *mut c_void, bytesize: usize, flags: u32) -> CUresult,
363
364 /// `cuMemHostUnregister(p*) -> CUresult`
365 ///
366 /// Unregisters a memory range that was registered with cuMemHostRegister.
367 pub cu_mem_host_unregister: unsafe extern "C" fn(p: *mut c_void) -> CUresult,
368
369 /// `cuMemHostGetDevicePointer_v2(pdptr*, p*, flags) -> CUresult`
370 ///
371 /// Returns the device pointer mapped to a registered host pointer.
372 pub cu_mem_host_get_device_pointer_v2:
373 unsafe extern "C" fn(pdptr: *mut CUdeviceptr, p: *mut c_void, flags: u32) -> CUresult,
374
375 /// `cuPointerGetAttribute(data*, attribute, ptr) -> CUresult`
376 ///
377 /// Returns information about a pointer.
378 pub cu_pointer_get_attribute:
379 unsafe extern "C" fn(data: *mut c_void, attribute: u32, ptr: CUdeviceptr) -> CUresult,
380
381 /// `cuMemAdvise(devPtr, count, advice, device) -> CUresult`
382 ///
383 /// Advises the unified memory subsystem about usage patterns.
384 pub cu_mem_advise: unsafe extern "C" fn(
385 dev_ptr: CUdeviceptr,
386 count: usize,
387 advice: u32,
388 device: CUdevice,
389 ) -> CUresult,
390
391 /// `cuMemPrefetchAsync(devPtr, count, dstDevice, hStream) -> CUresult`
392 ///
393 /// Prefetches unified memory to the specified device.
394 pub cu_mem_prefetch_async: unsafe extern "C" fn(
395 dev_ptr: CUdeviceptr,
396 count: usize,
397 dst_device: CUdevice,
398 hstream: CUstream,
399 ) -> CUresult,
400
401 // -- Stream management -------------------------------------------------
402 /// `cuStreamCreate(phStream*, flags) -> CUresult`
403 ///
404 /// Creates a stream.
405 pub cu_stream_create: unsafe extern "C" fn(phstream: *mut CUstream, flags: u32) -> CUresult,
406
407 /// `cuStreamCreateWithPriority(phStream*, flags, priority) -> CUresult`
408 ///
409 /// Creates a stream with the given priority.
410 pub cu_stream_create_with_priority:
411 unsafe extern "C" fn(phstream: *mut CUstream, flags: u32, priority: c_int) -> CUresult,
412
413 /// `cuStreamDestroy_v2(hStream) -> CUresult`
414 ///
415 /// Destroys a stream.
416 pub cu_stream_destroy_v2: unsafe extern "C" fn(hstream: CUstream) -> CUresult,
417
418 /// `cuStreamSynchronize(hStream) -> CUresult`
419 ///
420 /// Waits until a stream's tasks are completed.
421 pub cu_stream_synchronize: unsafe extern "C" fn(hstream: CUstream) -> CUresult,
422
423 /// `cuStreamWaitEvent(hStream, hEvent, flags) -> CUresult`
424 ///
425 /// Makes all future work submitted to the stream wait for the event.
426 pub cu_stream_wait_event:
427 unsafe extern "C" fn(hstream: CUstream, hevent: CUevent, flags: u32) -> CUresult,
428
429 /// `cuStreamQuery(hStream) -> CUresult`
430 ///
431 /// Returns `CUDA_SUCCESS` if all operations in the stream have completed,
432 /// `CUDA_ERROR_NOT_READY` if still pending.
433 pub cu_stream_query: unsafe extern "C" fn(hstream: CUstream) -> CUresult,
434
435 /// `cuStreamGetPriority(hStream, priority*) -> CUresult`
436 ///
437 /// Query the priority of `hStream`.
438 pub cu_stream_get_priority:
439 unsafe extern "C" fn(hstream: CUstream, priority: *mut std::ffi::c_int) -> CUresult,
440
441 /// `cuStreamGetFlags(hStream, flags*) -> CUresult`
442 ///
443 /// Query the flags of `hStream`.
444 pub cu_stream_get_flags: unsafe extern "C" fn(hstream: CUstream, flags: *mut u32) -> CUresult,
445
446 // -- Event management --------------------------------------------------
447 /// `cuEventCreate(phEvent*, flags) -> CUresult`
448 ///
449 /// Creates an event.
450 pub cu_event_create: unsafe extern "C" fn(phevent: *mut CUevent, flags: u32) -> CUresult,
451
452 /// `cuEventDestroy_v2(hEvent) -> CUresult`
453 ///
454 /// Destroys an event.
455 pub cu_event_destroy_v2: unsafe extern "C" fn(hevent: CUevent) -> CUresult,
456
457 /// `cuEventRecord(hEvent, hStream) -> CUresult`
458 ///
459 /// Records an event in a stream.
460 pub cu_event_record: unsafe extern "C" fn(hevent: CUevent, hstream: CUstream) -> CUresult,
461
462 /// `cuEventQuery(hEvent) -> CUresult`
463 ///
464 /// Queries the status of an event. Returns `CUDA_SUCCESS` if complete,
465 /// `CUDA_ERROR_NOT_READY` if still pending.
466 pub cu_event_query: unsafe extern "C" fn(hevent: CUevent) -> CUresult,
467
468 /// `cuEventSynchronize(hEvent) -> CUresult`
469 ///
470 /// Waits until an event completes.
471 pub cu_event_synchronize: unsafe extern "C" fn(hevent: CUevent) -> CUresult,
472
473 /// `cuEventElapsedTime(pMilliseconds*, hStart, hEnd) -> CUresult`
474 ///
475 /// Computes the elapsed time between two events.
476 pub cu_event_elapsed_time:
477 unsafe extern "C" fn(pmilliseconds: *mut f32, hstart: CUevent, hend: CUevent) -> CUresult,
478
479 // -- Kernel launch -----------------------------------------------------
480
481 // -- Peer memory access ------------------------------------------------
482 /// `cuMemcpyPeer(dstDevice, dstContext, srcDevice, srcContext, count) -> CUresult`
483 ///
484 /// Copies device memory between two primary contexts.
485 pub cu_memcpy_peer: unsafe extern "C" fn(
486 dst_device: u64,
487 dst_ctx: CUcontext,
488 src_device: u64,
489 src_ctx: CUcontext,
490 count: usize,
491 ) -> CUresult,
492
493 /// `cuMemcpyPeerAsync(..., hStream) -> CUresult`
494 ///
495 /// Asynchronous cross-device copy.
496 pub cu_memcpy_peer_async: unsafe extern "C" fn(
497 dst_device: u64,
498 dst_ctx: CUcontext,
499 src_device: u64,
500 src_ctx: CUcontext,
501 count: usize,
502 stream: CUstream,
503 ) -> CUresult,
504
505 /// `cuCtxEnablePeerAccess(peerContext, flags) -> CUresult`
506 ///
507 /// Enables peer access between two contexts.
508 pub cu_ctx_enable_peer_access:
509 unsafe extern "C" fn(peer_context: CUcontext, flags: u32) -> CUresult,
510
511 /// `cuCtxDisablePeerAccess(peerContext) -> CUresult`
512 ///
513 /// Disables peer access to a context.
514 pub cu_ctx_disable_peer_access: unsafe extern "C" fn(peer_context: CUcontext) -> CUresult,
515 /// `cuLaunchKernel(f, gridDimX, gridDimY, gridDimZ, blockDimX, blockDimY,
516 /// blockDimZ, sharedMemBytes, hStream, kernelParams**, extra**) -> CUresult`
517 ///
518 /// Launches a CUDA kernel.
519 #[allow(clippy::type_complexity)]
520 pub cu_launch_kernel: unsafe extern "C" fn(
521 f: CUfunction,
522 grid_dim_x: u32,
523 grid_dim_y: u32,
524 grid_dim_z: u32,
525 block_dim_x: u32,
526 block_dim_y: u32,
527 block_dim_z: u32,
528 shared_mem_bytes: u32,
529 hstream: CUstream,
530 kernel_params: *mut *mut c_void,
531 extra: *mut *mut c_void,
532 ) -> CUresult,
533
534 /// `cuLaunchCooperativeKernel(f, gridDimX, gridDimY, gridDimZ, blockDimX,
535 /// blockDimY, blockDimZ, sharedMemBytes, hStream, kernelParams**) -> CUresult`
536 ///
537 /// Launches a cooperative CUDA kernel (CUDA 9.0+).
538 #[allow(clippy::type_complexity)]
539 pub cu_launch_cooperative_kernel: unsafe extern "C" fn(
540 f: CUfunction,
541 grid_dim_x: u32,
542 grid_dim_y: u32,
543 grid_dim_z: u32,
544 block_dim_x: u32,
545 block_dim_y: u32,
546 block_dim_z: u32,
547 shared_mem_bytes: u32,
548 hstream: CUstream,
549 kernel_params: *mut *mut c_void,
550 ) -> CUresult,
551
552 /// `cuLaunchCooperativeKernelMultiDevice(launchParamsList*, numDevices,
553 /// flags) -> CUresult`
554 ///
555 /// Launches a cooperative kernel across multiple devices (CUDA 9.0+).
556 pub cu_launch_cooperative_kernel_multi_device: unsafe extern "C" fn(
557 launch_params_list: *mut c_void,
558 num_devices: u32,
559 flags: u32,
560 ) -> CUresult,
561
562 // -- Occupancy ---------------------------------------------------------
563 /// `cuOccupancyMaxActiveBlocksPerMultiprocessor(numBlocks*, func, blockSize,
564 /// dynamicSMemSize) -> CUresult`
565 ///
566 /// Returns the number of the maximum active blocks per streaming
567 /// multiprocessor.
568 pub cu_occupancy_max_active_blocks_per_multiprocessor: unsafe extern "C" fn(
569 num_blocks: *mut c_int,
570 func: CUfunction,
571 block_size: c_int,
572 dynamic_smem_size: usize,
573 ) -> CUresult,
574
575 /// `cuOccupancyMaxPotentialBlockSize(minGridSize*, blockSize*, func,
576 /// blockSizeToDynamicSMemSize, dynamicSMemSize, blockSizeLimit) -> CUresult`
577 ///
578 /// Suggests a launch configuration with reasonable occupancy.
579 #[allow(clippy::type_complexity)]
580 pub cu_occupancy_max_potential_block_size: unsafe extern "C" fn(
581 min_grid_size: *mut c_int,
582 block_size: *mut c_int,
583 func: CUfunction,
584 block_size_to_dynamic_smem_size: Option<unsafe extern "C" fn(c_int) -> usize>,
585 dynamic_smem_size: usize,
586 block_size_limit: c_int,
587 ) -> CUresult,
588
589 /// `cuOccupancyMaxActiveBlocksPerMultiprocessorWithFlags(numBlocks*, func,
590 /// blockSize, dynamicSMemSize, flags) -> CUresult`
591 ///
592 /// Like `cuOccupancyMaxActiveBlocksPerMultiprocessor` but with flags
593 /// to control caching behaviour (CUDA 9.0+).
594 pub cu_occupancy_max_active_blocks_per_multiprocessor_with_flags:
595 unsafe extern "C" fn(
596 num_blocks: *mut c_int,
597 func: CUfunction,
598 block_size: c_int,
599 dynamic_smem_size: usize,
600 flags: u32,
601 ) -> CUresult,
602
603 // -- Memory management (optional) -----------------------------------------
604 /// `cuMemcpyDtoDAsync_v2(dst, src, bytesize, stream) -> CUresult`
605 ///
606 /// Asynchronously copies data from device memory to device memory.
607 pub cu_memcpy_dtod_async_v2: Option<
608 unsafe extern "C" fn(
609 dst: CUdeviceptr,
610 src: CUdeviceptr,
611 bytesize: usize,
612 stream: CUstream,
613 ) -> CUresult,
614 >,
615
616 /// `cuMemsetD16_v2(dst, value, count) -> CUresult`
617 ///
618 /// Sets device memory to a value (16-bit granularity).
619 pub cu_memset_d16_v2:
620 Option<unsafe extern "C" fn(dst: CUdeviceptr, value: u16, count: usize) -> CUresult>,
621
622 /// `cuMemsetD32Async(dst, value, count, stream) -> CUresult`
623 ///
624 /// Asynchronously sets device memory to a value (32-bit granularity).
625 pub cu_memset_d32_async: Option<
626 unsafe extern "C" fn(
627 dst: CUdeviceptr,
628 value: u32,
629 count: usize,
630 stream: CUstream,
631 ) -> CUresult,
632 >,
633
634 // -- Context management (optional) ----------------------------------------
635 /// `cuCtxGetLimit(value*, limit) -> CUresult`
636 ///
637 /// Returns the value of a context limit.
638 pub cu_ctx_get_limit: Option<unsafe extern "C" fn(value: *mut usize, limit: u32) -> CUresult>,
639
640 /// `cuCtxSetLimit(limit, value) -> CUresult`
641 ///
642 /// Sets a context limit.
643 pub cu_ctx_set_limit: Option<unsafe extern "C" fn(limit: u32, value: usize) -> CUresult>,
644
645 /// `cuCtxGetCacheConfig(config*) -> CUresult`
646 ///
647 /// Returns the current cache configuration for the context.
648 pub cu_ctx_get_cache_config: Option<unsafe extern "C" fn(config: *mut u32) -> CUresult>,
649
650 /// `cuCtxSetCacheConfig(config) -> CUresult`
651 ///
652 /// Sets the cache configuration for the current context.
653 pub cu_ctx_set_cache_config: Option<unsafe extern "C" fn(config: u32) -> CUresult>,
654
655 /// `cuCtxGetSharedMemConfig(config*) -> CUresult`
656 ///
657 /// Returns the shared memory configuration for the context.
658 pub cu_ctx_get_shared_mem_config: Option<unsafe extern "C" fn(config: *mut u32) -> CUresult>,
659
660 /// `cuCtxSetSharedMemConfig(config) -> CUresult`
661 ///
662 /// Sets the shared memory configuration for the current context.
663 pub cu_ctx_set_shared_mem_config: Option<unsafe extern "C" fn(config: u32) -> CUresult>,
664
665 // -- Event with flags (optional, CUDA 11.1+) ------------------------------
666 /// `cuEventRecordWithFlags(hEvent, hStream, flags) -> CUresult`
667 ///
668 /// Records an event in a stream with additional flags (CUDA 11.1+).
669 /// Falls back to `cu_event_record` when `None`.
670 pub cu_event_record_with_flags:
671 Option<unsafe extern "C" fn(hevent: CUevent, hstream: CUstream, flags: u32) -> CUresult>,
672
673 // -- Function attributes (optional) ---------------------------------------
674 /// `cuFuncGetAttribute(value*, attrib, func) -> CUresult`
675 ///
676 /// Returns information about a function.
677 pub cu_func_get_attribute: Option<
678 unsafe extern "C" fn(value: *mut c_int, attrib: c_int, func: CUfunction) -> CUresult,
679 >,
680
681 /// `cuFuncSetCacheConfig(func, config) -> CUresult`
682 ///
683 /// Sets the cache configuration for a device function.
684 pub cu_func_set_cache_config:
685 Option<unsafe extern "C" fn(func: CUfunction, config: u32) -> CUresult>,
686
687 /// `cuFuncSetSharedMemConfig(func, config) -> CUresult`
688 ///
689 /// Sets the shared memory configuration for a device function.
690 pub cu_func_set_shared_mem_config:
691 Option<unsafe extern "C" fn(func: CUfunction, config: u32) -> CUresult>,
692
693 /// `cuFuncSetAttribute(func, attrib, value) -> CUresult`
694 ///
695 /// Sets an attribute value for a device function.
696 pub cu_func_set_attribute:
697 Option<unsafe extern "C" fn(func: CUfunction, attrib: c_int, value: c_int) -> CUresult>,
698
699 // -- Profiler (optional) --------------------------------------------------
700 /// `cuProfilerStart() -> CUresult`
701 ///
702 /// Starts the CUDA profiler.
703 pub cu_profiler_start: Option<unsafe extern "C" fn() -> CUresult>,
704
705 /// `cuProfilerStop() -> CUresult`
706 ///
707 /// Stops the CUDA profiler.
708 pub cu_profiler_stop: Option<unsafe extern "C" fn() -> CUresult>,
709
710 // -- CUDA 12.x extended launch (optional) ---------------------------------
711 /// `cuLaunchKernelEx(config*, f, kernelParams**, extra**) -> CUresult`
712 ///
713 /// Extended kernel launch with cluster dimensions and other CUDA 12.0+
714 /// attributes. Available only when the driver is CUDA 12.0 or newer.
715 ///
716 /// When `None`, fall back to [`cu_launch_kernel`](Self::cu_launch_kernel).
717 #[allow(clippy::type_complexity)]
718 pub cu_launch_kernel_ex: Option<
719 unsafe extern "C" fn(
720 config: *const CuLaunchConfig,
721 f: CUfunction,
722 kernel_params: *mut *mut std::ffi::c_void,
723 extra: *mut *mut std::ffi::c_void,
724 ) -> CUresult,
725 >,
726
727 /// `cuTensorMapEncodeTiled(tensorMap*, ...) -> CUresult`
728 ///
729 /// Creates a TMA tensor map descriptor for tiled access patterns.
730 /// Available on CUDA 12.0+ with sm_90+ (Hopper/Blackwell).
731 ///
732 /// When `None`, TMA is not supported by the loaded driver.
733 #[allow(clippy::type_complexity)]
734 pub cu_tensor_map_encode_tiled: Option<
735 unsafe extern "C" fn(
736 tensor_map: *mut std::ffi::c_void,
737 tensor_data_type: u32,
738 tensor_rank: u32,
739 global_address: *mut std::ffi::c_void,
740 global_dim: *const u64,
741 global_strides: *const u64,
742 box_dim: *const u32,
743 element_strides: *const u32,
744 interleave: u32,
745 swizzle: u32,
746 l2_promotion: u32,
747 oob_fill: u32,
748 ) -> CUresult,
749 >,
750
751 // -- CUDA 12.8+ extended API (optional) -----------------------------------
752 /// `cuTensorMapEncodeTiledMemref(tensorMap*, ...) -> CUresult`
753 ///
754 /// Extended TMA encoding using memref descriptors (CUDA 12.8+,
755 /// Blackwell sm_100/sm_120). When `None`, fall back to
756 /// [`cu_tensor_map_encode_tiled`](Self::cu_tensor_map_encode_tiled).
757 #[allow(clippy::type_complexity)]
758 pub cu_tensor_map_encode_tiled_memref: Option<
759 unsafe extern "C" fn(
760 tensor_map: *mut c_void,
761 tensor_data_type: u32,
762 tensor_rank: u32,
763 global_address: *mut c_void,
764 global_dim: *const u64,
765 global_strides: *const u64,
766 box_dim: *const u32,
767 element_strides: *const u32,
768 interleave: u32,
769 swizzle: u32,
770 l2_promotion: u32,
771 oob_fill: u32,
772 flags: u64,
773 ) -> CUresult,
774 >,
775
    /// `cuKernelGetLibrary(pLib*, kernel) -> CUresult`
    ///
    /// Returns the library handle that owns a given kernel handle
    /// (CUDA 12.8+). When `None`, the driver does not support the JIT
    /// library API.
    pub cu_kernel_get_library:
        Option<unsafe extern "C" fn(p_lib: *mut CUlibrary, kernel: CUkernel) -> CUresult>,

    /// `cuMulticastGetGranularity(granularity*, desc*, option) -> CUresult`
    ///
    /// Queries the recommended memory granularity for an NVLink multicast
    /// object (CUDA 12.8+). When `None`, multicast memory is not supported.
    ///
    /// `desc` is deliberately untyped (`*const c_void`); the caller is
    /// responsible for passing a correctly laid-out descriptor struct.
    pub cu_multicast_get_granularity: Option<
        unsafe extern "C" fn(granularity: *mut usize, desc: *const c_void, option: u32) -> CUresult,
    >,

    /// `cuMulticastCreate(mcHandle*, desc*) -> CUresult`
    ///
    /// Creates an NVLink multicast object for cross-GPU broadcast memory
    /// (CUDA 12.8+). When `None`, multicast memory is not supported.
    pub cu_multicast_create: Option<
        unsafe extern "C" fn(mc_handle: *mut CUmulticastObject, desc: *const c_void) -> CUresult,
    >,

    /// `cuMulticastAddDevice(mcHandle, dev) -> CUresult`
    ///
    /// Adds a device to an NVLink multicast group (CUDA 12.8+). When
    /// `None`, multicast memory is not supported.
    pub cu_multicast_add_device:
        Option<unsafe extern "C" fn(mc_handle: CUmulticastObject, dev: CUdevice) -> CUresult>,

    /// `cuMemcpyBatchAsync(dsts*, srcs*, sizes*, count, flags, stream) -> CUresult`
    ///
    /// Issues *count* asynchronous memory copies (H2D, D2H, or D2D) in a
    /// single driver call (CUDA 12.8+). When `None`, issue individual
    /// `cuMemcpyAsync` calls as a fallback.
    ///
    /// NOTE(review): this signature is hand-declared; the `cuMemcpyBatchAsync`
    /// prototype published in the CUDA 12.8 headers also carries attribute
    /// arrays and a failure-index out-parameter. A mismatch between this fn
    /// pointer type and the real export would be UB at call time — verify
    /// against `cuda.h` before first use.
    #[allow(clippy::type_complexity)]
    pub cu_memcpy_batch_async: Option<
        unsafe extern "C" fn(
            dsts: *const *mut c_void,
            srcs: *const *const c_void,
            sizes: *const usize,
            count: u64,
            flags: u64,
            stream: CUstream,
        ) -> CUresult,
    >,

    // -- Texture / Surface memory (optional) ----------------------------------
    /// `cuArrayCreate_v2(pHandle*, pAllocateArray*) -> CUresult`
    ///
    /// Allocates a 1-D or 2-D CUDA array. When `None`, CUDA array allocation
    /// is not supported by the loaded driver.
    pub cu_array_create_v2: Option<
        unsafe extern "C" fn(
            p_handle: *mut CUarray,
            p_allocate_array: *const CUDA_ARRAY_DESCRIPTOR,
        ) -> CUresult,
    >,

    /// `cuArrayDestroy(hArray) -> CUresult`
    ///
    /// Frees a CUDA array previously allocated by `cuArrayCreate_v2`.
    pub cu_array_destroy: Option<unsafe extern "C" fn(h_array: CUarray) -> CUresult>,

    /// `cuArrayGetDescriptor_v2(pArrayDescriptor*, hArray) -> CUresult`
    ///
    /// Returns the descriptor of a 1-D or 2-D CUDA array.
    pub cu_array_get_descriptor_v2: Option<
        unsafe extern "C" fn(
            p_array_descriptor: *mut CUDA_ARRAY_DESCRIPTOR,
            h_array: CUarray,
        ) -> CUresult,
    >,

    /// `cuArray3DCreate_v2(pHandle*, pAllocateArray*) -> CUresult`
    ///
    /// Allocates a 3-D CUDA array (also supports layered and cubemap arrays).
    pub cu_array3d_create_v2: Option<
        unsafe extern "C" fn(
            p_handle: *mut CUarray,
            p_allocate_array: *const CUDA_ARRAY3D_DESCRIPTOR,
        ) -> CUresult,
    >,

    /// `cuArray3DGetDescriptor_v2(pArrayDescriptor*, hArray) -> CUresult`
    ///
    /// Returns the descriptor of a 3-D CUDA array.
    pub cu_array3d_get_descriptor_v2: Option<
        unsafe extern "C" fn(
            p_array_descriptor: *mut CUDA_ARRAY3D_DESCRIPTOR,
            h_array: CUarray,
        ) -> CUresult,
    >,

    /// `cuMemcpyHtoA_v2(dstArray, dstOffset, srcHost*, ByteCount) -> CUresult`
    ///
    /// Synchronously copies host memory into a CUDA array.
    /// `dst_offset` is a byte offset into the array; `byte_count` is the copy
    /// size in bytes.
    pub cu_memcpy_htoa_v2: Option<
        unsafe extern "C" fn(
            dst_array: CUarray,
            dst_offset: usize,
            src_host: *const c_void,
            byte_count: usize,
        ) -> CUresult,
    >,

    /// `cuMemcpyAtoH_v2(dstHost*, srcArray, srcOffset, ByteCount) -> CUresult`
    ///
    /// Synchronously copies data from a CUDA array into host memory.
    /// `src_offset` is a byte offset into the array; `byte_count` is the copy
    /// size in bytes.
    pub cu_memcpy_atoh_v2: Option<
        unsafe extern "C" fn(
            dst_host: *mut c_void,
            src_array: CUarray,
            src_offset: usize,
            byte_count: usize,
        ) -> CUresult,
    >,

    /// `cuMemcpyHtoAAsync_v2(dstArray, dstOffset, srcHost*, byteCount, stream) -> CUresult`
    ///
    /// Asynchronously copies host memory into a CUDA array on a stream.
    pub cu_memcpy_htoa_async_v2: Option<
        unsafe extern "C" fn(
            dst_array: CUarray,
            dst_offset: usize,
            src_host: *const c_void,
            byte_count: usize,
            stream: CUstream,
        ) -> CUresult,
    >,

    /// `cuMemcpyAtoHAsync_v2(dstHost*, srcArray, srcOffset, byteCount, stream) -> CUresult`
    ///
    /// Asynchronously copies data from a CUDA array into host memory on a stream.
    pub cu_memcpy_atoh_async_v2: Option<
        unsafe extern "C" fn(
            dst_host: *mut c_void,
            src_array: CUarray,
            src_offset: usize,
            byte_count: usize,
            stream: CUstream,
        ) -> CUresult,
    >,

    /// `cuTexObjectCreate(pTexObject*, pResDesc*, pTexDesc*, pResViewDesc*) -> CUresult`
    ///
    /// Creates a texture object from a resource descriptor, texture descriptor,
    /// and optional resource-view descriptor (CUDA 5.0+).
    pub cu_tex_object_create: Option<
        unsafe extern "C" fn(
            p_tex_object: *mut CUtexObject,
            p_res_desc: *const CUDA_RESOURCE_DESC,
            p_tex_desc: *const CUDA_TEXTURE_DESC,
            p_res_view_desc: *const CUDA_RESOURCE_VIEW_DESC,
        ) -> CUresult,
    >,

    /// `cuTexObjectDestroy(texObject) -> CUresult`
    ///
    /// Destroys a texture object created by `cuTexObjectCreate`.
    pub cu_tex_object_destroy: Option<unsafe extern "C" fn(tex_object: CUtexObject) -> CUresult>,

    /// `cuTexObjectGetResourceDesc(pResDesc*, texObject) -> CUresult`
    ///
    /// Returns the resource descriptor of a texture object.
    pub cu_tex_object_get_resource_desc: Option<
        unsafe extern "C" fn(
            p_res_desc: *mut CUDA_RESOURCE_DESC,
            tex_object: CUtexObject,
        ) -> CUresult,
    >,

    /// `cuSurfObjectCreate(pSurfObject*, pResDesc*) -> CUresult`
    ///
    /// Creates a surface object from a resource descriptor (CUDA 5.0+).
    /// The resource type must be `Array` (surface-capable CUDA arrays only).
    pub cu_surf_object_create: Option<
        unsafe extern "C" fn(
            p_surf_object: *mut CUsurfObject,
            p_res_desc: *const CUDA_RESOURCE_DESC,
        ) -> CUresult,
    >,

    /// `cuSurfObjectDestroy(surfObject) -> CUresult`
    ///
    /// Destroys a surface object created by `cuSurfObjectCreate`.
    pub cu_surf_object_destroy: Option<unsafe extern "C" fn(surf_object: CUsurfObject) -> CUresult>,
}
965
// SAFETY: All fields are plain function pointers (which are Send + Sync) and
// the Library handle is kept alive but never mutated.
//
// NOTE(review): if every field really is Send + Sync (fn pointers are, and
// `libloading::Library` is on supported platforms), these impls are redundant
// — the auto traits would be derived. Crucially, an explicit `unsafe impl`
// SUPPRESSES the compiler's auto-trait check: adding a non-Send field (e.g. a
// raw pointer) later would silently remain "Send". Re-verify these impls
// whenever a field is added to `DriverApi`.
unsafe impl Send for DriverApi {}
unsafe impl Sync for DriverApi {}
970
971// ---------------------------------------------------------------------------
972// DriverApi — construction
973// ---------------------------------------------------------------------------
974
975impl DriverApi {
976 /// Attempt to dynamically load the CUDA driver shared library and resolve
977 /// every required symbol.
978 ///
979 /// # Platform behaviour
980 ///
981 /// * **macOS** — immediately returns [`DriverLoadError::UnsupportedPlatform`].
982 /// * **Linux** — tries `libcuda.so.1` then `libcuda.so`.
983 /// * **Windows** — tries `nvcuda.dll`.
984 ///
985 /// # Errors
986 ///
987 /// * [`DriverLoadError::UnsupportedPlatform`] on macOS.
988 /// * [`DriverLoadError::LibraryNotFound`] if none of the candidate library
989 /// names could be opened.
990 /// * [`DriverLoadError::SymbolNotFound`] if a required CUDA entry point is
991 /// missing from the loaded library.
992 pub fn load() -> Result<Self, DriverLoadError> {
993 // macOS: CUDA is not and will not be supported.
994 #[cfg(target_os = "macos")]
995 {
996 Err(DriverLoadError::UnsupportedPlatform)
997 }
998
999 // Linux library search order.
1000 #[cfg(target_os = "linux")]
1001 let lib_names: &[&str] = &["libcuda.so.1", "libcuda.so"];
1002
1003 // Windows library search order.
1004 #[cfg(target_os = "windows")]
1005 let lib_names: &[&str] = &["nvcuda.dll"];
1006
1007 #[cfg(not(target_os = "macos"))]
1008 {
1009 let lib = Self::load_library(lib_names)?;
1010 let api = Self::load_symbols(lib)?;
1011 // `cuInit(0)` must be called before any other CUDA driver API.
1012 // This mirrors what `libcudart` does internally on the first CUDA
1013 // Runtime call. We call it unconditionally here so that all
1014 // `try_driver()` callers get a fully initialised driver without
1015 // each needing to call `cuInit` themselves.
1016 //
1017 // SAFETY: `api.cu_init` was just resolved from the shared library.
1018 // Passing flags=0 is the only documented value.
1019 let rc = unsafe { (api.cu_init)(0) };
1020 if rc != 0 {
1021 // Propagate the error; the OnceLock will store this Err and
1022 // return CudaError::NotInitialized on every subsequent
1023 // try_driver() call — matching behaviour on no-GPU machines.
1024 return Err(DriverLoadError::InitializationFailed { code: rc as u32 });
1025 }
1026 Ok(api)
1027 }
1028 }
1029
1030 /// Try each candidate library name in order, returning the first that
1031 /// loads successfully.
1032 ///
1033 /// # Errors
1034 ///
1035 /// Returns [`DriverLoadError::LibraryNotFound`] if **all** candidates
1036 /// fail to load, capturing the last OS-level error message.
1037 #[cfg(not(target_os = "macos"))]
1038 fn load_library(names: &[&str]) -> Result<Library, DriverLoadError> {
1039 let mut last_error = String::new();
1040 for name in names {
1041 // SAFETY: loading a shared library has side-effects (running its
1042 // init routines), but the CUDA driver library is designed for
1043 // this.
1044 match unsafe { Library::new(*name) } {
1045 Ok(lib) => {
1046 tracing::debug!("loaded CUDA driver library: {name}");
1047 return Ok(lib);
1048 }
1049 Err(e) => {
1050 tracing::debug!("failed to load {name}: {e}");
1051 last_error = e.to_string();
1052 }
1053 }
1054 }
1055
1056 Err(DriverLoadError::LibraryNotFound {
1057 candidates: names.iter().map(|s| (*s).to_string()).collect(),
1058 last_error,
1059 })
1060 }
1061
1062 /// Resolve every required CUDA driver symbol from the loaded library and
1063 /// assemble the [`DriverApi`] function table.
1064 ///
1065 /// # Errors
1066 ///
1067 /// Returns [`DriverLoadError::SymbolNotFound`] if any symbol cannot be
1068 /// resolved.
1069 #[cfg(not(target_os = "macos"))]
1070 fn load_symbols(lib: Library) -> Result<Self, DriverLoadError> {
1071 Ok(Self {
1072 // -- Initialisation ------------------------------------------------
1073 cu_init: load_sym!(lib, "cuInit"),
1074
1075 // -- Version query -------------------------------------------------
1076 cu_driver_get_version: load_sym!(lib, "cuDriverGetVersion"),
1077
1078 // -- Device management ---------------------------------------------
1079 cu_device_get: load_sym!(lib, "cuDeviceGet"),
1080 cu_device_get_count: load_sym!(lib, "cuDeviceGetCount"),
1081 cu_device_get_name: load_sym!(lib, "cuDeviceGetName"),
1082 cu_device_get_attribute: load_sym!(lib, "cuDeviceGetAttribute"),
1083 cu_device_total_mem_v2: load_sym!(lib, "cuDeviceTotalMem_v2"),
1084 cu_device_can_access_peer: load_sym!(lib, "cuDeviceCanAccessPeer"),
1085
1086 // -- Primary context management ------------------------------------
1087 cu_device_primary_ctx_retain: load_sym!(lib, "cuDevicePrimaryCtxRetain"),
1088 cu_device_primary_ctx_release_v2: load_sym!(lib, "cuDevicePrimaryCtxRelease_v2"),
1089 cu_device_primary_ctx_set_flags_v2: load_sym!(lib, "cuDevicePrimaryCtxSetFlags_v2"),
1090 cu_device_primary_ctx_get_state: load_sym!(lib, "cuDevicePrimaryCtxGetState"),
1091 cu_device_primary_ctx_reset_v2: load_sym!(lib, "cuDevicePrimaryCtxReset_v2"),
1092
1093 // -- Context management --------------------------------------------
1094 cu_ctx_create_v2: load_sym!(lib, "cuCtxCreate_v2"),
1095 cu_ctx_destroy_v2: load_sym!(lib, "cuCtxDestroy_v2"),
1096 cu_ctx_set_current: load_sym!(lib, "cuCtxSetCurrent"),
1097 cu_ctx_get_current: load_sym!(lib, "cuCtxGetCurrent"),
1098 cu_ctx_synchronize: load_sym!(lib, "cuCtxSynchronize"),
1099
1100 // -- Module management ---------------------------------------------
1101 cu_module_load_data: load_sym!(lib, "cuModuleLoadData"),
1102 cu_module_load_data_ex: load_sym!(lib, "cuModuleLoadDataEx"),
1103 cu_module_get_function: load_sym!(lib, "cuModuleGetFunction"),
1104 cu_module_unload: load_sym!(lib, "cuModuleUnload"),
1105
1106 // -- Memory management ---------------------------------------------
1107 cu_mem_alloc_v2: load_sym!(lib, "cuMemAlloc_v2"),
1108 cu_mem_free_v2: load_sym!(lib, "cuMemFree_v2"),
1109 cu_memcpy_htod_v2: load_sym!(lib, "cuMemcpyHtoD_v2"),
1110 cu_memcpy_dtoh_v2: load_sym!(lib, "cuMemcpyDtoH_v2"),
1111 cu_memcpy_dtod_v2: load_sym!(lib, "cuMemcpyDtoD_v2"),
1112 cu_memcpy_htod_async_v2: load_sym!(lib, "cuMemcpyHtoDAsync_v2"),
1113 cu_memcpy_dtoh_async_v2: load_sym!(lib, "cuMemcpyDtoHAsync_v2"),
1114 cu_mem_alloc_host_v2: load_sym!(lib, "cuMemAllocHost_v2"),
1115 cu_mem_free_host: load_sym!(lib, "cuMemFreeHost"),
1116 cu_mem_alloc_managed: load_sym!(lib, "cuMemAllocManaged"),
1117 cu_memset_d8_v2: load_sym!(lib, "cuMemsetD8_v2"),
1118 cu_memset_d32_v2: load_sym!(lib, "cuMemsetD32_v2"),
1119 cu_mem_get_info_v2: load_sym!(lib, "cuMemGetInfo_v2"),
1120 cu_mem_host_register_v2: load_sym!(lib, "cuMemHostRegister_v2"),
1121 cu_mem_host_unregister: load_sym!(lib, "cuMemHostUnregister"),
1122 cu_mem_host_get_device_pointer_v2: load_sym!(lib, "cuMemHostGetDevicePointer_v2"),
1123 cu_pointer_get_attribute: load_sym!(lib, "cuPointerGetAttribute"),
1124 cu_mem_advise: load_sym!(lib, "cuMemAdvise"),
1125 cu_mem_prefetch_async: load_sym!(lib, "cuMemPrefetchAsync"),
1126
1127 // -- Stream management ---------------------------------------------
1128 cu_stream_create: load_sym!(lib, "cuStreamCreate"),
1129 cu_stream_create_with_priority: load_sym!(lib, "cuStreamCreateWithPriority"),
1130 cu_stream_destroy_v2: load_sym!(lib, "cuStreamDestroy_v2"),
1131 cu_stream_synchronize: load_sym!(lib, "cuStreamSynchronize"),
1132 cu_stream_wait_event: load_sym!(lib, "cuStreamWaitEvent"),
1133 cu_stream_query: load_sym!(lib, "cuStreamQuery"),
1134 cu_stream_get_priority: load_sym!(lib, "cuStreamGetPriority"),
1135 cu_stream_get_flags: load_sym!(lib, "cuStreamGetFlags"),
1136
1137 // -- Event management ----------------------------------------------
1138 cu_event_create: load_sym!(lib, "cuEventCreate"),
1139 cu_event_destroy_v2: load_sym!(lib, "cuEventDestroy_v2"),
1140 cu_event_record: load_sym!(lib, "cuEventRecord"),
1141 cu_event_query: load_sym!(lib, "cuEventQuery"),
1142 cu_event_synchronize: load_sym!(lib, "cuEventSynchronize"),
1143 cu_event_elapsed_time: load_sym!(lib, "cuEventElapsedTime"),
1144 cu_event_record_with_flags: load_sym_optional!(lib, "cuEventRecordWithFlags"),
1145
1146 // -- Peer memory access -------------------------------------------
1147 cu_memcpy_peer: load_sym!(lib, "cuMemcpyPeer"),
1148 cu_memcpy_peer_async: load_sym!(lib, "cuMemcpyPeerAsync"),
1149 cu_ctx_enable_peer_access: load_sym!(lib, "cuCtxEnablePeerAccess"),
1150 cu_ctx_disable_peer_access: load_sym!(lib, "cuCtxDisablePeerAccess"),
1151
1152 // -- Kernel launch -------------------------------------------------
1153 cu_launch_kernel: load_sym!(lib, "cuLaunchKernel"),
1154 cu_launch_cooperative_kernel: load_sym!(lib, "cuLaunchCooperativeKernel"),
1155 cu_launch_cooperative_kernel_multi_device: load_sym!(
1156 lib,
1157 "cuLaunchCooperativeKernelMultiDevice"
1158 ),
1159
1160 // -- Occupancy -----------------------------------------------------
1161 cu_occupancy_max_active_blocks_per_multiprocessor: load_sym!(
1162 lib,
1163 "cuOccupancyMaxActiveBlocksPerMultiprocessor"
1164 ),
1165 cu_occupancy_max_potential_block_size: load_sym!(
1166 lib,
1167 "cuOccupancyMaxPotentialBlockSize"
1168 ),
1169 cu_occupancy_max_active_blocks_per_multiprocessor_with_flags: load_sym!(
1170 lib,
1171 "cuOccupancyMaxActiveBlocksPerMultiprocessorWithFlags"
1172 ),
1173
1174 // -- Memory management (optional) ---------------------------------
1175 cu_memcpy_dtod_async_v2: load_sym_optional!(lib, "cuMemcpyDtoDAsync_v2"),
1176 cu_memset_d16_v2: load_sym_optional!(lib, "cuMemsetD16_v2"),
1177 cu_memset_d32_async: load_sym_optional!(lib, "cuMemsetD32Async"),
1178
1179 // -- Context management (optional) --------------------------------
1180 cu_ctx_get_limit: load_sym_optional!(lib, "cuCtxGetLimit"),
1181 cu_ctx_set_limit: load_sym_optional!(lib, "cuCtxSetLimit"),
1182 cu_ctx_get_cache_config: load_sym_optional!(lib, "cuCtxGetCacheConfig"),
1183 cu_ctx_set_cache_config: load_sym_optional!(lib, "cuCtxSetCacheConfig"),
1184 cu_ctx_get_shared_mem_config: load_sym_optional!(lib, "cuCtxGetSharedMemConfig"),
1185 cu_ctx_set_shared_mem_config: load_sym_optional!(lib, "cuCtxSetSharedMemConfig"),
1186
1187 // -- Function attributes (optional) -------------------------------
1188 cu_func_get_attribute: load_sym_optional!(lib, "cuFuncGetAttribute"),
1189 cu_func_set_cache_config: load_sym_optional!(lib, "cuFuncSetCacheConfig"),
1190 cu_func_set_shared_mem_config: load_sym_optional!(lib, "cuFuncSetSharedMemConfig"),
1191 cu_func_set_attribute: load_sym_optional!(lib, "cuFuncSetAttribute"),
1192
1193 // -- Profiler (optional) ------------------------------------------
1194 cu_profiler_start: load_sym_optional!(lib, "cuProfilerStart"),
1195 cu_profiler_stop: load_sym_optional!(lib, "cuProfilerStop"),
1196
1197 // -- CUDA 12.x extended launch (optional) -------------------------
1198 cu_launch_kernel_ex: load_sym_optional!(lib, "cuLaunchKernelEx"),
1199 cu_tensor_map_encode_tiled: load_sym_optional!(lib, "cuTensorMapEncodeTiled"),
1200
1201 // -- CUDA 12.8+ extended API (optional) ---------------------------
1202 cu_tensor_map_encode_tiled_memref: load_sym_optional!(
1203 lib,
1204 "cuTensorMapEncodeTiledMemref"
1205 ),
1206 cu_kernel_get_library: load_sym_optional!(lib, "cuKernelGetLibrary"),
1207 cu_multicast_get_granularity: load_sym_optional!(lib, "cuMulticastGetGranularity"),
1208 cu_multicast_create: load_sym_optional!(lib, "cuMulticastCreate"),
1209 cu_multicast_add_device: load_sym_optional!(lib, "cuMulticastAddDevice"),
1210 cu_memcpy_batch_async: load_sym_optional!(lib, "cuMemcpyBatchAsync"),
1211
1212 // -- Texture / Surface memory (optional) ---------------------------
1213 cu_array_create_v2: load_sym_optional!(lib, "cuArrayCreate_v2"),
1214 cu_array_destroy: load_sym_optional!(lib, "cuArrayDestroy"),
1215 cu_array_get_descriptor_v2: load_sym_optional!(lib, "cuArrayGetDescriptor_v2"),
1216 cu_array3d_create_v2: load_sym_optional!(lib, "cuArray3DCreate_v2"),
1217 cu_array3d_get_descriptor_v2: load_sym_optional!(lib, "cuArray3DGetDescriptor_v2"),
1218 cu_memcpy_htoa_v2: load_sym_optional!(lib, "cuMemcpyHtoA_v2"),
1219 cu_memcpy_atoh_v2: load_sym_optional!(lib, "cuMemcpyAtoH_v2"),
1220 cu_memcpy_htoa_async_v2: load_sym_optional!(lib, "cuMemcpyHtoAAsync_v2"),
1221 cu_memcpy_atoh_async_v2: load_sym_optional!(lib, "cuMemcpyAtoHAsync_v2"),
1222 cu_tex_object_create: load_sym_optional!(lib, "cuTexObjectCreate"),
1223 cu_tex_object_destroy: load_sym_optional!(lib, "cuTexObjectDestroy"),
1224 cu_tex_object_get_resource_desc: load_sym_optional!(lib, "cuTexObjectGetResourceDesc"),
1225 cu_surf_object_create: load_sym_optional!(lib, "cuSurfObjectCreate"),
1226 cu_surf_object_destroy: load_sym_optional!(lib, "cuSurfObjectDestroy"),
1227
1228 // Keep the library handle alive.
1229 _lib: lib,
1230 })
1231 }
1232}
1233
1234// ---------------------------------------------------------------------------
1235// Global accessor
1236// ---------------------------------------------------------------------------
1237
1238/// Get a reference to the lazily-loaded CUDA driver API function table.
1239///
1240/// On the first call, this function dynamically loads the CUDA shared library
1241/// and resolves all required symbols. Subsequent calls return the cached
1242/// result with only an atomic load.
1243///
1244/// # Errors
1245///
1246/// Returns [`CudaError::NotInitialized`] if the driver could not be loaded —
1247/// for instance, on macOS, or on a system without an NVIDIA GPU driver
1248/// installed.
1249///
1250/// # Examples
1251///
1252/// ```rust,no_run
1253/// # use oxicuda_driver::loader::try_driver;
1254/// let api = try_driver()?;
1255/// let result = unsafe { (api.cu_init)(0) };
1256/// # Ok::<(), oxicuda_driver::error::CudaError>(())
1257/// ```
1258pub fn try_driver() -> CudaResult<&'static DriverApi> {
1259 let result = DRIVER.get_or_init(DriverApi::load);
1260 match result {
1261 Ok(api) => Ok(api),
1262 Err(_) => Err(CudaError::NotInitialized),
1263 }
1264}
1265
1266// ---------------------------------------------------------------------------
1267// Tests
1268// ---------------------------------------------------------------------------
1269
#[cfg(test)]
mod tests {
    use super::*;

    /// On macOS, loading should always fail with `UnsupportedPlatform`.
    #[cfg(target_os = "macos")]
    #[test]
    fn load_returns_unsupported_on_macos() {
        match DriverApi::load() {
            Ok(_) => panic!("expected Err on macOS"),
            Err(err) => assert!(
                matches!(err, DriverLoadError::UnsupportedPlatform),
                "expected UnsupportedPlatform, got {err:?}"
            ),
        }
    }

    /// `try_driver` should return `Err(NotInitialized)` on platforms without
    /// a CUDA driver (including macOS).
    #[cfg(target_os = "macos")]
    #[test]
    fn try_driver_returns_not_initialized_on_macos() {
        match try_driver() {
            Ok(_) => panic!("expected Err on macOS"),
            Err(err) => assert!(
                matches!(err, CudaError::NotInitialized),
                "expected NotInitialized, got {err:?}"
            ),
        }
    }

    // -----------------------------------------------------------------------
    // Task 1 — CUDA 12.8+ DriverApi struct layout tests
    //
    // These are compile-time probes: each closure below type-checks only if
    // the named field exists on `DriverApi` (and is an `Option`, since
    // `.is_none()` is called on it). The function pointers are never invoked,
    // so the tests compile and run without a GPU.
    // -----------------------------------------------------------------------

    /// Verify that the `cu_tensor_map_encode_tiled_memref` field exists and
    /// is an `Option` type. The driver will return `None` on older versions.
    #[test]
    fn driver_v12_8_api_fields_present() {
        // Spelling out the expected signature proves the type is expressible;
        // a `None` of that type is the cheapest value that carries it.
        type TensorMapEncodeTiledFn = unsafe extern "C" fn(
            tensor_map: *mut std::ffi::c_void,
            tensor_data_type: u32,
            tensor_rank: u32,
            global_address: *mut std::ffi::c_void,
            global_dim: *const u64,
            global_strides: *const u64,
            box_dim: *const u32,
            element_strides: *const u32,
            interleave: u32,
            swizzle: u32,
            l2_promotion: u32,
            oob_fill: u32,
            flags: u64,
        ) -> CUresult;
        let typed_none: Option<TensorMapEncodeTiledFn> = None;
        // Field-name probe: this closure compiles only if the field exists.
        let memref_probe = |api: &DriverApi| api.cu_tensor_map_encode_tiled_memref.is_none();
        // Discard both without warnings.
        let _ = (typed_none, memref_probe);
    }

    /// Verify that `cu_multicast_create` and `cu_multicast_add_device` fields
    /// exist with the correct Option<fn(...)> types (CUDA 12.8+ multicast).
    #[test]
    fn driver_v12_8_multicast_fields_present() {
        let _ = (
            |api: &DriverApi| api.cu_multicast_create.is_none(),
            |api: &DriverApi| api.cu_multicast_add_device.is_none(),
            |api: &DriverApi| api.cu_multicast_get_granularity.is_none(),
        );
    }

    /// Verify that `cu_memcpy_batch_async` field exists with the correct
    /// Option<fn(...)> type (CUDA 12.8+ batch memcpy).
    #[test]
    fn driver_v12_8_batch_memcpy_field_present() {
        let _ = |api: &DriverApi| api.cu_memcpy_batch_async.is_none();
    }

    /// Verify that `cu_kernel_get_library` field exists (CUDA 12.8+ JIT libs).
    #[test]
    fn driver_v12_8_kernel_get_library_field_present() {
        let _ = |api: &DriverApi| api.cu_kernel_get_library.is_none();
    }
}
1377}