//! CUDA runtime implementation
use super::cache::{
get_or_create_client, is_cuda_context_valid, log_cuda_memory_error, try_get_cached_client,
try_get_cached_stream,
};
use super::client::CudaAllocator;
use super::client::CudaClient;
use super::device::CudaDevice;
use super::kernels;
use crate::runtime::Runtime;
use crate::runtime::common::Allocator;
/// CUDA Runtime adapter
///
/// Implements the generic Runtime trait for the CUDA backend.
/// Uses cudarc for direct GPU control.
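///
/// # Example
///
/// A minimal host/device round trip, sketched under the assumption that a
/// CUDA-capable device is present; the `numr::` import paths are illustrative
/// and may not match the crate's actual re-exports.
///
/// ```ignore
/// use numr::runtime::Runtime;
/// use numr::runtime::cuda::{CudaRuntime, cuda_device};
///
/// let device = cuda_device();
/// let data: Vec<u8> = vec![1, 2, 3, 4];
///
/// // Allocate device memory and copy the host buffer up.
/// let ptr = CudaRuntime::allocate(data.len(), &device)?;
/// CudaRuntime::copy_to_device(&data, ptr, &device)?;
///
/// // Copy it back and verify the round trip.
/// let mut out = vec![0u8; data.len()];
/// CudaRuntime::copy_from_device(ptr, &mut out, &device)?;
/// assert_eq!(out, data);
///
/// // Return the buffer to the caching allocator for reuse.
/// CudaRuntime::deallocate(ptr, data.len(), &device);
/// ```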
#[derive(Clone, Debug, Default)]
pub struct CudaRuntime;
impl Runtime for CudaRuntime {
type Device = CudaDevice;
type Client = CudaClient;
type Allocator = CudaAllocator;
type Graph = super::CudaGraph;
type RawHandle = super::CudaRawHandle;
type DType = crate::dtype::DType;
fn name() -> &'static str {
"cuda"
}
fn supports_graph_capture() -> bool {
true // CUDA supports graph capture
}
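/// Capture the GPU work enqueued by `f` into a replayable CUDA graph.
///
/// A hedged usage sketch: `run_forward_pass` is a hypothetical closure body,
/// and how the returned graph is launched depends on `CudaGraph`'s API, which
/// lives outside this file.
///
/// ```ignore
/// let device = cuda_device();
/// let client = CudaRuntime::default_client(&device);
/// let (graph, output) = CudaRuntime::capture_graph(&client, |client| {
///     // Enqueue the work to record (kernel launches on client.stream).
///     run_forward_pass(client)
/// })?;
/// // `graph` can now be replayed instead of re-recording the ops each step.
/// ```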
fn capture_graph<F, T>(client: &Self::Client, f: F) -> crate::error::Result<(Self::Graph, T)>
where
F: FnOnce(&Self::Client) -> crate::error::Result<T>,
{
use cudarc::driver::sys::CUstreamCaptureMode;
// Freeze the caching allocator so all alloc/free calls go directly
// through cuMemAllocAsync/cuMemFreeAsync, creating proper graph nodes.
// Without this, the free-list cache intercepts deallocations (no graph
// free node) and satisfies allocations from cache (no graph alloc node),
// corrupting the graph's internal memory management on replay.
client.allocator.freeze();
// Begin stream capture — all ops on this stream are recorded, not executed
client
.stream
.begin_capture(CUstreamCaptureMode::CU_STREAM_CAPTURE_MODE_GLOBAL)?;
// Execute the closure — ops are recorded into the graph
let result = f(client);
// End capture — MUST happen even if the closure failed, otherwise the
// stream is left in capture mode and all subsequent operations fail.
//
// AUTO_FREE_ON_LAUNCH: graph-managed memory allocated during capture is
// freed on each launch. For graph capture in training (where we re-run
// the same graph), this is acceptable — each launch re-allocates.
// For inference with stable output pointers, the caller must copy the
// output tensor after each launch before the next launch frees it.
let flags = cudarc::driver::sys::CUgraphInstantiate_flags::CUDA_GRAPH_INSTANTIATE_FLAG_AUTO_FREE_ON_LAUNCH;
let graph_result = client.stream.end_capture(flags);
// Restore caching allocator for normal (non-capture) operations
client.allocator.unfreeze();
// Handle closure error: propagate after restoring stream
let closure_result = result?;
// Handle capture error
let graph_opt = graph_result?;
let cudarc_graph = graph_opt.ok_or_else(|| {
crate::error::Error::Backend(
"CUDA graph capture produced no operations — closure recorded nothing".into(),
)
})?;
Ok((super::CudaGraph::new(cudarc_graph), closure_result))
}
/// Allocate GPU memory.
///
/// Routes through the client's caching allocator (free-list pool) to avoid
/// cuMemAllocAsync driver round-trips for repeated same-size allocations.
fn allocate(size_bytes: usize, device: &Self::Device) -> crate::error::Result<u64> {
if size_bytes == 0 {
return Ok(0);
}
let client = get_or_create_client(device);
client.allocator.allocate(size_bytes)
}
/// Deallocate GPU memory.
///
/// Routes through the client's caching allocator — buffers are returned to
/// the free-list for reuse instead of calling cuMemFreeAsync.
fn deallocate(ptr: u64, size_bytes: usize, device: &Self::Device) {
if ptr == 0 {
return;
}
// Try to use the client's caching allocator (returns to free-list)
if let Some(client) = try_get_cached_client(device.index) {
client.allocator.deallocate(ptr, size_bytes);
return;
}
// Client not available (shutdown) — free directly
unsafe {
if !is_cuda_context_valid() {
return;
}
let result = if let Some(stream) = try_get_cached_stream(device.index) {
cudarc::driver::sys::cuMemFreeAsync(ptr, stream)
} else {
cudarc::driver::sys::cuMemFree_v2(ptr)
};
if result != cudarc::driver::sys::CUresult::CUDA_SUCCESS
&& result != cudarc::driver::sys::CUresult::CUDA_ERROR_ILLEGAL_ADDRESS
{
log_cuda_memory_error("cuMemFree", ptr, result);
}
}
}
/// Copy data from host to device.
///
/// Returns an error if the CUDA copy operation fails.
fn copy_to_device(src: &[u8], dst: u64, device: &Self::Device) -> crate::error::Result<()> {
if src.is_empty() || dst == 0 {
return Ok(());
}
let client = get_or_create_client(device);
unsafe {
let result = cudarc::driver::sys::cuMemcpyHtoDAsync_v2(
dst,
src.as_ptr() as *const std::ffi::c_void,
src.len(),
client.stream.cu_stream(),
);
if result != cudarc::driver::sys::CUresult::CUDA_SUCCESS {
return Err(crate::error::Error::Backend(format!(
"CUDA host-to-device copy failed: {} bytes ({:?})",
src.len(),
result
)));
}
// No explicit sync needed here: with pageable (non-pinned) host memory,
// cuMemcpyHtoDAsync stages the source buffer before returning, so `src` is
// safe to reuse immediately; device-side completion is ordered by the stream.
// An explicit stream.synchronize() here would also drain ALL pending GPU
// work, destroying pipeline throughput.
}
Ok(())
}
/// Copy data from device to host.
///
/// Returns an error if the CUDA copy operation fails.
fn copy_from_device(
src: u64,
dst: &mut [u8],
device: &Self::Device,
) -> crate::error::Result<()> {
if dst.is_empty() || src == 0 {
return Ok(());
}
let client = get_or_create_client(device);
unsafe {
let result = cudarc::driver::sys::cuMemcpyDtoHAsync_v2(
dst.as_mut_ptr() as *mut std::ffi::c_void,
src,
dst.len(),
client.stream.cu_stream(),
);
if result != cudarc::driver::sys::CUresult::CUDA_SUCCESS {
return Err(crate::error::Error::Backend(format!(
"[numr::cuda] Device-to-host copy failed: {} bytes ({:?})",
dst.len(),
result
)));
}
// Async copies into pageable host memory are not guaranteed to be
// host-synchronous, so synchronize the stream before returning: this ensures
// the copy (and all prior GPU work on the stream that produced the data) has
// completed before the caller reads `dst`.
let _ = client.stream.synchronize();
}
Ok(())
}
/// Record an event on the compute stream.
fn record_compute_event(device: &Self::Device) -> crate::error::Result<u64> {
let client = get_or_create_client(device);
client
.record_event_on_compute()
.map_err(|e| crate::error::Error::Backend(format!("Event record failed: {}", e)))
}
/// Pipelined D2H copy: copy stream waits on the provided event, copies,
/// and syncs only the copy stream. Compute stream continues concurrently.
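///
/// A hedged sketch of the intended pairing with `record_compute_event`;
/// `src_ptr` and `n` are hypothetical values produced by earlier compute work.
///
/// ```ignore
/// // Fence the producing kernel on the compute stream.
/// let event = CudaRuntime::record_compute_event(&device)?;
///
/// // The copy stream waits on that event, copies, and syncs only itself,
/// // so further compute work can overlap with the transfer.
/// let mut host = vec![0u8; n];
/// CudaRuntime::copy_from_device_pipelined(src_ptr, &mut host, &device, event)?;
/// ```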
fn copy_from_device_pipelined(
src: u64,
dst: &mut [u8],
device: &Self::Device,
event: u64,
) -> crate::error::Result<()> {
if dst.is_empty() || src == 0 {
return Ok(());
}
let client = get_or_create_client(device);
unsafe {
// 1. Copy stream waits for the event (i.e. for the producing compute work, e.g. an argmax, to finish)
client.copy_stream_wait_event(event).map_err(|e| {
client.destroy_event(event);
crate::error::Error::Backend(format!("Stream wait event failed: {}", e))
})?;
// 2. Launch D2H copy on copy stream
let result = cudarc::driver::sys::cuMemcpyDtoHAsync_v2(
dst.as_mut_ptr() as *mut std::ffi::c_void,
src,
dst.len(),
client.copy_stream.cu_stream(),
);
if result != cudarc::driver::sys::CUresult::CUDA_SUCCESS {
client.destroy_event(event);
return Err(crate::error::Error::Backend(format!(
"[numr::cuda] Pipelined D2H copy failed: {} bytes ({:?})",
dst.len(),
result
)));
}
// 3. Sync ONLY the copy stream (compute stream keeps running)
let _ = client.copy_stream.synchronize();
client.destroy_event(event);
}
Ok(())
}
/// Copy data within device memory.
///
/// Returns an error if the CUDA copy operation fails.
fn copy_within_device(
src: u64,
dst: u64,
size_bytes: usize,
device: &Self::Device,
) -> crate::error::Result<()> {
if size_bytes == 0 || src == 0 || dst == 0 {
return Ok(());
}
let client = get_or_create_client(device);
unsafe {
let result = cudarc::driver::sys::cuMemcpyDtoDAsync_v2(
dst,
src,
size_bytes,
client.stream.cu_stream(),
);
if result != cudarc::driver::sys::CUresult::CUDA_SUCCESS {
return Err(crate::error::Error::Backend(format!(
"[numr::cuda] Device-to-device copy failed: {} bytes ({:?})",
size_bytes, result
)));
}
}
Ok(())
}
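/// Materialize a strided (non-contiguous) view into a contiguous destination
/// buffer using the strided-copy kernel.
///
/// A hedged sketch: the handles come from `allocate`, and the example assumes
/// strides are expressed in elements (the kernel receives `elem_size`
/// separately). The values shown are illustrative only.
///
/// ```ignore
/// // Pack the 3x2 transpose of a row-major 2x3 f32 buffer.
/// let shape = [3usize, 2];
/// let strides = [1isize, 3];
/// CudaRuntime::copy_strided(src, 0, dst, &shape, &strides, 4, &device)?;
/// ```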
fn copy_strided(
src_handle: u64,
src_byte_offset: usize,
dst_handle: u64,
shape: &[usize],
strides: &[isize],
elem_size: usize,
device: &Self::Device,
) -> crate::error::Result<()> {
if src_handle == 0 || dst_handle == 0 || shape.is_empty() {
return Ok(());
}
let numel: usize = shape.iter().product();
if numel == 0 {
return Ok(());
}
let ndim = shape.len();
let client = get_or_create_client(device);
// Shape and strides are passed as kernel arguments (by value), not device
// memory pointers. This is critical for CUDA graph capture compatibility:
// H2D copies of temporary host data create graph memcpy nodes that re-read
// from stale host addresses on replay, causing CUDA_ERROR_ILLEGAL_ADDRESS.
unsafe {
let kernel_result = kernels::launch_strided_copy(
&client.context,
&client.stream,
device.index,
src_handle,
dst_handle,
shape,
strides,
numel,
ndim,
elem_size,
src_byte_offset,
);
if let Err(e) = kernel_result {
return Err(crate::error::Error::Backend(format!(
"[numr::cuda] Strided copy kernel failed: {} bytes ({} elements × {} bytes/elem) from {} to {} on device {}: {:?}",
numel * elem_size,
numel,
elem_size,
src_handle,
dst_handle,
device.index,
e
)));
}
}
Ok(())
}
fn default_device() -> Self::Device {
CudaDevice::new(0)
}
fn default_client(device: &Self::Device) -> Self::Client {
get_or_create_client(device)
}
fn raw_handle(client: &Self::Client) -> &Self::RawHandle {
&client.raw_handle
}
}
/// Get the default CUDA device (device 0)
pub fn cuda_device() -> CudaDevice {
CudaDevice::new(0)
}
/// Get a specific CUDA device by ID
pub fn cuda_device_id(device_id: usize) -> CudaDevice {
CudaDevice::new(device_id)
}
/// Check if CUDA is available on this system
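///
/// A minimal guard sketch for callers (illustrative only):
///
/// ```ignore
/// if is_cuda_available() {
///     let device = cuda_device();
///     // ... build clients / tensors on `device`
/// } else {
///     // ... fall back to a CPU runtime
/// }
/// ```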
pub fn is_cuda_available() -> bool {
std::panic::catch_unwind(|| {
let device = CudaDevice::new(0);
let _client = get_or_create_client(&device);
})
.is_ok()
}