kaio_runtime/device.rs
//! CUDA device management.

use std::sync::{Arc, OnceLock};

use cudarc::driver::{CudaContext, CudaStream, DeviceRepr, ValidAsZeroBits};

use crate::buffer::GpuBuffer;
use crate::error::Result;

/// Process-wide latch for the debug-build performance note.
///
/// Sprint 7.0.5 A2: emit a one-time stderr note on first `KaioDevice::new`
/// when the binary is built in debug mode. Prevents the common "benchmarked
/// in debug, bounced" adoption failure where new users run a showcase example
/// with `cargo run` (defaulting to debug) and conclude KAIO is slow. The note
/// is performance-framed only — debug mode does not affect correctness, and a
/// `cargo test`-in-debug user checking kernel output should not see their
/// correctness results cast into doubt.
static DEBUG_WARNED: OnceLock<()> = OnceLock::new();

/// Performance-framed debug-mode note body. `const` so tests can assert on
/// its content without re-typing the message.
const DEBUG_WARNING_MESSAGE: &str = "[kaio] Note: debug build — GPU kernel performance is ~10-20x slower than --release. Use `cargo run --release` / `cargo test --release` for representative performance numbers. Correctness is unaffected. Set KAIO_SUPPRESS_DEBUG_WARNING=1 to silence.";

/// Pure decision function: should the debug-mode note fire in this
/// process? Split out from [`maybe_warn_debug_build`] so the env-var
/// logic is testable without the static `OnceLock` interfering.
///
/// Note: the presence of `KAIO_SUPPRESS_DEBUG_WARNING` is the signal;
/// any value, not just `1`, suppresses the note.
fn should_emit_debug_warning() -> bool {
    cfg!(debug_assertions) && std::env::var("KAIO_SUPPRESS_DEBUG_WARNING").is_err()
}

/// Emit the debug-mode performance note to stderr once per process, if
/// [`should_emit_debug_warning`] returns true.
///
/// Called from [`KaioDevice::new`] — every KAIO program hits this path on
/// first launch, so the note surfaces exactly when a user would first
/// benefit from knowing. In release builds, `cfg!(debug_assertions)` folds
/// to `false` and the whole body compiles out.
fn maybe_warn_debug_build() {
    if should_emit_debug_warning() {
        DEBUG_WARNED.get_or_init(|| {
            eprintln!("{DEBUG_WARNING_MESSAGE}");
        });
    }
}

/// A KAIO GPU device — wraps a CUDA context and its default stream.
///
/// Created via [`KaioDevice::new`] with a device ordinal (0 for the first GPU).
/// All allocation and transfer operations go through the default stream.
///
/// # Example
///
/// ```ignore
/// let device = KaioDevice::new(0)?;
/// let buf = device.alloc_from(&[1.0f32, 2.0, 3.0])?;
/// let host = buf.to_host(&device)?;
/// ```
pub struct KaioDevice {
    ctx: Arc<CudaContext>,
    stream: Arc<CudaStream>,
}

impl std::fmt::Debug for KaioDevice {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("KaioDevice")
            .field("ordinal", &self.ctx.ordinal())
            .finish()
    }
}

impl KaioDevice {
    /// Create a new device targeting the GPU at the given ordinal.
    ///
    /// Ordinal 0 is the first GPU. Returns an error if no GPU exists at
    /// that ordinal or if the CUDA driver fails to initialize.
    pub fn new(ordinal: usize) -> Result<Self> {
        maybe_warn_debug_build();
        let ctx = CudaContext::new(ordinal)?;
        let stream = ctx.default_stream();
        Ok(Self { ctx, stream })
    }

    /// Query basic information about this device.
    pub fn info(&self) -> Result<DeviceInfo> {
        DeviceInfo::from_context(&self.ctx)
    }

    /// Allocate device memory and copy data from a host slice.
    pub fn alloc_from<T: DeviceRepr>(&self, data: &[T]) -> Result<GpuBuffer<T>> {
        let slice = self.stream.clone_htod(data)?;
        Ok(GpuBuffer::from_raw(slice))
    }

    /// Allocate zero-initialized device memory.
    pub fn alloc_zeros<T: DeviceRepr + ValidAsZeroBits>(&self, len: usize) -> Result<GpuBuffer<T>> {
        let slice = self.stream.alloc_zeros::<T>(len)?;
        Ok(GpuBuffer::from_raw(slice))
    }

    /// Access the underlying CUDA stream for kernel launch operations.
    ///
    /// Used with cudarc's `launch_builder` to launch kernels. In Phase 2,
    /// the proc macro will generate typed wrappers that hide this.
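    ///
    /// # Example
    ///
    /// A minimal launch sketch through cudarc (assumes `func` is a
    /// `cudarc::driver::CudaFunction` retrieved from a loaded module, and
    /// `slice`/`n` are the kernel's arguments; the names are illustrative,
    /// not part of the KAIO API):
    ///
    /// ```ignore
    /// use cudarc::driver::LaunchConfig;
    ///
    /// let stream = device.stream();
    /// let mut launch = stream.launch_builder(&func);
    /// launch.arg(&slice).arg(&n);
    /// // Launching is unsafe: the argument list must match the kernel signature.
    /// unsafe { launch.launch(LaunchConfig::for_num_elems(n))? };
    /// ```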
    pub fn stream(&self) -> &Arc<CudaStream> {
        &self.stream
    }

    /// Load a PTX module from source text and return a [`crate::module::KaioModule`].
    ///
    /// The PTX text is passed to the CUDA driver's `cuModuleLoadData` —
    /// no NVRTC compilation occurs. The driver JIT-compiles the PTX for
    /// the current GPU.
    ///
    /// # Deprecated — prefer [`load_module`](Self::load_module)
    ///
    /// The [`load_module`](Self::load_module) path runs
    /// [`PtxModule::validate`](kaio_core::ir::PtxModule::validate)
    /// before the driver sees the PTX, catching SM mismatches (e.g.
    /// `mma.sync` on sub-Ampere targets) with readable
    /// [`KaioError::Validation`](crate::error::KaioError::Validation)
    /// errors instead of cryptic `ptxas` failures deep in the driver.
    ///
    /// This function remains public for raw-PTX use cases (external PTX
    /// files, hand-written PTX for research, bypassing validation
    /// intentionally). It is not scheduled for removal in the 0.2.x line.
    ///
    /// # Migration
    ///
    /// Before:
    /// ```ignore
    /// let ptx_text: String = build_my_ptx();
    /// let module = device.load_ptx(&ptx_text)?;
    /// ```
    ///
    /// After:
    /// ```ignore
    /// use kaio_core::ir::PtxModule;
    /// let ptx_module: PtxModule = build_my_module("sm_80");
    /// let module = device.load_module(&ptx_module)?;
    /// ```
    #[deprecated(
        since = "0.2.1",
        note = "use load_module(&PtxModule) — runs PtxModule::validate() for readable SM-mismatch errors"
    )]
    pub fn load_ptx(&self, ptx_text: &str) -> Result<crate::module::KaioModule> {
        let ptx = cudarc::nvrtc::Ptx::from_src(ptx_text);
        let module = self.ctx.load_module(ptx)?;
        Ok(crate::module::KaioModule::from_raw(module))
    }

    /// Validate, emit, and load a [`kaio_core::ir::PtxModule`] on the device.
    ///
    /// This is the preferred entrypoint when the caller has an in-memory
    /// `PtxModule` (as opposed to raw PTX text). Before the PTX text is
    /// handed to the driver, [`kaio_core::ir::PtxModule::validate`]
    /// checks that the module's target SM supports every feature used by
    /// its kernels — raising
    /// [`KaioError::Validation`](crate::error::KaioError::Validation) if
    /// e.g. a `mma.sync` op is present but the target is `sm_70`.
    ///
    /// Surfacing the error at this layer gives the user a readable
    /// message ("`mma.sync.m16n8k16 requires sm_80+, target is sm_70`")
    /// instead of a cryptic `ptxas` error from deep in the driver.
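    ///
    /// # Example
    ///
    /// A sketch of the validate-then-load flow (`build_my_module` is the
    /// same stand-in used in the [`load_ptx`](Self::load_ptx) migration
    /// example; any source of an in-memory `PtxModule` works):
    ///
    /// ```ignore
    /// use kaio_core::ir::PtxModule;
    ///
    /// let ptx_module: PtxModule = build_my_module("sm_70");
    /// // A module using mma.sync with an sm_70 target fails here with a
    /// // readable KaioError::Validation, before the driver is involved.
    /// match device.load_module(&ptx_module) {
    ///     Ok(_module) => { /* kernels are ready to launch */ }
    ///     Err(e) => eprintln!("rejected: {e}"),
    /// }
    /// ```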
    pub fn load_module(
        &self,
        module: &kaio_core::ir::PtxModule,
    ) -> Result<crate::module::KaioModule> {
        use kaio_core::emit::{Emit, PtxWriter};

        module.validate()?;

        let mut w = PtxWriter::new();
        module
            .emit(&mut w)
            .map_err(|e| crate::error::KaioError::PtxLoad(format!("emit failed: {e}")))?;
        let ptx_text = w.finish();

        // `load_ptx` is #[deprecated] as a public API to steer users to the
        // validated module path, but it's still the correct internal
        // implementation detail after we've emitted the PTX text here.
        #[allow(deprecated)]
        self.load_ptx(&ptx_text)
    }
}

/// Basic information about a CUDA device.
///
/// Phase 1 includes name, compute capability, and total memory.
/// Additional fields (SM count, max threads per block, max shared memory,
/// warp size) are planned for Phase 3/4 when shared memory and occupancy
/// calculations matter.
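///
/// # Example
///
/// A sketch of reading the fields from a [`KaioDevice::info`] call
/// (assumes `device` is an existing [`KaioDevice`]):
///
/// ```ignore
/// let info = device.info()?;
/// println!(
///     "{} (SM {}.{}), {} MiB",
///     info.name,
///     info.compute_capability.0,
///     info.compute_capability.1,
///     info.total_memory / (1024 * 1024),
/// );
/// ```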
#[derive(Debug, Clone)]
pub struct DeviceInfo {
    /// GPU device name (e.g. "NVIDIA GeForce RTX 4090").
    pub name: String,
    /// Compute capability as (major, minor) — e.g. (8, 9) for SM 8.9.
    pub compute_capability: (u32, u32),
    /// Total device memory in bytes.
    pub total_memory: usize,
}

impl DeviceInfo {
    /// Query device info from a CUDA context.
    fn from_context(ctx: &Arc<CudaContext>) -> Result<Self> {
        use cudarc::driver::result::device;

        let ordinal = ctx.ordinal();
        let dev = device::get(ordinal as i32)?;
        let name = device::get_name(dev)?;
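        // SAFETY: `dev` is a valid device handle obtained from device::get();
        // total_mem only reads the device's total memory, with no aliasing.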
        let total_memory = unsafe { device::total_mem(dev)? };

        // SAFETY: dev is a valid device handle obtained from device::get().
        // get_attribute reads a device property — no mutation, no aliasing.
        let major = unsafe {
            device::get_attribute(
                dev,
                cudarc::driver::sys::CUdevice_attribute::CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR,
            )?
        };
        let minor = unsafe {
            device::get_attribute(
                dev,
                cudarc::driver::sys::CUdevice_attribute::CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MINOR,
            )?
        };

        Ok(Self {
            name,
            compute_capability: (major as u32, minor as u32),
            total_memory,
        })
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use std::sync::OnceLock;

    static DEVICE: OnceLock<KaioDevice> = OnceLock::new();
    fn device() -> &'static KaioDevice {
        DEVICE.get_or_init(|| KaioDevice::new(0).expect("GPU required for tests"))
    }

    // Sprint 7.0.5 A2: debug-mode performance note tests.
    //
    // These verify the pure-function half of the warning logic. The
    // once-per-process behavior mediated by the static `DEBUG_WARNED`
    // OnceLock is not testable in-process without restructuring (the
    // latch is set for the lifetime of the test binary); manual/subprocess
    // verification is in sprint_7_0_5.md.
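
    // A sketch of that subprocess check, kept for reference. The `showcase`
    // example target is an assumption: substitute any binary that calls
    // `KaioDevice::new`. #[ignore]d because it needs a GPU and a debug
    // subprocess build.
    #[test]
    #[ignore] // requires NVIDIA GPU
    fn debug_warning_emitted_once_per_process() {
        let out = std::process::Command::new("cargo")
            .args(["run", "--example", "showcase"])
            .output()
            .expect("failed to run `cargo run --example showcase`");
        let stderr = String::from_utf8_lossy(&out.stderr);
        assert_eq!(
            stderr.matches("[kaio] Note: debug build").count(),
            1,
            "debug note must appear exactly once per process"
        );
    }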

    #[test]
    fn debug_warning_message_is_performance_framed_not_correctness_framed() {
        // Regression canary (Sprint 7.0.5 A2 message framing): if the
        // wording ever drifts to imply correctness is affected ("results
        // are not meaningful," "output is invalid," etc.) this test
        // fails. The whole point of the message is to prevent perf
        // misunderstandings WITHOUT scaring off correctness testing.
        let msg = DEBUG_WARNING_MESSAGE;
        assert!(
            msg.contains("performance"),
            "debug warning must mention performance: {msg}"
        );
        assert!(
            msg.contains("Correctness is unaffected") || msg.contains("correctness is unaffected"),
            "debug warning must explicitly state correctness is unaffected: {msg}"
        );
        assert!(
            !msg.to_lowercase().contains("not meaningful")
                && !msg.to_lowercase().contains("invalid"),
            "debug warning must NOT imply results are invalid/not meaningful: {msg}"
        );
        assert!(
            msg.contains("KAIO_SUPPRESS_DEBUG_WARNING"),
            "debug warning must document the opt-out env var: {msg}"
        );
    }

    #[test]
    fn debug_warning_opt_out_env_var_suppresses() {
        // SAFETY: single-threaded env-var manipulation inside a test.
        // Restore the prior value (if any) before returning so other
        // tests in the same binary don't observe stale state.
        let prev = std::env::var("KAIO_SUPPRESS_DEBUG_WARNING").ok();
        unsafe {
            std::env::set_var("KAIO_SUPPRESS_DEBUG_WARNING", "1");
        }
        assert!(
            !should_emit_debug_warning(),
            "KAIO_SUPPRESS_DEBUG_WARNING=1 must suppress the warning"
        );
        unsafe {
            std::env::remove_var("KAIO_SUPPRESS_DEBUG_WARNING");
        }
        // In debug builds the warning should now be allowed; in release
        // builds cfg!(debug_assertions) is false so it's suppressed either
        // way. Assert the cfg-consistent expectation.
        assert_eq!(should_emit_debug_warning(), cfg!(debug_assertions));
        // Restore
        if let Some(v) = prev {
            unsafe {
                std::env::set_var("KAIO_SUPPRESS_DEBUG_WARNING", v);
            }
        }
    }

    #[test]
    #[ignore] // requires NVIDIA GPU
    fn device_creation() {
        let dev = KaioDevice::new(0);
        assert!(dev.is_ok(), "KaioDevice::new(0) failed: {dev:?}");
    }

    #[test]
    #[ignore]
    fn device_info_name() {
        let info = device().info().expect("info() failed");
        assert!(!info.name.is_empty(), "device name should not be empty");
        // No assertion on the exact model; print the name (e.g. "NVIDIA
        // GeForce RTX 4090") for manual inspection.
        eprintln!("GPU name: {}", info.name);
    }

    #[test]
    #[ignore]
    fn device_info_compute_capability() {
        let info = device().info().expect("info() failed");
        // Any SM 7.0+ GPU should work (Volta and newer)
        let (major, _minor) = info.compute_capability;
        assert!(
            major >= 7,
            "expected SM 7.0+ GPU, got SM {}.{}",
            info.compute_capability.0,
            info.compute_capability.1,
        );
        eprintln!(
            "GPU compute capability: SM {}.{}",
            info.compute_capability.0, info.compute_capability.1
        );
    }

    #[test]
    #[ignore]
    fn buffer_roundtrip_f32() {
        let data = vec![1.0f32, 2.0, 3.0, 4.0, 5.0];
        let buf = device().alloc_from(&data).expect("alloc_from failed");
        let result = buf.to_host(device()).expect("to_host failed");
        assert_eq!(result, data, "roundtrip data mismatch");
    }

    #[test]
    #[ignore]
    fn buffer_alloc_zeros() {
        let buf = device()
            .alloc_zeros::<f32>(100)
            .expect("alloc_zeros failed");
        let result = buf.to_host(device()).expect("to_host failed");
        assert_eq!(result, vec![0.0f32; 100]);
    }

    #[test]
    #[ignore]
    fn buffer_len() {
        let buf = device()
            .alloc_from(&[1.0f32, 2.0, 3.0])
            .expect("alloc_from failed");
        assert_eq!(buf.len(), 3);
        assert!(!buf.is_empty());
    }

    #[test]
    #[ignore]
    fn invalid_device_ordinal() {
        let result = KaioDevice::new(999);
        assert!(result.is_err(), "expected error for ordinal 999");
    }
}