// kaio_runtime/device.rs
1//! CUDA device management.
2
3use std::sync::Arc;
4
5use cudarc::driver::{CudaContext, CudaStream, DeviceRepr, ValidAsZeroBits};
6
7use crate::buffer::GpuBuffer;
8use crate::error::Result;
9
10/// A KAIO GPU device — wraps a CUDA context and its default stream.
11///
12/// Created via [`KaioDevice::new`] with a device ordinal (0 for the first GPU).
13/// All allocation and transfer operations go through the default stream.
14///
15/// # Example
16///
17/// ```ignore
18/// let device = KaioDevice::new(0)?;
19/// let buf = device.alloc_from(&[1.0f32, 2.0, 3.0])?;
20/// let host = buf.to_host(&device)?;
21/// ```
pub struct KaioDevice {
    /// Shared CUDA driver context for the target GPU; used for module loading
    /// and for querying the device ordinal.
    ctx: Arc<CudaContext>,
    /// The context's default stream — all allocation and host/device copy
    /// operations in this wrapper are issued on it.
    stream: Arc<CudaStream>,
}
26
27impl std::fmt::Debug for KaioDevice {
28    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
29        f.debug_struct("KaioDevice")
30            .field("ordinal", &self.ctx.ordinal())
31            .finish()
32    }
33}
34
35impl KaioDevice {
36    /// Create a new device targeting the GPU at the given ordinal.
37    ///
38    /// Ordinal 0 is the first GPU. Returns an error if no GPU exists at
39    /// that ordinal or if the CUDA driver fails to initialize.
40    pub fn new(ordinal: usize) -> Result<Self> {
41        let ctx = CudaContext::new(ordinal)?;
42        let stream = ctx.default_stream();
43        Ok(Self { ctx, stream })
44    }
45
46    /// Query basic information about this device.
47    pub fn info(&self) -> Result<DeviceInfo> {
48        DeviceInfo::from_context(&self.ctx)
49    }
50
51    /// Allocate device memory and copy data from a host slice.
52    pub fn alloc_from<T: DeviceRepr>(&self, data: &[T]) -> Result<GpuBuffer<T>> {
53        let slice = self.stream.clone_htod(data)?;
54        Ok(GpuBuffer::from_raw(slice))
55    }
56
57    /// Allocate zero-initialized device memory.
58    pub fn alloc_zeros<T: DeviceRepr + ValidAsZeroBits>(&self, len: usize) -> Result<GpuBuffer<T>> {
59        let slice = self.stream.alloc_zeros::<T>(len)?;
60        Ok(GpuBuffer::from_raw(slice))
61    }
62
63    /// Access the underlying CUDA stream for kernel launch operations.
64    ///
65    /// Used with cudarc's `launch_builder` to launch kernels. In Phase 2,
66    /// the proc macro will generate typed wrappers that hide this.
67    pub fn stream(&self) -> &Arc<CudaStream> {
68        &self.stream
69    }
70
71    /// Load a PTX module from source text and return a [`crate::module::KaioModule`].
72    ///
73    /// The PTX text is passed to the CUDA driver's `cuModuleLoadData` —
74    /// no NVRTC compilation occurs. The driver JIT-compiles the PTX for
75    /// the current GPU.
76    ///
77    /// # Example
78    ///
79    /// ```ignore
80    /// let module = device.load_ptx(&ptx_text)?;
81    /// let func = module.function("vector_add")?;
82    /// ```
83    pub fn load_ptx(&self, ptx_text: &str) -> Result<crate::module::KaioModule> {
84        let ptx = cudarc::nvrtc::Ptx::from_src(ptx_text);
85        let module = self.ctx.load_module(ptx)?;
86        Ok(crate::module::KaioModule::from_raw(module))
87    }
88
89    /// Validate, emit, and load a [`kaio_core::ir::PtxModule`] on the device.
90    ///
91    /// This is the preferred entrypoint when the caller has an in-memory
92    /// `PtxModule` (as opposed to raw PTX text). Before the PTX text is
93    /// handed to the driver, [`kaio_core::ir::PtxModule::validate`]
94    /// checks that the module's target SM supports every feature used by
95    /// its kernels — raising
96    /// [`KaioError::Validation`](crate::error::KaioError::Validation) if
97    /// e.g. a `mma.sync` op is present but the target is `sm_70`.
98    ///
99    /// Surfacing the error at this layer gives the user a readable
100    /// message ("`mma.sync.m16n8k16 requires sm_80+, target is sm_70`")
101    /// instead of a cryptic `ptxas` error from deep in the driver.
102    pub fn load_module(
103        &self,
104        module: &kaio_core::ir::PtxModule,
105    ) -> Result<crate::module::KaioModule> {
106        use kaio_core::emit::{Emit, PtxWriter};
107
108        module.validate()?;
109
110        let mut w = PtxWriter::new();
111        module
112            .emit(&mut w)
113            .map_err(|e| crate::error::KaioError::PtxLoad(format!("emit failed: {e}")))?;
114        let ptx_text = w.finish();
115
116        self.load_ptx(&ptx_text)
117    }
118}
119
120/// Basic information about a CUDA device.
121///
122/// Phase 1 includes name, compute capability, and total memory.
123/// Additional fields (SM count, max threads per block, max shared memory,
124/// warp size) are planned for Phase 3/4 when shared memory and occupancy
125/// calculations matter.
// PartialEq/Eq derived so callers (and tests) can compare snapshots of
// device info directly; all fields are plain equality-comparable data.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct DeviceInfo {
    /// GPU device name (e.g. "NVIDIA GeForce RTX 4090").
    pub name: String,
    /// Compute capability as (major, minor) — e.g. (8, 9) for SM 8.9.
    pub compute_capability: (u32, u32),
    /// Total device memory in bytes.
    pub total_memory: usize,
}
135
136impl DeviceInfo {
137    /// Query device info from a CUDA context.
138    fn from_context(ctx: &Arc<CudaContext>) -> Result<Self> {
139        use cudarc::driver::result::device;
140
141        let ordinal = ctx.ordinal();
142        let dev = device::get(ordinal as i32)?;
143        let name = device::get_name(dev)?;
144        let total_memory = unsafe { device::total_mem(dev)? };
145
146        // SAFETY: dev is a valid device handle obtained from device::get().
147        // get_attribute reads a device property — no mutation, no aliasing.
148        let major = unsafe {
149            device::get_attribute(
150                dev,
151                cudarc::driver::sys::CUdevice_attribute::CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR,
152            )?
153        };
154        let minor = unsafe {
155            device::get_attribute(
156                dev,
157                cudarc::driver::sys::CUdevice_attribute::CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MINOR,
158            )?
159        };
160
161        Ok(Self {
162            name,
163            compute_capability: (major as u32, minor as u32),
164            total_memory,
165        })
166    }
167}
168
#[cfg(test)]
mod tests {
    use super::*;
    use std::sync::OnceLock;

    // One shared device for every GPU test — created lazily on first use so
    // non-GPU test runs (`#[ignore]`d) never touch the driver.
    static DEVICE: OnceLock<KaioDevice> = OnceLock::new();
    fn device() -> &'static KaioDevice {
        DEVICE.get_or_init(|| KaioDevice::new(0).expect("GPU required for tests"))
    }

    #[test]
    #[ignore] // requires NVIDIA GPU
    fn device_creation() {
        let created = KaioDevice::new(0);
        assert!(created.is_ok(), "KaioDevice::new(0) failed: {created:?}");
    }

    #[test]
    #[ignore]
    fn device_info_name() {
        let details = device().info().expect("info() failed");
        assert!(!details.name.is_empty(), "device name should not be empty");
        // Log the name so the GPU model can be eyeballed in test output.
        eprintln!("GPU name: {}", details.name);
    }

    #[test]
    #[ignore]
    fn device_info_compute_capability() {
        let details = device().info().expect("info() failed");
        // Any SM 7.0+ GPU should work (Volta and newer).
        let (major, minor) = details.compute_capability;
        assert!(major >= 7, "expected SM 7.0+ GPU, got SM {}.{}", major, minor);
        eprintln!("GPU compute capability: SM {}.{}", major, minor);
    }

    #[test]
    #[ignore]
    fn buffer_roundtrip_f32() {
        let input = vec![1.0f32, 2.0, 3.0, 4.0, 5.0];
        let gpu = device().alloc_from(&input).expect("alloc_from failed");
        let back = gpu.to_host(device()).expect("to_host failed");
        assert_eq!(back, input, "roundtrip data mismatch");
    }

    #[test]
    #[ignore]
    fn buffer_alloc_zeros() {
        let gpu = device()
            .alloc_zeros::<f32>(100)
            .expect("alloc_zeros failed");
        let back = gpu.to_host(device()).expect("to_host failed");
        assert_eq!(back, vec![0.0f32; 100]);
    }

    #[test]
    #[ignore]
    fn buffer_len() {
        let gpu = device()
            .alloc_from(&[1.0f32, 2.0, 3.0])
            .expect("alloc_from failed");
        assert_eq!(gpu.len(), 3);
        assert!(!gpu.is_empty());
    }

    #[test]
    #[ignore]
    fn invalid_device_ordinal() {
        let outcome = KaioDevice::new(999);
        assert!(outcome.is_err(), "expected error for ordinal 999");
    }
}
248}