cubecl_cuda/runtime.rs

use crate::{
    WmmaCompiler,
    compute::{CudaServer, context::CudaContext},
    device::CudaDevice,
};
use cubecl_common::{
    device::{Device, DeviceState},
    profile::TimingMethod,
};
use cubecl_core::{
    MemoryConfiguration, Runtime,
    ir::{
        BarrierLevel, ContiguousElements, DeviceProperties, ElemType, FloatKind,
        HardwareProperties, LineSize, MatrixLayout, MemoryDeviceProperties, MmaProperties,
        OpaqueType, SemanticType, StorageType, TargetProperties,
        features::{Plane, Tma, TypeUsage},
    },
    server::ServerUtilities,
};
use cubecl_cpp::{
    DialectWmmaCompiler,
    cuda::{CudaDialect, arch::CudaArchitecture, mma::contiguous_elements_cuda},
    register_supported_types,
    shared::{
        CompilationOptions, CppCompiler, CppSupportedFeatures, register_mma_features,
        register_scaled_mma_features, register_wmma_features,
    },
};
use cubecl_runtime::{client::ComputeClient, logging::ServerLogger};
use cubecl_zspace::striding::has_pitched_row_major_strides;
use cudarc::driver::sys::{CUDA_VERSION, cuDeviceTotalMem_v2};
use std::{mem::MaybeUninit, sync::Arc};

/// Options configuring the CUDA runtime.
#[derive(Default)]
pub struct RuntimeOptions {
    /// Configures the memory management.
    pub memory_config: MemoryConfiguration,
}

#[derive(Debug)]
pub struct CudaRuntime;

impl DeviceState for CudaServer {
    fn init(device_id: cubecl_common::device::DeviceId) -> Self {
        let options = RuntimeOptions::default();
        let device = CudaDevice::from_id(device_id);

        // To get the supported WMMA features and memory properties, we have to initialize the
        // server immediately.
        cudarc::driver::result::init().unwrap();
        let device_id = device.index as i32;
        let device_ptr = cudarc::driver::result::device::get(device_id).unwrap();
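        // Query the compute capability and encode it as `major * 10 + minor` (e.g. 8.6 -> 86);
        // the feature registration below is gated on this value.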
        let arch_major;
        let arch_version = unsafe {
            arch_major = cudarc::driver::result::device::get_attribute(
                device_ptr,
                cudarc::driver::sys::CUdevice_attribute::CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR,
            )
            .unwrap();
            let minor = cudarc::driver::result::device::get_attribute(
                device_ptr,
                cudarc::driver::sys::CUdevice_attribute::CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MINOR,
            )
            .unwrap();
            arch_major * 10 + minor
        } as u32;

        // This is the alignment returned by `cuMemAllocPitch`, so it's the one considered optimal
        // for row alignment by CUDA. This hasn't changed since at least the GTX 700 series.
        // Querying the texture row alignment is a heuristic, and it's not guaranteed to be the same.
        let mem_alignment = 512;

        // Ask the WMMA compiler for its supported combinations.
        let arch = CudaArchitecture {
            version: arch_version,
        };
        let supported_wmma_combinations = WmmaCompiler::supported_wmma_combinations(&arch);
        let supported_mma_combinations = WmmaCompiler::supported_mma_combinations(&arch);
        let supported_scaled_mma_combinations =
            WmmaCompiler::supported_scaled_mma_combinations(&arch);

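        // Retain the device's primary context and make it current on this thread.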
        let ctx = unsafe {
            let ctx = cudarc::driver::result::primary_ctx::retain(device_ptr).unwrap();
            cudarc::driver::result::ctx::set_current(ctx).unwrap();
            ctx
        };

        let max_memory = unsafe {
            let mut bytes = MaybeUninit::uninit();
            cuDeviceTotalMem_v2(bytes.as_mut_ptr(), device_ptr);
            bytes.assume_init() as u64
        };
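        // A single memory page is capped at a quarter of the total device memory.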
        let mem_properties = MemoryDeviceProperties {
            max_page_size: max_memory / 4,
            alignment: mem_alignment as u64,
        };

        let mut comp_opts = CompilationOptions {
            supports_features: CppSupportedFeatures {
                fast_math: true,
                ..Default::default()
            },
            ..Default::default()
        };

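        // Query hardware limits (warp size, shared memory, block/grid dimensions, SM count)
        // through the CUDA device attributes.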
        let hardware_props = unsafe {
            use cudarc::driver::{result::device::get_attribute, sys::CUdevice_attribute::*};
            let warp_size =
                get_attribute(device_ptr, CU_DEVICE_ATTRIBUTE_WARP_SIZE).unwrap() as u32;
            let max_shared = get_attribute(
                device_ptr,
                CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK_OPTIN,
            )
            .unwrap() as usize;
            let max_threads = get_attribute(device_ptr, CU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_BLOCK)
                .unwrap() as u32;
            let block_dim_x =
                get_attribute(device_ptr, CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_X).unwrap();
            let block_dim_y =
                get_attribute(device_ptr, CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_Y).unwrap();
            let block_dim_z =
                get_attribute(device_ptr, CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_Z).unwrap();
            let max_cube_dim = (block_dim_x as u32, block_dim_y as u32, block_dim_z as u32);

            let grid_dim_x = get_attribute(device_ptr, CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_X).unwrap();
            let grid_dim_y = get_attribute(device_ptr, CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_Y).unwrap();
            let grid_dim_z = get_attribute(device_ptr, CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_Z).unwrap();
            let max_cube_count = (grid_dim_x as u32, grid_dim_y as u32, grid_dim_z as u32);

            let num_streaming_multiprocessors = Some(
                get_attribute(device_ptr, CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT).unwrap() as u32,
            );
            let num_tensor_cores = tensor_cores_per_sm(arch_version);

            comp_opts.warp_size = warp_size;

            HardwareProperties {
                load_width: 128,
                plane_size_min: warp_size,
                plane_size_max: warp_size,
                max_bindings: crate::device::CUDA_MAX_BINDINGS,
                max_shared_memory_size: max_shared,
                max_cube_count,
                max_units_per_cube: max_threads,
                max_cube_dim,
                num_streaming_multiprocessors,
                num_tensor_cores,
                min_tensor_cores_dim: if supported_wmma_combinations.is_empty() {
                    None
                } else {
                    Some(8)
                },
                num_cpu_cores: None,
            }
        };

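        // Build the device properties, then register type and feature support gated on the
        // compute capability and the CUDA version.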
        let mut device_props = DeviceProperties::new(
            Default::default(),
            mem_properties.clone(),
            hardware_props,
            TimingMethod::System,
        );
        register_supported_types(&mut device_props);
        device_props.register_type_usage(ElemType::Float(FloatKind::TF32), TypeUsage::Conversion);
        if arch_version >= 60 {
            device_props.register_type_usage(
                StorageType::Atomic(ElemType::Float(FloatKind::F64)),
                TypeUsage::AtomicAdd | TypeUsage::AtomicLoadStore,
            );
        }
        if arch_version >= 70 {
            device_props.register_type_usage(
                StorageType::Atomic(ElemType::Float(FloatKind::F16)),
                TypeUsage::AtomicAdd | TypeUsage::AtomicLoadStore,
            );
            device_props.register_semantic_type(SemanticType::Pipeline);
            device_props
                .register_type_usage(OpaqueType::Barrier(BarrierLevel::Unit), TypeUsage::Buffer);
            device_props
                .register_type_usage(OpaqueType::Barrier(BarrierLevel::Cube), TypeUsage::Buffer);
            device_props.features.plane.insert(Plane::Sync);
            comp_opts.supports_features.grid_constants = true;
        }

        if arch_version >= 75 {
            device_props
                .features
                .ldmatrix
                .insert(ElemType::Float(FloatKind::F16).into());
            device_props
                .features
                .ldmatrix
                .insert(ElemType::Float(FloatKind::BF16).into());
            comp_opts.supports_features.fast_tanh = CUDA_VERSION >= 12080;
        }

        if arch_version >= 80 {
            device_props.features.copy_async = true;
        }

        // NOTE: This is commented out since I observed synchronisation issues with atomic add
        // for bf16.
        // if arch.get_version() >= 80 {
        //     device_props.register_feature(Feature::Type(Elem::AtomicFloat(FloatKind::BF16)));
        // }

        if arch_version >= 89 {
            device_props.register_type_usage(
                ElemType::Float(FloatKind::E4M3),
                TypeUsage::Conversion | TypeUsage::Buffer,
            );
            device_props.register_type_usage(
                ElemType::Float(FloatKind::E5M2),
                TypeUsage::Conversion | TypeUsage::Buffer,
            );
        }
        if arch_version >= 90 {
            device_props.features.tma.insert(Tma::Base);
            device_props.register_semantic_type(SemanticType::TensorMap);
            device_props.features.cube_cluster = true;
            comp_opts.supports_features.clusters = true;
            comp_opts.supports_features.elect_sync = true;
            device_props
                .features
                .stmatrix
                .insert(ElemType::Float(FloatKind::F16).into());
            device_props
                .features
                .stmatrix
                .insert(ElemType::Float(FloatKind::BF16).into());
        }

        if arch_version >= 100 {
            device_props.features.tma.insert(Tma::Im2colWide);
            // This breaks swizzling, so it's disabled for now; fix it in a dedicated PR.
            // if CUDA_VERSION >= 12090 {
            //     device_props.hardware.load_width = 256;
            // }
        }

        // NOTE: FP6/FP4 support is explicitly not marked as forward compatible, but it is
        // compatible within a major version. Try to keep this up to date with new arch major
        // revisions if they also implement it.
        if arch_major == 10 || arch_major == 11 || arch_major == 12 {
            device_props
                .register_type_usage(ElemType::Float(FloatKind::E2M1), TypeUsage::Conversion);
            device_props.register_type_usage(
                StorageType::Packed(ElemType::Float(FloatKind::E2M1), 2),
                TypeUsage::Conversion | TypeUsage::Buffer,
            );
            device_props.register_type_usage(
                ElemType::Float(FloatKind::E2M3),
                TypeUsage::Conversion | TypeUsage::Buffer,
            );
            device_props.register_type_usage(
                ElemType::Float(FloatKind::E3M2),
                TypeUsage::Conversion | TypeUsage::Buffer,
            );
            device_props.register_type_usage(
                ElemType::Float(FloatKind::UE8M0),
                TypeUsage::Conversion | TypeUsage::Buffer,
            );

            if CUDA_VERSION >= 12080 {
                device_props.features.tma.insert(Tma::SwizzleAtomicity);
            }
        }

        device_props.features.dynamic_line_size = true;
        device_props.features.alignment = true;
        device_props.features.plane.insert(Plane::Ops);
        device_props
            .features
            .plane
            .insert(Plane::NonUniformControlFlow);

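        // Register the WMMA/MMA instruction shapes reported by the compiler for this architecture.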
        register_wmma_features(supported_wmma_combinations, &mut device_props);
        register_mma_features(supported_mma_combinations, &mut device_props);
        register_scaled_mma_features(supported_scaled_mma_combinations, &mut device_props);

        let cuda_ctx = CudaContext::new(comp_opts, ctx, arch);
        let logger = Arc::new(ServerLogger::default());
        let utilities = ServerUtilities::new(device_props, logger, ());

        CudaServer::new(
            cuda_ctx,
            mem_properties,
            options.memory_config,
            mem_alignment,
            device_id,
            utilities,
        )
    }
}

pub type CudaCompiler = CppCompiler<CudaDialect<WmmaCompiler>>;

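/// Number of tensor cores per SM for a given compute capability, or `None` when unknown.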
fn tensor_cores_per_sm(version: u32) -> Option<u32> {
    match version {
        70 | 75 => Some(8),                           // Volta, Turing
        80 | 86 | 89 | 90 | 91 | 92 | 100 => Some(4), // Ampere, Ada, Hopper, Blackwell
        _ => None,                                    // Unknown or unsupported architecture
    }
}

impl Runtime for CudaRuntime {
    type Compiler = CudaCompiler;
    type Server = CudaServer;
    type Device = CudaDevice;

    fn client(device: &Self::Device) -> ComputeClient<Self> {
        ComputeClient::load(device)
    }

    fn name(_client: &ComputeClient<Self>) -> &'static str {
        "cuda"
    }

    fn require_array_lengths() -> bool {
        true
    }

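    // Line (vectorization) sizes supported by the generated kernels, widest first.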
    fn supported_line_sizes() -> &'static [LineSize] {
        &[16, 8, 4, 2, 1]
    }

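    // CUDA grid limits: up to 2^31 - 1 blocks in x and 65535 in y and z.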
    fn max_cube_count() -> (u32, u32, u32) {
        (i32::MAX as u32, u16::MAX as u32, u16::MAX as u32)
    }

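    // A tensor can be read directly only if its strides are row-major, allowing pitched
    // (row-padded) layouts.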
    fn can_read_tensor(shape: &[usize], strides: &[usize]) -> bool {
        has_pitched_row_major_strides(shape, strides)
    }

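    // Register-level MMA fragment layout: 32-bit registers, a plane size of 32, row-major A and
    // accumulator fragments, column-major B fragments.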
    fn target_properties() -> TargetProperties {
        TargetProperties {
            mma: MmaProperties {
                register_size_bits: 32,
                const_plane_size: 32,
                register_layout_a: MatrixLayout::RowMajor,
                register_layout_b: MatrixLayout::ColMajor,
                register_layout_acc: MatrixLayout::RowMajor,
                register_duplication_a: 1,
                register_duplication_b: 1,
                register_duplication_acc: 1,
                contiguous_elements: ContiguousElements::new(contiguous_elements_cuda),
            },
        }
    }
}