// cubecl_wgpu/runtime.rs

use crate::{
    AutoCompiler, AutoGraphicsApi, GraphicsApi, WgpuDevice, backend, compute::WgpuServer,
    contiguous_strides,
};
use cubecl_common::device::{Device, DeviceState};
use cubecl_common::{future, profile::TimingMethod};
use cubecl_core::server::ServerUtilities;
use cubecl_core::{CubeCount, CubeDim, Runtime, ir::TargetProperties};
pub use cubecl_runtime::memory_management::MemoryConfiguration;
use cubecl_runtime::memory_management::MemoryDeviceProperties;
use cubecl_runtime::{DeviceProperties, memory_management::HardwareProperties};
use cubecl_runtime::{
    client::ComputeClient,
    logging::{ProfileLevel, ServerLogger},
};
use wgpu::{InstanceFlags, RequestAdapterOptions};

/// Runtime that uses the [wgpu] crate with the wgsl compiler. This is used in the wgpu backend.
/// For advanced configuration, use [`init_setup`] to pass in runtime options or to select a
/// specific graphics API.
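///
/// A minimal usage sketch (a default device is assumed for illustration):
/// ```ignore
/// let client = WgpuRuntime::client(&WgpuDevice::DefaultDevice);
/// ```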
#[derive(Debug)]
pub struct WgpuRuntime;

impl DeviceState for WgpuServer {
    fn init(device_id: cubecl_common::device::DeviceId) -> Self {
        let device = WgpuDevice::from_id(device_id);
        let setup = future::block_on(create_setup_for_device(&device, AutoGraphicsApi::backend()));
        create_server(setup, RuntimeOptions::default())
    }
}

impl Runtime for WgpuRuntime {
    type Compiler = AutoCompiler;
    type Server = WgpuServer;
    type Device = WgpuDevice;

    fn client(device: &Self::Device) -> ComputeClient<Self> {
        ComputeClient::load(device)
    }

    fn name(client: &ComputeClient<Self>) -> &'static str {
        match client.info() {
            wgpu::Backend::Vulkan => {
                #[cfg(feature = "spirv")]
                return "wgpu<spirv>";

                #[cfg(not(feature = "spirv"))]
                return "wgpu<wgsl>";
            }
            wgpu::Backend::Metal => {
                #[cfg(feature = "msl")]
                return "wgpu<msl>";

                #[cfg(not(feature = "msl"))]
                return "wgpu<wgsl>";
            }
            _ => "wgpu<wgsl>",
        }
    }

    fn supported_line_sizes() -> &'static [u8] {
        #[cfg(feature = "msl")]
        {
            &[8, 4, 2, 1]
        }
        #[cfg(not(feature = "msl"))]
        {
            &[4, 2, 1]
        }
    }

    fn max_global_line_size() -> u8 {
        4
    }

    fn max_cube_count() -> (u32, u32, u32) {
        let max_dim = u16::MAX as u32;
        (max_dim, max_dim, max_dim)
    }

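    // Reading is only supported when the strides match those computed by
    // `contiguous_strides` for the shape, i.e. a fully contiguous layout.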
    fn can_read_tensor(shape: &[usize], strides: &[usize]) -> bool {
        if shape.is_empty() {
            return true;
        }

        for (expected, &stride) in contiguous_strides(shape).into_iter().zip(strides) {
            if expected != stride {
                return false;
            }
        }

        true
    }

    fn target_properties() -> TargetProperties {
        TargetProperties {
            // Values are irrelevant, since no wgsl backends currently support manual mma
            mma: Default::default(),
        }
    }
}

/// The values that control how a wgpu runtime performs its calculations.
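///
/// A hedged construction sketch:
/// ```ignore
/// let options = RuntimeOptions {
///     tasks_max: 16,
///     ..Default::default()
/// };
/// ```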
pub struct RuntimeOptions {
    /// Controls the maximum number of compute tasks aggregated into a single GPU command.
    pub tasks_max: usize,
    /// Configures the memory management.
    pub memory_config: MemoryConfiguration,
}

impl Default for RuntimeOptions {
    fn default() -> Self {
        #[cfg(test)]
        const DEFAULT_MAX_TASKS: usize = 1;
        #[cfg(not(test))]
        const DEFAULT_MAX_TASKS: usize = 32;

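        // The compile-time default can be overridden via the environment,
        // e.g. `CUBECL_WGPU_MAX_TASKS=16`.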
        let tasks_max = match std::env::var("CUBECL_WGPU_MAX_TASKS") {
            Ok(value) => value
                .parse::<usize>()
                .expect("CUBECL_WGPU_MAX_TASKS should be a positive integer."),
            Err(_) => DEFAULT_MAX_TASKS,
        };

        Self {
            tasks_max,
            memory_config: MemoryConfiguration::default(),
        }
    }
}

/// A complete setup used to run wgpu.
///
/// These can either be created with [`init_setup`] or [`init_setup_async`].
#[derive(Clone, Debug)]
pub struct WgpuSetup {
    /// The underlying wgpu instance.
    pub instance: wgpu::Instance,
    /// The selected 'adapter'. This corresponds to a physical device.
    pub adapter: wgpu::Adapter,
    /// The wgpu device Burn will use. NB: There can only be one device per adapter.
    pub device: wgpu::Device,
    /// The queue Burn commands will be submitted to.
    pub queue: wgpu::Queue,
    /// The backend used by the setup.
    pub backend: wgpu::Backend,
}

/// Create a [`WgpuDevice`] from an existing [`WgpuSetup`].
/// Useful when you want to share a device between CubeCL and other wgpu-dependent libraries.
///
/// # Note
///
/// Please **do not** call this on the same [`setup`](WgpuSetup) more than once.
///
/// This function generates a new, globally unique ID for the device every time it is called,
/// even if called on the same device multiple times.
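///
/// A minimal usage sketch (assuming `setup` was obtained from [`init_setup`]):
/// ```ignore
/// let device = init_device(setup, RuntimeOptions::default());
/// let client = WgpuRuntime::client(&device);
/// ```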
pub fn init_device(setup: WgpuSetup, options: RuntimeOptions) -> WgpuDevice {
    use core::sync::atomic::{AtomicU32, Ordering};

    static COUNTER: AtomicU32 = AtomicU32::new(0);

    let device_id = COUNTER.fetch_add(1, Ordering::Relaxed);
    if device_id == u32::MAX {
        core::panic!("Device ID overflowed");
    }

    let device_id = WgpuDevice::Existing(device_id);
    let server = create_server(setup, options);
    let _ = ComputeClient::<WgpuRuntime>::init(&device_id, server);
    device_id
}

/// Like [`init_setup_async`], but synchronous.
/// On wasm, it is necessary to use [`init_setup_async`] instead.
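///
/// A minimal usage sketch (assuming [`AutoGraphicsApi`] as the graphics API):
/// ```ignore
/// let setup = init_setup::<AutoGraphicsApi>(&WgpuDevice::DefaultDevice, RuntimeOptions::default());
/// ```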
pub fn init_setup<G: GraphicsApi>(device: &WgpuDevice, options: RuntimeOptions) -> WgpuSetup {
    cfg_if::cfg_if! {
        if #[cfg(target_family = "wasm")] {
            let _ = (device, options);
            panic!("Creating a wgpu setup synchronously is unsupported on wasm. Use init_setup_async instead");
        } else {
            future::block_on(init_setup_async::<G>(device, options))
        }
    }
}

/// Initialize a client on the given device with the given options.
/// This function is useful for configuring the runtime options
/// or for picking a different graphics API.
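///
/// A minimal usage sketch for async contexts such as wasm:
/// ```ignore
/// let setup = init_setup_async::<AutoGraphicsApi>(&device, RuntimeOptions::default()).await;
/// ```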
190pub async fn init_setup_async<G: GraphicsApi>(
191    device: &WgpuDevice,
192    options: RuntimeOptions,
193) -> WgpuSetup {
194    let setup = create_setup_for_device(device, G::backend()).await;
195    let return_setup = setup.clone();
196    let server = create_server(setup, options);
197    let _ = ComputeClient::<WgpuRuntime>::init(device, server);
198    return_setup
199}
200
201pub(crate) fn create_server(setup: WgpuSetup, options: RuntimeOptions) -> WgpuServer {
202    let limits = setup.device.limits();
203    let mut adapter_limits = setup.adapter.limits();
204
205    // Workaround: WebGPU reports some "fake" subgroup info atm, as it's not really supported yet.
206    // However, some algorithms do rely on having this information eg. cubecl-reduce uses max subgroup size _even_ when
207    // subgroups aren't used. For now, just override with the maximum range of subgroups possible.
208    if adapter_limits.min_subgroup_size == 0 && adapter_limits.max_subgroup_size == 0 {
209        // There is in theory nothing limiting the size to go below 8 but in practice 8 is the minimum found anywhere.
210        adapter_limits.min_subgroup_size = 8;
211        // This is a hard limit of GPU APIs (subgroup ballot returns 4 * 32 bits).
212        adapter_limits.max_subgroup_size = 128;
213    }

    let mem_props = MemoryDeviceProperties {
        max_page_size: limits.max_storage_buffer_binding_size as u64,
        alignment: limits.min_storage_buffer_offset_alignment as u64,
    };
    let max_count = adapter_limits.max_compute_workgroups_per_dimension;
    let hardware_props = HardwareProperties {
        load_width: 128,
        // On Apple Silicon, the plane size is 32,
        // though the reported minimum and maximum differ.
        // https://github.com/gpuweb/gpuweb/issues/3950
        #[cfg(apple_silicon)]
        plane_size_min: 32,
        #[cfg(not(apple_silicon))]
        plane_size_min: adapter_limits.min_subgroup_size,
        #[cfg(apple_silicon)]
        plane_size_max: 32,
        #[cfg(not(apple_silicon))]
        plane_size_max: adapter_limits.max_subgroup_size,
        // wgpu uses an additional internal buffer for variable-length buffers,
        // so we expose one less binding on our side to make room for it.
        // See: https://github.com/gfx-rs/wgpu/blob/a9638c8e3ac09ce4f27ac171f8175671e30365fd/wgpu-hal/src/metal/device.rs#L799
        max_bindings: limits
            .max_storage_buffers_per_shader_stage
            .saturating_sub(1),
        max_shared_memory_size: limits.max_compute_workgroup_storage_size as usize,
        max_cube_count: CubeCount::new_3d(max_count, max_count, max_count),
        max_units_per_cube: adapter_limits.max_compute_invocations_per_workgroup,
        max_cube_dim: CubeDim::new_3d(
            adapter_limits.max_compute_workgroup_size_x,
            adapter_limits.max_compute_workgroup_size_y,
            adapter_limits.max_compute_workgroup_size_z,
        ),
        num_streaming_multiprocessors: None,
        num_tensor_cores: None,
        min_tensor_cores_dim: None,
        num_cpu_cores: None, // TODO: Check if device is CPU.
    };

    let mut compilation_options = Default::default();

    let features = setup.adapter.features();

    let time_measurement = if features.contains(wgpu::Features::TIMESTAMP_QUERY) {
        TimingMethod::Device
    } else {
        TimingMethod::System
    };

    let mut device_props = DeviceProperties::new(
        Default::default(),
        mem_props.clone(),
        hardware_props,
        time_measurement,
    );

    #[cfg(not(all(target_os = "macos", feature = "msl")))]
    {
        if features.contains(wgpu::Features::SUBGROUP)
            && setup.adapter.get_info().device_type != wgpu::DeviceType::Cpu
        {
            use cubecl_runtime::Plane;

            device_props.features.plane.insert(Plane::Ops);
        }
    }

    #[cfg(any(feature = "spirv", feature = "msl"))]
    device_props
        .features
        .plane
        .insert(cubecl_runtime::Plane::NonUniformControlFlow);

    backend::register_features(&setup.adapter, &mut device_props, &mut compilation_options);

    let logger = alloc::sync::Arc::new(ServerLogger::default());

    WgpuServer::new(
        mem_props,
        options.memory_config,
        compilation_options,
        setup.device.clone(),
        setup.queue,
        options.tasks_max,
        setup.backend,
        time_measurement,
        ServerUtilities::new(device_props, logger, setup.backend),
    )
}

/// Select the wgpu device and queue based on the provided [device](WgpuDevice) and
/// [backend](wgpu::Backend).
pub(crate) async fn create_setup_for_device(
    device: &WgpuDevice,
    backend: wgpu::Backend,
) -> WgpuSetup {
    let (instance, adapter) = request_adapter(device, backend).await;
    let (device, queue) = backend::request_device(&adapter).await;

    log::info!(
        "Created wgpu compute server on device {:?} => {:?}",
        device,
        adapter.get_info()
    );

    WgpuSetup {
        instance,
        adapter,
        device,
        queue,
        backend,
    }
}

async fn request_adapter(
    device: &WgpuDevice,
    backend: wgpu::Backend,
) -> (wgpu::Instance, wgpu::Adapter) {
    let debug = ServerLogger::default();
    let instance_flags = match (debug.profile_level(), debug.compilation_activated()) {
        (Some(ProfileLevel::Full), _) => InstanceFlags::advanced_debugging(),
        (_, true) => InstanceFlags::debugging(),
        (_, false) => InstanceFlags::default(),
    };
    log::debug!("{instance_flags:?}");
    let instance = wgpu::Instance::new(&wgpu::InstanceDescriptor {
        backends: backend.into(),
        flags: instance_flags,
        ..Default::default()
    });

    #[allow(deprecated)]
    let override_device = if matches!(
        device,
        WgpuDevice::DefaultDevice | WgpuDevice::BestAvailable
    ) {
        get_device_override()
    } else {
        None
    };

    let device = override_device.unwrap_or_else(|| device.clone());

    let adapter = match device {
        #[cfg(not(target_family = "wasm"))]
        WgpuDevice::DiscreteGpu(num) => select_from_adapter_list(
            num,
            "No Discrete GPU device found",
            &instance,
            &device,
            backend,
        ),
        #[cfg(not(target_family = "wasm"))]
        WgpuDevice::IntegratedGpu(num) => select_from_adapter_list(
            num,
            "No Integrated GPU device found",
            &instance,
            &device,
            backend,
        ),
        #[cfg(not(target_family = "wasm"))]
        WgpuDevice::VirtualGpu(num) => select_from_adapter_list(
            num,
            "No Virtual GPU device found",
            &instance,
            &device,
            backend,
        ),
        #[cfg(not(target_family = "wasm"))]
        WgpuDevice::Cpu => {
            select_from_adapter_list(0, "No CPU device found", &instance, &device, backend)
        }
        WgpuDevice::Existing(_) => {
            unreachable!("Cannot select an adapter for an existing device.")
        }
        _ => instance
            .request_adapter(&RequestAdapterOptions {
                power_preference: wgpu::PowerPreference::HighPerformance,
                force_fallback_adapter: false,
                compatible_surface: None,
            })
            .await
            .expect("No suitable adapter available for the requested backend."),
    };

    log::info!("Using adapter {:?}", adapter.get_info());

    (instance, adapter)
}

#[cfg(not(target_family = "wasm"))]
fn select_from_adapter_list(
    num: usize,
    error: &str,
    instance: &wgpu::Instance,
    device: &WgpuDevice,
    backend: wgpu::Backend,
) -> wgpu::Adapter {
    let mut adapters_other = Vec::new();
    let mut adapters = Vec::new();

    instance
        .enumerate_adapters(backend.into())
        .into_iter()
        .for_each(|adapter| {
            let device_type = adapter.get_info().device_type;

            if let wgpu::DeviceType::Other = device_type {
                adapters_other.push(adapter);
                return;
            }

            let is_same_type = match device {
                WgpuDevice::DiscreteGpu(_) => device_type == wgpu::DeviceType::DiscreteGpu,
                WgpuDevice::IntegratedGpu(_) => device_type == wgpu::DeviceType::IntegratedGpu,
                WgpuDevice::VirtualGpu(_) => device_type == wgpu::DeviceType::VirtualGpu,
                WgpuDevice::Cpu => device_type == wgpu::DeviceType::Cpu,
                #[allow(deprecated)]
                WgpuDevice::DefaultDevice | WgpuDevice::BestAvailable => true,
                WgpuDevice::Existing(_) => {
                    unreachable!("Cannot select an adapter for an existing device.")
                }
            };

            if is_same_type {
                adapters.push(adapter);
            }
        });

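    // Prefer adapters of the requested type; fall back to `DeviceType::Other`
    // adapters when not enough typed adapters were enumerated.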
    if adapters.len() <= num {
        if adapters_other.len() <= num {
            panic!(
                "{}, adapters {:?}, other adapters {:?}",
                error,
                adapters
                    .into_iter()
                    .map(|adapter| adapter.get_info())
                    .collect::<Vec<_>>(),
                adapters_other
                    .into_iter()
                    .map(|adapter| adapter.get_info())
                    .collect::<Vec<_>>(),
            );
        }

        return adapters_other.remove(num);
    }

    adapters.remove(num)
}

fn get_device_override() -> Option<WgpuDevice> {
    // If BestAvailable, check if we should instead construct as
    // if a specific device was specified.
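    // Accepted values: "DiscreteGpu(n)", "IntegratedGpu(n)", "VirtualGpu(n)", or "Cpu".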
    std::env::var("CUBECL_WGPU_DEFAULT_DEVICE")
        .ok()
        .and_then(|var| {
            let override_device = if let Some(inner) = var.strip_prefix("DiscreteGpu(") {
                inner
                    .strip_suffix(")")
                    .and_then(|s| s.parse().ok())
                    .map(WgpuDevice::DiscreteGpu)
            } else if let Some(inner) = var.strip_prefix("IntegratedGpu(") {
                inner
                    .strip_suffix(")")
                    .and_then(|s| s.parse().ok())
                    .map(WgpuDevice::IntegratedGpu)
            } else if let Some(inner) = var.strip_prefix("VirtualGpu(") {
                inner
                    .strip_suffix(")")
                    .and_then(|s| s.parse().ok())
                    .map(WgpuDevice::VirtualGpu)
            } else if var == "Cpu" {
                Some(WgpuDevice::Cpu)
            } else {
                None
            };

            if override_device.is_none() {
                log::warn!("Unknown CUBECL_WGPU_DEFAULT_DEVICE override {var}");
            }
            override_device
        })
}