Skip to main content

cubecl_spirv/
compiler.rs

1use crate::{
2    SpirvKernel,
3    debug::DebugInfo,
4    item::Item,
5    lookups::LookupTables,
6    target::{GLCompute, SpirvTarget},
7    transformers::{BitwiseTransform, ErfTransform, HypotTransform, RhypotTransform},
8};
9use cubecl_common::backtrace::BackTrace;
10use cubecl_core::{
11    Compiler, CubeDim, Metadata, WgpuCompilationOptions,
12    ir::{self as core, ElemType, InstructionModes, StorageType, UIntKind, features::EnumSet},
13    post_processing::{
14        checked_io::CheckedIoProcessor, saturating::SaturatingArithmeticProcessor,
15        unroll::UnrollProcessor,
16    },
17    prelude::{FastMath, KernelDefinition},
18    server::ExecutionMode,
19};
20use cubecl_opt::{BasicBlock, NodeIndex, Optimizer, OptimizerBuilder, SharedLiveness, Uniformity};
21use cubecl_runtime::{
22    compiler::CompilationError,
23    config::{GlobalConfig, compilation::CompilationLogLevel},
24};
25use rspirv::{
26    binary::Assemble,
27    dr::{Builder, InsertPoint, Instruction, Module, Operand},
28    spirv::{BuiltIn, Capability, Decoration, FPFastMathMode, Op, StorageClass, Word},
29};
30use std::{
31    collections::HashSet,
32    fmt::Debug,
33    mem::take,
34    ops::{Deref, DerefMut},
35    rc::Rc,
36    sync::Arc,
37};
38
/// Maximum vectorization width (in elements) handled by this backend; also
/// used to configure the unroll processor in `compile_kernel`.
pub const MAX_VECTORIZATION: usize = 4;

/// Stateful SPIR-V compiler wrapping an `rspirv` [`Builder`].
///
/// Accumulates module state (types, capabilities, debug info) while walking
/// the optimized IR, then yields a finished [`Module`] from `compile_kernel`.
pub struct SpirvCompiler<Target: SpirvTarget = GLCompute> {
    pub target: Target,
    // Underlying rspirv module builder; `Deref`/`DerefMut` expose it directly.
    pub(crate) builder: Builder,

    pub cube_dim: CubeDim,
    pub mode: ExecutionMode,
    // Integer type used for addresses/indices (defaults to u32, see `Default`).
    pub addr_type: StorageType,
    pub debug_symbols: bool,
    global_invocation_id: Word,
    num_workgroups: Word,
    // Index of the function's first basic block; function-scope variables are
    // inserted here (see `declare_function_variable`).
    pub setup_block: usize,
    // Shared analyses from the optimizer; `Rc` because the compiler is cloned.
    pub opt: Rc<Optimizer>,
    pub uniformity: Rc<Uniformity>,
    pub shared_liveness: Rc<SharedLiveness>,
    // IR block currently being compiled (None outside `compile_block`).
    pub current_block: Option<NodeIndex>,
    // Guards against compiling the same IR block twice.
    pub visited: HashSet<NodeIndex>,

    // SPIR-V capabilities required by the generated code.
    pub capabilities: HashSet<Capability>,
    pub state: LookupTables,
    // Per-binding index of its extended-metadata slot (see `Compiler::compile`).
    pub ext_meta_pos: Vec<u32>,
    pub metadata: Metadata,
    pub debug_info: Option<DebugInfo>,
    pub compilation_options: WgpuCompilationOptions,
}
65
// SAFETY: NOTE(review): the struct holds `Rc<Optimizer>` (and friends), which
// are not thread-safe, so these impls assert an external invariant — that a
// `SpirvCompiler` is never accessed from more than one thread at a time.
// That invariant is not visible in this file; confirm at the call sites.
unsafe impl<T: SpirvTarget> Send for SpirvCompiler<T> {}
unsafe impl<T: SpirvTarget> Sync for SpirvCompiler<T> {}
68
69impl<T: SpirvTarget> Clone for SpirvCompiler<T> {
70    fn clone(&self) -> Self {
71        Self {
72            target: self.target.clone(),
73            builder: Builder::new_from_module(self.module_ref().clone()),
74            cube_dim: self.cube_dim,
75            mode: self.mode,
76            addr_type: self.addr_type,
77            global_invocation_id: self.global_invocation_id,
78            num_workgroups: self.num_workgroups,
79            setup_block: self.setup_block,
80            opt: self.opt.clone(),
81            uniformity: self.uniformity.clone(),
82            shared_liveness: self.shared_liveness.clone(),
83            current_block: self.current_block,
84            capabilities: self.capabilities.clone(),
85            state: self.state.clone(),
86            debug_symbols: self.debug_symbols,
87            visited: self.visited.clone(),
88            metadata: self.metadata.clone(),
89            debug_info: self.debug_info.clone(),
90            ext_meta_pos: self.ext_meta_pos.clone(),
91            compilation_options: self.compilation_options.clone(),
92        }
93    }
94}
95
96fn debug_symbols_activated() -> bool {
97    matches!(
98        GlobalConfig::get().compilation.logger.level,
99        CompilationLogLevel::Full
100    )
101}
102
103impl<T: SpirvTarget> Default for SpirvCompiler<T> {
104    fn default() -> Self {
105        Self {
106            target: Default::default(),
107            builder: Builder::new(),
108            cube_dim: CubeDim::new_single(),
109            mode: Default::default(),
110            addr_type: ElemType::UInt(UIntKind::U32).into(),
111            global_invocation_id: Default::default(),
112            num_workgroups: Default::default(),
113            capabilities: Default::default(),
114            state: Default::default(),
115            setup_block: Default::default(),
116            opt: Default::default(),
117            uniformity: Default::default(),
118            shared_liveness: Default::default(),
119            current_block: Default::default(),
120            debug_symbols: debug_symbols_activated(),
121            visited: Default::default(),
122            metadata: Default::default(),
123            debug_info: Default::default(),
124            ext_meta_pos: Default::default(),
125            compilation_options: Default::default(),
126        }
127    }
128}
129
/// Lets `rspirv` `Builder` methods be called directly on the compiler.
impl<T: SpirvTarget> Deref for SpirvCompiler<T> {
    type Target = Builder;

    fn deref(&self) -> &Self::Target {
        &self.builder
    }
}
137
/// Mutable counterpart of the `Deref` impl; most codegen methods go through this.
impl<T: SpirvTarget> DerefMut for SpirvCompiler<T> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.builder
    }
}
143
144impl<T: SpirvTarget> Compiler for SpirvCompiler<T> {
145    type Representation = SpirvKernel;
146    type CompilationOptions = WgpuCompilationOptions;
147
148    fn compile(
149        &mut self,
150        mut value: KernelDefinition,
151        compilation_options: &Self::CompilationOptions,
152        mode: ExecutionMode,
153        addr_type: StorageType,
154    ) -> Result<Self::Representation, CompilationError> {
155        let errors = value.body.pop_errors();
156        if !errors.is_empty() {
157            let mut reason = "Can't compile spirv kernel".to_string();
158            for error in errors {
159                reason += error.as_str();
160                reason += "\n";
161            }
162
163            return Err(CompilationError::Validation {
164                reason,
165                backtrace: BackTrace::capture(),
166            });
167        }
168
169        let bindings = value.buffers.clone();
170        let mut ext_meta_pos = Vec::new();
171        let mut num_ext = 0;
172
173        let mut all_meta: Vec<_> = value
174            .buffers
175            .iter()
176            .chain(value.tensor_maps.iter())
177            .map(|buf| (buf.id, buf.has_extended_meta))
178            .collect();
179        all_meta.sort_by_key(|(id, _)| *id);
180
181        let num_meta = all_meta.len();
182
183        for (_, has_extended_meta) in all_meta.iter() {
184            ext_meta_pos.push(num_ext);
185            if *has_extended_meta {
186                num_ext += 1;
187            }
188        }
189
190        self.cube_dim = value.cube_dim;
191        self.mode = mode;
192        self.addr_type = addr_type;
193        self.metadata = Metadata::new(num_meta as u32, num_ext);
194        self.compilation_options = compilation_options.clone();
195        self.ext_meta_pos = ext_meta_pos;
196
197        let (module, optimizer, shared_size) = self.compile_kernel(value);
198
199        Ok(SpirvKernel {
200            assembled_module: module.assemble(),
201            module: Some(Arc::new(module)),
202            optimizer: Some(Arc::new(optimizer)),
203            bindings: bindings.iter().map(|it| it.visibility).collect(),
204            shared_size,
205        })
206    }
207
208    fn elem_size(&self, elem: core::ElemType) -> usize {
209        elem.size()
210    }
211
212    fn extension(&self) -> &'static str {
213        "spv"
214    }
215}
216
/// Compact diagnostic form, e.g. `spirv<GLCompute>`; delegates to the target's `Debug`.
impl<Target: SpirvTarget> Debug for SpirvCompiler<Target> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "spirv<{:?}>", self.target)
    }
}
222
impl<Target: SpirvTarget> SpirvCompiler<Target> {
    /// Compiles an optimized kernel body into a SPIR-V [`Module`].
    ///
    /// Runs the transformer/processor pipeline, emits the main function block
    /// by block, declares shared memories and builtin inputs, and returns the
    /// module together with the optimizer and the shared-memory size in bytes.
    pub fn compile_kernel(&mut self, kernel: KernelDefinition) -> (Module, Optimizer, usize) {
        let options = kernel.options.clone();

        // Per-kernel debug flag can enable symbols even when the global config doesn't.
        self.debug_symbols = debug_symbols_activated() || options.debug_symbols;

        // Target SPIR-V 1.6.
        self.set_version(1, 6);

        let mut target = self.target.clone();

        let mut opt = OptimizerBuilder::default()
            .with_transformer(ErfTransform)
            .with_transformer(BitwiseTransform)
            .with_transformer(HypotTransform)
            .with_transformer(RhypotTransform)
            .with_processor(CheckedIoProcessor::new(self.mode))
            .with_processor(UnrollProcessor::new(MAX_VECTORIZATION))
            .with_processor(SaturatingArithmeticProcessor::new(true))
            .optimize(kernel.body.clone(), kernel.cube_dim);

        // Cache analyses before freezing the optimizer behind an Rc.
        self.uniformity = opt.analysis::<Uniformity>();
        self.shared_liveness = opt.analysis::<SharedLiveness>();
        self.opt = Rc::new(opt);

        self.init_state(kernel.clone());
        self.init_debug();

        let cube_dims = vec![kernel.cube_dim.x, kernel.cube_dim.y, kernel.cube_dim.z];

        target.set_kernel_name(options.kernel_name.clone());

        let (main, debug_setup) = self.declare_main(&options.kernel_name);

        let setup = self.id();
        self.debug_name(setup, "setup");

        // Compile the setup block first, then the CFG starting at the entry
        // node, then make sure the return block exists.
        let entry = self.opt.entry();
        let body = self.label(entry);
        let setup_block = self.setup(setup, debug_setup);
        self.setup_block = setup_block;
        self.compile_block(entry);

        let ret = self.opt.ret;
        self.compile_block(ret);

        // If a block is still selected, terminate it by branching to `ret`.
        if self.selected_block().is_some() {
            let label = self.label(ret);
            self.branch(label).unwrap();
        }

        // Wire the setup block into the entry block.
        self.select_block(Some(setup_block)).unwrap();
        self.branch(body).unwrap();

        self.end_function().unwrap();

        let shared_size = self.declare_shared_memories();

        // Declare every builtin used by the kernel as an Input variable with
        // the corresponding BuiltIn decoration.
        let builtins = self
            .state
            .used_builtins
            .clone()
            .into_iter()
            .map(|(builtin, (id, item))| {
                let ty = Item::Pointer(StorageClass::Input, Box::new(item)).id(self);
                self.variable(ty, Some(id), StorageClass::Input, None);
                self.decorate(id, Decoration::BuiltIn, vec![builtin.into()]);
                id
            })
            .collect::<Vec<_>>();

        target.set_modes(self, main, builtins, cube_dims);

        // Take the builder out (leaving a default one) to finish the module.
        let module = take(&mut self.builder).module();
        (module, self.opt.as_ref().clone(), shared_size)
    }

    /// Emits the setup block: registers constant arrays and runs the debug
    /// setup closure. Returns the block's index and deselects it so regular
    /// block compilation can proceed.
    fn setup(&mut self, label: Word, debug_setup: impl Fn(&mut Self)) -> usize {
        self.begin_block(Some(label)).unwrap();

        let opt = self.opt.clone();
        for const_arr in opt.const_arrays() {
            self.register_const_array(const_arr);
        }

        debug_setup(self);

        let setup_block = self.selected_block().unwrap();
        self.select_block(None).unwrap();
        setup_block
    }

    /// The IR basic block currently being compiled.
    ///
    /// # Panics
    /// Panics if called outside of `compile_block` (no current block set).
    #[track_caller]
    pub fn current_block(&self) -> BasicBlock {
        self.opt.block(self.current_block.unwrap()).clone()
    }

    /// Returns the id reserved for `builtin`, allocating one on first use.
    /// (The entry API can't be used here: allocating an id needs `&mut self`.)
    pub fn builtin(&mut self, builtin: BuiltIn, item: Item) -> Word {
        if let Some(existing) = self.state.used_builtins.get(&builtin) {
            existing.0
        } else {
            let id = self.id();
            self.state.used_builtins.insert(builtin, (id, item));
            id
        }
    }

    /// Compiles one IR block (idempotent: already-visited blocks are skipped).
    ///
    /// Emits the block's operations and control flow, then inserts phi nodes
    /// at the top of the block once all predecessor end-labels are known.
    pub fn compile_block(&mut self, block: NodeIndex) {
        if self.visited.contains(&block) {
            return;
        }
        self.visited.insert(block);
        self.current_block = Some(block);

        let label = self.label(block);
        self.begin_block(Some(label)).unwrap();
        let block_id = self.selected_block().unwrap();

        self.debug_start_block();

        let operations = self.current_block().ops.borrow().clone();
        for (_, operation) in operations {
            self.compile_operation(operation);
        }

        // Compiling control flow may recurse into successors and change the
        // selected builder block, so remember and restore the selection.
        let control_flow = self.current_block().control_flow.borrow().clone();
        self.compile_control_flow(control_flow);

        let current = self.selected_block();
        self.select_block(Some(block_id)).unwrap();
        let phi = { self.opt.block(block).phi_nodes.borrow().clone() };
        for phi in phi {
            let out = self.compile_variable(phi.out);
            let ty = out.item().id(self);
            let out_id = self.write_id(&out);
            let entries: Vec<_> = phi
                .entries
                .into_iter()
                .map(|it| {
                    let label = self.end_label(it.block);
                    let value = self.compile_variable(it.value);
                    let value = self.read(&value);
                    (value, label)
                })
                .collect();
            // SPIR-V requires OpPhi at the start of the block.
            self.insert_phi(InsertPoint::Begin, ty, Some(out_id), entries)
                .unwrap();
        }
        self.select_block(current).unwrap();
    }

    // Declare variable in the first block of the function, as required for
    // Function-storage OpVariable; restores the previous block selection.
    pub fn declare_function_variable(&mut self, ty: Word) -> Word {
        let setup = self.setup_block;
        let id = self.id();
        let var = Instruction::new(
            Op::Variable,
            Some(ty),
            Some(id),
            vec![Operand::StorageClass(StorageClass::Function)],
        );
        let current_block = self.selected_block();
        self.select_block(Some(setup)).unwrap();
        self.insert_into_block(InsertPoint::Begin, var).unwrap();
        self.select_block(current_block).unwrap();
        id
    }

    /// Declares all shared memories, choosing the explicit-layout path when
    /// supported. Returns the total shared-memory footprint in bytes.
    fn declare_shared_memories(&mut self) -> usize {
        if self.compilation_options.supports_explicit_smem {
            self.declare_shared_memories_explicit() as usize
        } else {
            self.declare_shared_memories_implicit() as usize
        }
    }

    /// When using `VK_KHR_workgroup_memory_explicit_layout`, all shared memory is declared as a
    /// `Block`. This means they are all pointers into the same chunk of memory, with different
    /// offsets and sizes. Unlike C++, this shared block is declared implicitly, not explicitly.
    /// Alignment and total size is calculated by the driver.
    fn declare_shared_memories_explicit(&mut self) -> u32 {
        let mut shared_size = 0;

        let shared_arrays = self.state.shared_arrays.clone();
        let shared = self.state.shared.clone();
        if shared_arrays.is_empty() && shared.is_empty() {
            return shared_size;
        }

        self.capabilities
            .insert(Capability::WorkgroupMemoryExplicitLayoutKHR);

        for (index, memory) in shared_arrays {
            let item_size = memory.item.size();
            // Memories alias the same chunk, so the footprint is the max
            // end offset, not the sum.
            shared_size = shared_size.max(memory.offset + memory.len * item_size);

            // It's safe to assume that if 8-bit/16-bit types are supported, they're supported for
            // explicit layout as well.
            match item_size {
                1 => {
                    self.capabilities
                        .insert(Capability::WorkgroupMemoryExplicitLayout8BitAccessKHR);
                }
                2 => {
                    self.capabilities
                        .insert(Capability::WorkgroupMemoryExplicitLayout16BitAccessKHR);
                }
                _ => {}
            }

            let arr_ty = Item::Array(Box::new(memory.item), memory.len);
            let arr_id = arr_ty.id(self);

            // ArrayStride may only be decorated once per type.
            if !self.state.decorated_types.contains(&arr_id) {
                self.decorate(
                    arr_id,
                    Decoration::ArrayStride,
                    [Operand::LiteralBit32(item_size)],
                );
                self.state.decorated_types.insert(arr_id);
            }

            // Wrap the array in a Block struct whose member offset places it
            // within the shared chunk.
            let block_ty = Item::Struct(vec![arr_ty]);
            let block_id = block_ty.id(self);

            self.decorate(block_id, Decoration::Block, []);
            self.member_decorate(
                block_id,
                0,
                Decoration::Offset,
                [Operand::LiteralBit32(memory.offset)],
            );

            let ptr_ty = self.type_pointer(None, StorageClass::Workgroup, block_id);

            self.debug_shared(memory.id, index);
            self.variable(ptr_ty, Some(memory.id), StorageClass::Workgroup, None);
            // All explicit-layout blocks alias each other.
            self.decorate(memory.id, Decoration::Aliased, []);
        }

        // Same scheme for scalar shared variables (a Block with one member).
        for (index, memory) in shared {
            let item_size = memory.item.size();
            shared_size = shared_size.max(memory.offset + item_size);

            // It's safe to assume that if 8-bit/16-bit types are supported, they're supported for
            // explicit layout as well.
            match item_size {
                1 => {
                    self.capabilities
                        .insert(Capability::WorkgroupMemoryExplicitLayout8BitAccessKHR);
                }
                2 => {
                    self.capabilities
                        .insert(Capability::WorkgroupMemoryExplicitLayout16BitAccessKHR);
                }
                _ => {}
            }

            let block_ty = Item::Struct(vec![memory.item]);
            let block_id = block_ty.id(self);

            self.decorate(block_id, Decoration::Block, []);
            self.member_decorate(
                block_id,
                0,
                Decoration::Offset,
                [Operand::LiteralBit32(memory.offset)],
            );

            let ptr_ty = self.type_pointer(None, StorageClass::Workgroup, block_id);

            self.debug_shared(memory.id, index);
            self.variable(ptr_ty, Some(memory.id), StorageClass::Workgroup, None);
            self.decorate(memory.id, Decoration::Aliased, []);
        }

        shared_size
    }

    /// Fallback path: each shared memory is its own Workgroup variable; sizes
    /// are summed since they occupy disjoint storage.
    fn declare_shared_memories_implicit(&mut self) -> u32 {
        let mut shared_size = 0;
        let shared_memories = self.state.shared_arrays.clone();
        for (index, memory) in shared_memories {
            shared_size += memory.len * memory.item.size();

            let arr_ty = Item::Array(Box::new(memory.item), memory.len);
            let ptr_ty = Item::Pointer(StorageClass::Workgroup, Box::new(arr_ty)).id(self);

            self.debug_shared(memory.id, index);
            self.variable(ptr_ty, Some(memory.id), StorageClass::Workgroup, None);
        }
        let shared = self.state.shared.clone();
        for (index, memory) in shared {
            shared_size += memory.item.size();

            let ptr_ty = Item::Pointer(StorageClass::Workgroup, Box::new(memory.item)).id(self);

            self.debug_shared(memory.id, index);
            self.variable(ptr_ty, Some(memory.id), StorageClass::Workgroup, None);
        }
        shared_size
    }

    /// Decorates `out_id` with an FPFastMathMode matching the instruction's
    /// fast-math flags. No-op when unsupported or when no flags are set.
    pub fn declare_math_mode(&mut self, modes: InstructionModes, out_id: Word) {
        if !self.compilation_options.supports_fp_fast_math || modes.fp_math_mode.is_empty() {
            return;
        }
        let mode = convert_math_mode(modes.fp_math_mode);
        // FPFastMathMode on arbitrary instructions requires FloatControls2.
        self.capabilities.insert(Capability::FloatControls2);
        self.decorate(
            out_id,
            Decoration::FPFastMathMode,
            [Operand::FPFastMathMode(mode)],
        );
    }

    /// Whether the block currently being compiled executes uniformly across
    /// the subgroup, per the uniformity analysis.
    ///
    /// # Panics
    /// Panics if no block is currently being compiled.
    pub fn is_uniform_block(&self) -> bool {
        self.uniformity
            .is_block_uniform(self.current_block.unwrap())
    }
}
543
544pub(crate) fn convert_math_mode(math_mode: EnumSet<FastMath>) -> FPFastMathMode {
545    let mut flags = FPFastMathMode::NONE;
546
547    for mode in math_mode.iter() {
548        match mode {
549            FastMath::NotNaN => flags |= FPFastMathMode::NOT_NAN,
550            FastMath::NotInf => flags |= FPFastMathMode::NOT_INF,
551            FastMath::UnsignedZero => flags |= FPFastMathMode::NSZ,
552            FastMath::AllowReciprocal => flags |= FPFastMathMode::ALLOW_RECIP,
553            FastMath::AllowContraction => flags |= FPFastMathMode::ALLOW_CONTRACT,
554            FastMath::AllowReassociation => flags |= FPFastMathMode::ALLOW_REASSOC,
555            FastMath::AllowTransform => {
556                flags |= FPFastMathMode::ALLOW_CONTRACT
557                    | FPFastMathMode::ALLOW_REASSOC
558                    | FPFastMathMode::ALLOW_TRANSFORM
559            }
560            _ => {}
561        }
562    }
563
564    flags
565}